Merge "Remove unused declarations of dead code."
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 7769aad..454caa9 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -146,13 +146,13 @@
$(HOST_CORE_IMAGE_optimizing_32) \
$(HOST_CORE_IMAGE_interpreter_64) \
$(HOST_CORE_IMAGE_interpreter_32) \
- $(HOST_OUT_EXECUTABLES)/patchoatd
+ patchoatd-host
ART_GTEST_dex2oat_environment_tests_TARGET_DEPS := \
$(TARGET_CORE_IMAGE_optimizing_64) \
$(TARGET_CORE_IMAGE_optimizing_32) \
$(TARGET_CORE_IMAGE_interpreter_64) \
$(TARGET_CORE_IMAGE_interpreter_32) \
- $(TARGET_OUT_EXECUTABLES)/patchoatd
+ patchoatd-target
ART_GTEST_oat_file_assistant_test_HOST_DEPS := \
$(ART_GTEST_dex2oat_environment_tests_HOST_DEPS)
@@ -161,10 +161,10 @@
ART_GTEST_dexoptanalyzer_test_HOST_DEPS := \
$(ART_GTEST_dex2oat_environment_tests_HOST_DEPS) \
- $(HOST_OUT_EXECUTABLES)/dexoptanalyzerd
+ dexoptanalyzerd-host
ART_GTEST_dexoptanalyzer_test_TARGET_DEPS := \
$(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS) \
- dexoptanalyzerd
+ dexoptanalyzerd-target
ART_GTEST_image_space_test_HOST_DEPS := \
$(ART_GTEST_dex2oat_environment_tests_HOST_DEPS)
@@ -172,57 +172,59 @@
$(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS)
ART_GTEST_dex2oat_test_HOST_DEPS := \
- $(ART_GTEST_dex2oat_environment_tests_HOST_DEPS)
+ $(ART_GTEST_dex2oat_environment_tests_HOST_DEPS) \
+ dex2oatd-host
ART_GTEST_dex2oat_test_TARGET_DEPS := \
- $(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS)
+ $(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS) \
+ dex2oatd-target
ART_GTEST_dex2oat_image_test_HOST_DEPS := \
- $(ART_GTEST_dex2oat_environment_tests_HOST_DEPS)
+ $(ART_GTEST_dex2oat_environment_tests_HOST_DEPS) \
+ dex2oatd-host
ART_GTEST_dex2oat_image_test_TARGET_DEPS := \
- $(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS)
+ $(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS) \
+ dex2oatd-target
# TODO: document why this is needed.
ART_GTEST_proxy_test_HOST_DEPS := $(HOST_CORE_IMAGE_DEFAULT_64) $(HOST_CORE_IMAGE_DEFAULT_32)
# The dexdiag test requires the dexdiag utility.
-ART_GTEST_dexdiag_test_HOST_DEPS := \
- $(HOST_OUT_EXECUTABLES)/dexdiag
-ART_GTEST_dexdiag_test_TARGET_DEPS := \
- dexdiag
+ART_GTEST_dexdiag_test_HOST_DEPS := dexdiag-host
+ART_GTEST_dexdiag_test_TARGET_DEPS := dexdiag-target
# The dexdump test requires an image and the dexdump utility.
# TODO: rename into dexdump when migration completes
ART_GTEST_dexdump_test_HOST_DEPS := \
$(HOST_CORE_IMAGE_DEFAULT_64) \
$(HOST_CORE_IMAGE_DEFAULT_32) \
- $(HOST_OUT_EXECUTABLES)/dexdump2
+ dexdump2-host
ART_GTEST_dexdump_test_TARGET_DEPS := \
$(TARGET_CORE_IMAGE_DEFAULT_64) \
$(TARGET_CORE_IMAGE_DEFAULT_32) \
- dexdump2
+ dexdump2-target
# The dexlayout test requires an image and the dexlayout utility.
# TODO: rename into dexdump when migration completes
ART_GTEST_dexlayout_test_HOST_DEPS := \
$(HOST_CORE_IMAGE_DEFAULT_64) \
$(HOST_CORE_IMAGE_DEFAULT_32) \
- $(HOST_OUT_EXECUTABLES)/dexlayout \
- $(HOST_OUT_EXECUTABLES)/dexdump2
+ dexlayout-host \
+ dexdump2-host
ART_GTEST_dexlayout_test_TARGET_DEPS := \
$(TARGET_CORE_IMAGE_DEFAULT_64) \
$(TARGET_CORE_IMAGE_DEFAULT_32) \
- dexlayout \
- dexdump2
+ dexlayout-target \
+ dexdump2-target
# The dexlist test requires an image and the dexlist utility.
ART_GTEST_dexlist_test_HOST_DEPS := \
$(HOST_CORE_IMAGE_DEFAULT_64) \
$(HOST_CORE_IMAGE_DEFAULT_32) \
- $(HOST_OUT_EXECUTABLES)/dexlist
+ dexlist-host
ART_GTEST_dexlist_test_TARGET_DEPS := \
$(TARGET_CORE_IMAGE_DEFAULT_64) \
$(TARGET_CORE_IMAGE_DEFAULT_32) \
- dexlist
+ dexlist-target
# The imgdiag test has dependencies on core.oat since it needs to load it during the test.
# For the host, also add the installed tool (in the base size, that should suffice). For the
@@ -230,30 +232,28 @@
ART_GTEST_imgdiag_test_HOST_DEPS := \
$(HOST_CORE_IMAGE_DEFAULT_64) \
$(HOST_CORE_IMAGE_DEFAULT_32) \
- $(HOST_OUT_EXECUTABLES)/imgdiagd
+ imgdiagd-host
ART_GTEST_imgdiag_test_TARGET_DEPS := \
$(TARGET_CORE_IMAGE_DEFAULT_64) \
$(TARGET_CORE_IMAGE_DEFAULT_32) \
- imgdiagd
+ imgdiagd-target
# Oatdump test requires an image and oatfile to dump.
ART_GTEST_oatdump_test_HOST_DEPS := \
$(HOST_CORE_IMAGE_DEFAULT_64) \
$(HOST_CORE_IMAGE_DEFAULT_32) \
- $(HOST_OUT_EXECUTABLES)/oatdumpd \
- $(HOST_OUT_EXECUTABLES)/oatdumpds
+ oatdumpd-host \
+ oatdumpds-host
ART_GTEST_oatdump_test_TARGET_DEPS := \
$(TARGET_CORE_IMAGE_DEFAULT_64) \
$(TARGET_CORE_IMAGE_DEFAULT_32) \
- oatdump
+ oatdumpd-target
ART_GTEST_oatdump_image_test_HOST_DEPS := $(ART_GTEST_oatdump_test_HOST_DEPS)
ART_GTEST_oatdump_image_test_TARGET_DEPS := $(ART_GTEST_oatdump_test_TARGET_DEPS)
# Profile assistant tests require the profman utility.
-ART_GTEST_profile_assistant_test_HOST_DEPS := \
- $(HOST_OUT_EXECUTABLES)/profmand
-ART_GTEST_profile_assistant_test_TARGET_DEPS := \
- profman
+ART_GTEST_profile_assistant_test_HOST_DEPS := profmand-host
+ART_GTEST_profile_assistant_test_TARGET_DEPS := profman-target
# The path for which all the source files are relative, not actually the current directory.
LOCAL_PATH := art
diff --git a/compiler/Android.bp b/compiler/Android.bp
index 8599471..1a2d9aa 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -355,6 +355,7 @@
"jni/jni_cfi_test.cc",
"optimizing/codegen_test.cc",
"optimizing/load_store_analysis_test.cc",
+ "optimizing/load_store_elimination_test.cc",
"optimizing/optimizing_cfi_test.cc",
"optimizing/scheduler_test.cc",
],
diff --git a/compiler/compiler.h b/compiler/compiler.h
index cfed6d5..3aa84f8 100644
--- a/compiler/compiler.h
+++ b/compiler/compiler.h
@@ -46,12 +46,6 @@
kOptimizing
};
- enum JniOptimizationFlags {
- kNone = 0x0,
- kFastNative = 0x1,
- kCriticalNative = 0x2,
- };
-
static Compiler* Create(CompilerDriver* driver, Kind kind);
virtual void Init() = 0;
@@ -71,8 +65,7 @@
virtual CompiledMethod* JniCompile(uint32_t access_flags,
uint32_t method_idx,
- const DexFile& dex_file,
- JniOptimizationFlags optimization_flags) const = 0;
+ const DexFile& dex_file) const = 0;
virtual bool JitCompile(Thread* self ATTRIBUTE_UNUSED,
jit::JitCodeCache* code_cache ATTRIBUTE_UNUSED,
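Reviewer note: with JniOptimizationFlags removed, the @FastNative/@CriticalNative distinction now travels in the method's access-flag bits. Below is a minimal self-contained sketch of that encoding; only kAccNative's 0x0100 value comes from the dex specification — the two other bit values and the helper names are assumptions for illustration, not ART code.

    #include <cassert>
    #include <cstdint>

    // Toy model: the JNI kind is a pair of access-flag bits rather than
    // a separate Compiler::JniOptimizationFlags argument.
    constexpr uint32_t kAccNative         = 0x0100;      // dex-spec value
    constexpr uint32_t kAccFastNative     = 0x00080000;  // assumed ART runtime bit
    constexpr uint32_t kAccCriticalNative = 0x00200000;  // assumed ART runtime bit

    bool IsFastNative(uint32_t access_flags) {
      return (access_flags & kAccFastNative) != 0u;
    }
    bool IsCriticalNative(uint32_t access_flags) {
      return (access_flags & kAccCriticalNative) != 0u;
    }

    int main() {
      uint32_t flags = kAccNative | kAccFastNative;  // an @FastNative method
      assert(IsFastNative(flags) && !IsCriticalNative(flags));
      return 0;
    }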
diff --git a/compiler/dex/inline_method_analyser.cc b/compiler/dex/inline_method_analyser.cc
index 518b0ec..b409eb2 100644
--- a/compiler/dex/inline_method_analyser.cc
+++ b/compiler/dex/inline_method_analyser.cc
@@ -20,6 +20,7 @@
#include "art_method-inl.h"
#include "base/enums.h"
#include "class_linker-inl.h"
+#include "code_item_accessors-inl.h"
#include "dex_file-inl.h"
#include "dex_instruction-inl.h"
#include "dex_instruction.h"
@@ -43,7 +44,7 @@
typedef bool MatchFn(Matcher* matcher);
template <size_t size>
- static bool Match(const DexFile::CodeItem* code_item, MatchFn* const (&pattern)[size]);
+ static bool Match(const CodeItemDataAccessor* code_item, MatchFn* const (&pattern)[size]);
// Match and advance.
@@ -62,22 +63,20 @@
bool IPutOnThis();
private:
- explicit Matcher(const DexFile::CodeItem* code_item)
+ explicit Matcher(const CodeItemDataAccessor* code_item)
: code_item_(code_item),
- instruction_(code_item->Instructions().begin()),
- pos_(0u),
- mark_(0u) { }
+ instruction_(code_item->begin()) {}
- static bool DoMatch(const DexFile::CodeItem* code_item, MatchFn* const* pattern, size_t size);
+ static bool DoMatch(const CodeItemDataAccessor* code_item, MatchFn* const* pattern, size_t size);
- const DexFile::CodeItem* const code_item_;
+ const CodeItemDataAccessor* const code_item_;
DexInstructionIterator instruction_;
- size_t pos_;
- size_t mark_;
+ size_t pos_ = 0u;
+ size_t mark_ = 0u;
};
template <size_t size>
-bool Matcher::Match(const DexFile::CodeItem* code_item, MatchFn* const (&pattern)[size]) {
+bool Matcher::Match(const CodeItemDataAccessor* code_item, MatchFn* const (&pattern)[size]) {
return DoMatch(code_item, pattern, size);
}
@@ -122,12 +121,12 @@
}
bool Matcher::IPutOnThis() {
- DCHECK_NE(code_item_->ins_size_, 0u);
+ DCHECK_NE(code_item_->InsSize(), 0u);
return IsInstructionIPut(instruction_->Opcode()) &&
- instruction_->VRegB_22c() == code_item_->registers_size_ - code_item_->ins_size_;
+ instruction_->VRegB_22c() == code_item_->RegistersSize() - code_item_->InsSize();
}
-bool Matcher::DoMatch(const DexFile::CodeItem* code_item, MatchFn* const* pattern, size_t size) {
+bool Matcher::DoMatch(const CodeItemDataAccessor* code_item, MatchFn* const* pattern, size_t size) {
Matcher matcher(code_item);
while (matcher.pos_ != size) {
if (!pattern[matcher.pos_](&matcher)) {
@@ -158,7 +157,7 @@
// Return the forwarded arguments and check that all remaining arguments are zero.
// If the check fails, return static_cast<size_t>(-1).
-size_t CountForwardedConstructorArguments(const DexFile::CodeItem* code_item,
+size_t CountForwardedConstructorArguments(const CodeItemDataAccessor* code_item,
const Instruction* invoke_direct,
uint16_t zero_vreg_mask) {
DCHECK_EQ(invoke_direct->Opcode(), Instruction::INVOKE_DIRECT);
@@ -167,7 +166,7 @@
uint32_t args[Instruction::kMaxVarArgRegs];
invoke_direct->GetVarArgs(args);
uint16_t this_vreg = args[0];
- DCHECK_EQ(this_vreg, code_item->registers_size_ - code_item->ins_size_); // Checked by verifier.
+ DCHECK_EQ(this_vreg, code_item->RegistersSize() - code_item->InsSize()); // Checked by verifier.
size_t forwarded = 1u;
while (forwarded < number_of_args &&
args[forwarded] == this_vreg + forwarded &&
@@ -249,7 +248,7 @@
return true;
}
-bool DoAnalyseConstructor(const DexFile::CodeItem* code_item,
+bool DoAnalyseConstructor(const CodeItemDataAccessor* code_item,
ArtMethod* method,
/*inout*/ ConstructorIPutData (&iputs)[kMaxConstructorIPuts])
REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -292,17 +291,17 @@
DCHECK(method->IsConstructor());
DCHECK(code_item != nullptr);
if (!method->GetDeclaringClass()->IsVerified() ||
- code_item->insns_size_in_code_units_ > kMaxCodeUnits ||
- code_item->registers_size_ > kMaxVRegs ||
+ code_item->InsnsSizeInCodeUnits() > kMaxCodeUnits ||
+ code_item->RegistersSize() > kMaxVRegs ||
!Matcher::Match(code_item, kConstructorPattern)) {
return false;
}
// Verify the invoke, prevent a few odd cases and collect IPUTs.
- uint16_t this_vreg = code_item->registers_size_ - code_item->ins_size_;
+ uint16_t this_vreg = code_item->RegistersSize() - code_item->InsSize();
uint16_t zero_vreg_mask = 0u;
- for (const DexInstructionPcPair& pair : code_item->Instructions()) {
+ for (const DexInstructionPcPair& pair : *code_item) {
const Instruction& instruction = pair.Inst();
if (instruction.Opcode() == Instruction::RETURN_VOID) {
break;
@@ -314,7 +313,7 @@
// We allow forwarding constructors only if they pass more arguments
// to prevent infinite recursion.
if (target_method->GetDeclaringClass() == method->GetDeclaringClass() &&
- instruction.VRegA_35c() <= code_item->ins_size_) {
+ instruction.VRegA_35c() <= code_item->InsSize()) {
return false;
}
size_t forwarded = CountForwardedConstructorArguments(code_item, &instruction, zero_vreg_mask);
@@ -322,14 +321,13 @@
return false;
}
if (target_method->GetDeclaringClass()->IsObjectClass()) {
- DCHECK_EQ(target_method->GetCodeItem()->Instructions().begin()->Opcode(),
- Instruction::RETURN_VOID);
+ DCHECK_EQ(CodeItemDataAccessor(target_method).begin()->Opcode(), Instruction::RETURN_VOID);
} else {
- const DexFile::CodeItem* target_code_item = target_method->GetCodeItem();
- if (target_code_item == nullptr) {
+ CodeItemDataAccessor target_code_item = CodeItemDataAccessor::CreateNullable(target_method);
+ if (!target_code_item.HasCodeItem()) {
return false; // Native constructor?
}
- if (!DoAnalyseConstructor(target_code_item, target_method, iputs)) {
+ if (!DoAnalyseConstructor(&target_code_item, target_method, iputs)) {
return false;
}
// Prune IPUTs with zero input.
@@ -365,7 +363,7 @@
} // anonymous namespace
-bool AnalyseConstructor(const DexFile::CodeItem* code_item,
+bool AnalyseConstructor(const CodeItemDataAccessor* code_item,
ArtMethod* method,
InlineMethod* result)
REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -429,27 +427,27 @@
InlineMethodAnalyser::IPutVariant(Instruction::IPUT_SHORT), "iget/iput_short variant");
bool InlineMethodAnalyser::AnalyseMethodCode(ArtMethod* method, InlineMethod* result) {
- const DexFile::CodeItem* code_item = method->GetCodeItem();
- if (code_item == nullptr) {
+ CodeItemDataAccessor code_item = CodeItemDataAccessor::CreateNullable(method);
+ if (!code_item.HasCodeItem()) {
// Native or abstract.
return false;
}
- return AnalyseMethodCode(code_item,
+ return AnalyseMethodCode(&code_item,
MethodReference(method->GetDexFile(), method->GetDexMethodIndex()),
method->IsStatic(),
method,
result);
}
-bool InlineMethodAnalyser::AnalyseMethodCode(const DexFile::CodeItem* code_item,
+bool InlineMethodAnalyser::AnalyseMethodCode(const CodeItemDataAccessor* code_item,
const MethodReference& method_ref,
bool is_static,
ArtMethod* method,
InlineMethod* result) {
// We currently support only plain return or 2-instruction methods.
- DCHECK_NE(code_item->insns_size_in_code_units_, 0u);
- Instruction::Code opcode = code_item->Instructions().begin()->Opcode();
+ DCHECK_NE(code_item->InsnsSizeInCodeUnits(), 0u);
+ Instruction::Code opcode = code_item->begin()->Opcode();
switch (opcode) {
case Instruction::RETURN_VOID:
@@ -518,15 +516,15 @@
strncmp(method_name, "-", strlen("-")) == 0;
}
-bool InlineMethodAnalyser::AnalyseReturnMethod(const DexFile::CodeItem* code_item,
+bool InlineMethodAnalyser::AnalyseReturnMethod(const CodeItemDataAccessor* code_item,
InlineMethod* result) {
- DexInstructionIterator return_instruction = code_item->Instructions().begin();
+ DexInstructionIterator return_instruction = code_item->begin();
Instruction::Code return_opcode = return_instruction->Opcode();
uint32_t reg = return_instruction->VRegA_11x();
- uint32_t arg_start = code_item->registers_size_ - code_item->ins_size_;
+ uint32_t arg_start = code_item->RegistersSize() - code_item->InsSize();
DCHECK_GE(reg, arg_start);
DCHECK_LT((return_opcode == Instruction::RETURN_WIDE) ? reg + 1 : reg,
- code_item->registers_size_);
+ code_item->RegistersSize());
if (result != nullptr) {
result->opcode = kInlineOpReturnArg;
@@ -540,9 +538,9 @@
return true;
}
-bool InlineMethodAnalyser::AnalyseConstMethod(const DexFile::CodeItem* code_item,
+bool InlineMethodAnalyser::AnalyseConstMethod(const CodeItemDataAccessor* code_item,
InlineMethod* result) {
- DexInstructionIterator instruction = code_item->Instructions().begin();
+ DexInstructionIterator instruction = code_item->begin();
const Instruction* return_instruction = instruction->Next();
Instruction::Code return_opcode = return_instruction->Opcode();
if (return_opcode != Instruction::RETURN &&
@@ -551,13 +549,13 @@
}
int32_t return_reg = return_instruction->VRegA_11x();
- DCHECK_LT(return_reg, code_item->registers_size_);
+ DCHECK_LT(return_reg, code_item->RegistersSize());
int32_t const_value = instruction->VRegB();
if (instruction->Opcode() == Instruction::CONST_HIGH16) {
const_value <<= 16;
}
- DCHECK_LT(instruction->VRegA(), code_item->registers_size_);
+ DCHECK_LT(instruction->VRegA(), code_item->RegistersSize());
if (instruction->VRegA() != return_reg) {
return false; // Not returning the value set by const?
}
@@ -571,12 +569,12 @@
return true;
}
-bool InlineMethodAnalyser::AnalyseIGetMethod(const DexFile::CodeItem* code_item,
+bool InlineMethodAnalyser::AnalyseIGetMethod(const CodeItemDataAccessor* code_item,
const MethodReference& method_ref,
bool is_static,
ArtMethod* method,
InlineMethod* result) {
- DexInstructionIterator instruction = code_item->Instructions().begin();
+ DexInstructionIterator instruction = code_item->begin();
Instruction::Code opcode = instruction->Opcode();
DCHECK(IsInstructionIGet(opcode));
@@ -591,17 +589,17 @@
uint32_t return_reg = return_instruction->VRegA_11x();
DCHECK_LT(return_opcode == Instruction::RETURN_WIDE ? return_reg + 1 : return_reg,
- code_item->registers_size_);
+ code_item->RegistersSize());
uint32_t dst_reg = instruction->VRegA_22c();
uint32_t object_reg = instruction->VRegB_22c();
uint32_t field_idx = instruction->VRegC_22c();
- uint32_t arg_start = code_item->registers_size_ - code_item->ins_size_;
+ uint32_t arg_start = code_item->RegistersSize() - code_item->InsSize();
DCHECK_GE(object_reg, arg_start);
- DCHECK_LT(object_reg, code_item->registers_size_);
+ DCHECK_LT(object_reg, code_item->RegistersSize());
uint32_t object_arg = object_reg - arg_start;
- DCHECK_LT(opcode == Instruction::IGET_WIDE ? dst_reg + 1 : dst_reg, code_item->registers_size_);
+ DCHECK_LT(opcode == Instruction::IGET_WIDE ? dst_reg + 1 : dst_reg, code_item->RegistersSize());
if (dst_reg != return_reg) {
return false; // Not returning the value retrieved by IGET?
}
@@ -635,18 +633,18 @@
return true;
}
-bool InlineMethodAnalyser::AnalyseIPutMethod(const DexFile::CodeItem* code_item,
+bool InlineMethodAnalyser::AnalyseIPutMethod(const CodeItemDataAccessor* code_item,
const MethodReference& method_ref,
bool is_static,
ArtMethod* method,
InlineMethod* result) {
- DexInstructionIterator instruction = code_item->Instructions().begin();
+ DexInstructionIterator instruction = code_item->begin();
Instruction::Code opcode = instruction->Opcode();
DCHECK(IsInstructionIPut(opcode));
const Instruction* return_instruction = instruction->Next();
Instruction::Code return_opcode = return_instruction->Opcode();
- uint32_t arg_start = code_item->registers_size_ - code_item->ins_size_;
+ uint32_t arg_start = code_item->RegistersSize() - code_item->InsSize();
uint16_t return_arg_plus1 = 0u;
if (return_opcode != Instruction::RETURN_VOID) {
if (return_opcode != Instruction::RETURN &&
@@ -658,7 +656,7 @@
uint32_t return_reg = return_instruction->VRegA_11x();
DCHECK_GE(return_reg, arg_start);
DCHECK_LT(return_opcode == Instruction::RETURN_WIDE ? return_reg + 1u : return_reg,
- code_item->registers_size_);
+ code_item->RegistersSize());
return_arg_plus1 = return_reg - arg_start + 1u;
}
@@ -666,9 +664,9 @@
uint32_t object_reg = instruction->VRegB_22c();
uint32_t field_idx = instruction->VRegC_22c();
DCHECK_GE(object_reg, arg_start);
- DCHECK_LT(object_reg, code_item->registers_size_);
+ DCHECK_LT(object_reg, code_item->RegistersSize());
DCHECK_GE(src_reg, arg_start);
- DCHECK_LT(opcode == Instruction::IPUT_WIDE ? src_reg + 1 : src_reg, code_item->registers_size_);
+ DCHECK_LT(opcode == Instruction::IPUT_WIDE ? src_reg + 1 : src_reg, code_item->RegistersSize());
uint32_t object_arg = object_reg - arg_start;
uint32_t src_arg = src_reg - arg_start;
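Reviewer note: the accessor pattern this file migrates to can be summarized in a few lines. This is a sketch assuming only the declarations touched by this patch (CodeItemDataAccessor, its CreateNullable() factory, and the getters used above); the surrounding function is illustrative.

    // Instead of dereferencing DexFile::CodeItem fields directly, callers
    // construct an accessor, which also folds in the nullable case:
    CodeItemDataAccessor accessor = CodeItemDataAccessor::CreateNullable(method);
    if (!accessor.HasCodeItem()) {
      return false;  // Native or abstract: no code item to analyse.
    }
    uint16_t arg_start = accessor.RegistersSize() - accessor.InsSize();
    for (const DexInstructionPcPair& pair : accessor) {
      const Instruction& instruction = pair.Inst();
      // ... match instructions exactly as AnalyseMethodCode() does above ...
    }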
diff --git a/compiler/dex/inline_method_analyser.h b/compiler/dex/inline_method_analyser.h
index a35e97f..cde2147 100644
--- a/compiler/dex/inline_method_analyser.h
+++ b/compiler/dex/inline_method_analyser.h
@@ -30,6 +30,8 @@
namespace art {
+class CodeItemDataAccessor;
+
namespace verifier {
class MethodVerifier;
} // namespace verifier
@@ -121,21 +123,21 @@
static bool IsSyntheticAccessor(MethodReference ref);
private:
- static bool AnalyseMethodCode(const DexFile::CodeItem* code_item,
+ static bool AnalyseMethodCode(const CodeItemDataAccessor* code_item,
const MethodReference& method_ref,
bool is_static,
ArtMethod* method,
InlineMethod* result)
REQUIRES_SHARED(Locks::mutator_lock_);
- static bool AnalyseReturnMethod(const DexFile::CodeItem* code_item, InlineMethod* result);
- static bool AnalyseConstMethod(const DexFile::CodeItem* code_item, InlineMethod* result);
- static bool AnalyseIGetMethod(const DexFile::CodeItem* code_item,
+ static bool AnalyseReturnMethod(const CodeItemDataAccessor* code_item, InlineMethod* result);
+ static bool AnalyseConstMethod(const CodeItemDataAccessor* code_item, InlineMethod* result);
+ static bool AnalyseIGetMethod(const CodeItemDataAccessor* code_item,
const MethodReference& method_ref,
bool is_static,
ArtMethod* method,
InlineMethod* result)
REQUIRES_SHARED(Locks::mutator_lock_);
- static bool AnalyseIPutMethod(const DexFile::CodeItem* code_item,
+ static bool AnalyseIPutMethod(const CodeItemDataAccessor* code_item,
const MethodReference& method_ref,
bool is_static,
ArtMethod* method,
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index a9d27ef..32d0bbe 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -46,6 +46,7 @@
#include "dex/verified_method.h"
#include "dex_compilation_unit.h"
#include "dex_file-inl.h"
+#include "dex_file_annotations.h"
#include "dex_instruction-inl.h"
#include "driver/compiler_options.h"
#include "gc/accounting/card_table-inl.h"
@@ -511,40 +512,11 @@
InstructionSetHasGenericJniStub(driver->GetInstructionSet())) {
// Leaving this empty will trigger the generic JNI version
} else {
- // Look-up the ArtMethod associated with this code_item (if any)
- // -- It is later used to lookup any [optimization] annotations for this method.
- ScopedObjectAccess soa(self);
-
- // TODO: Lookup annotation from DexFile directly without resolving method.
- ArtMethod* method =
- Runtime::Current()->GetClassLinker()->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
- dex_file,
- method_idx,
- dex_cache,
- class_loader,
- /* referrer */ nullptr,
- invoke_type);
-
// Query any JNI optimization annotations such as @FastNative or @CriticalNative.
- Compiler::JniOptimizationFlags optimization_flags = Compiler::kNone;
- if (UNLIKELY(method == nullptr)) {
- // Failed method resolutions happen very rarely, e.g. ancestor class cannot be resolved.
- DCHECK(self->IsExceptionPending());
- self->ClearException();
- } else if (method->IsAnnotatedWithFastNative()) {
- // TODO: Will no longer need this CHECK once we have verifier checking this.
- CHECK(!method->IsAnnotatedWithCriticalNative());
- optimization_flags = Compiler::kFastNative;
- } else if (method->IsAnnotatedWithCriticalNative()) {
- // TODO: Will no longer need this CHECK once we have verifier checking this.
- CHECK(!method->IsAnnotatedWithFastNative());
- optimization_flags = Compiler::kCriticalNative;
- }
+ access_flags |= annotations::GetNativeMethodAnnotationAccessFlags(
+ dex_file, dex_file.GetClassDef(class_def_idx), method_idx);
- compiled_method = driver->GetCompiler()->JniCompile(access_flags,
- method_idx,
- dex_file,
- optimization_flags);
+ compiled_method = driver->GetCompiler()->JniCompile(access_flags, method_idx, dex_file);
CHECK(compiled_method != nullptr);
}
} else if ((access_flags & kAccAbstract) != 0) {
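Reviewer note: the net effect in the driver is that no ArtMethod resolution (and no ScopedObjectAccess) is needed just to read the native-method annotations; the bits come straight from the dex file. A condensed sketch of the new call sequence, using only names that appear in this patch:

    // Fold any @FastNative/@CriticalNative annotation bits into the
    // access flags, then hand everything to the JNI compiler:
    access_flags |= annotations::GetNativeMethodAnnotationAccessFlags(
        dex_file, dex_file.GetClassDef(class_def_idx), method_idx);
    compiled_method = driver->GetCompiler()->JniCompile(access_flags, method_idx, dex_file);
    CHECK(compiled_method != nullptr);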
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 3460efe..daf64d1 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -55,10 +55,10 @@
namespace art {
enum class JniKind {
- kNormal = Compiler::kNone, // Regular kind of un-annotated natives.
- kFast = Compiler::kFastNative, // Native method annotated with @FastNative.
- kCritical = Compiler::kCriticalNative, // Native method annotated with @CriticalNative.
- kCount = Compiler::kCriticalNative + 1 // How many different types of JNIs we can have.
+ kNormal, // Regular kind of un-annotated natives.
+ kFast, // Native method annotated with @FastNative.
+ kCritical, // Native method annotated with @CriticalNative.
+ kCount // How many different types of JNIs we can have.
};
// Used to initialize array sizes that want to have different state per current jni.
@@ -2205,8 +2205,8 @@
ArtMethod* method = jni::DecodeArtMethod(jmethod_);
ASSERT_TRUE(method != nullptr);
- EXPECT_FALSE(method->IsAnnotatedWithCriticalNative());
- EXPECT_FALSE(method->IsAnnotatedWithFastNative());
+ EXPECT_FALSE(method->IsCriticalNative());
+ EXPECT_FALSE(method->IsFastNative());
}
// TODO: just rename the java functions to the standard convention and remove duplicated tests
@@ -2227,8 +2227,8 @@
ArtMethod* method = jni::DecodeArtMethod(jmethod_);
ASSERT_TRUE(method != nullptr);
- EXPECT_FALSE(method->IsAnnotatedWithCriticalNative());
- EXPECT_TRUE(method->IsAnnotatedWithFastNative());
+ EXPECT_FALSE(method->IsCriticalNative());
+ EXPECT_TRUE(method->IsFastNative());
}
// TODO: just rename the java functions to the standard convention and remove duplicated tests
@@ -2256,8 +2256,8 @@
ArtMethod* method = jni::DecodeArtMethod(jmethod_);
ASSERT_TRUE(method != nullptr);
- EXPECT_TRUE(method->IsAnnotatedWithCriticalNative());
- EXPECT_FALSE(method->IsAnnotatedWithFastNative());
+ EXPECT_TRUE(method->IsCriticalNative());
+ EXPECT_FALSE(method->IsFastNative());
EXPECT_EQ(0, gJava_myClassNatives_criticalNative_calls[gCurrentJni]);
env_->CallStaticVoidMethod(jklass_, jmethod_);
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index b3177aa..b93b05c 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -52,8 +52,6 @@
namespace art {
-using JniOptimizationFlags = Compiler::JniOptimizationFlags;
-
template <PointerSize kPointerSize>
static void CopyParameter(JNIMacroAssembler<kPointerSize>* jni_asm,
ManagedRuntimeCallingConvention* mr_conv,
@@ -120,8 +118,7 @@
static CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
uint32_t access_flags,
uint32_t method_idx,
- const DexFile& dex_file,
- JniOptimizationFlags optimization_flags) {
+ const DexFile& dex_file) {
const bool is_native = (access_flags & kAccNative) != 0;
CHECK(is_native);
const bool is_static = (access_flags & kAccStatic) != 0;
@@ -131,10 +128,10 @@
const InstructionSetFeatures* instruction_set_features = driver->GetInstructionSetFeatures();
// i.e. if the method was annotated with @FastNative
- const bool is_fast_native = (optimization_flags == Compiler::kFastNative);
+ const bool is_fast_native = (access_flags & kAccFastNative) != 0u;
// i.e. if the method was annotated with @CriticalNative
- bool is_critical_native = (optimization_flags == Compiler::kCriticalNative);
+ bool is_critical_native = (access_flags & kAccCriticalNative) != 0u;
VLOG(jni) << "JniCompile: Method :: "
<< dex_file.PrettyMethod(method_idx, /* with signature */ true)
@@ -781,14 +778,13 @@
CompiledMethod* ArtQuickJniCompileMethod(CompilerDriver* compiler,
uint32_t access_flags,
uint32_t method_idx,
- const DexFile& dex_file,
- Compiler::JniOptimizationFlags optimization_flags) {
+ const DexFile& dex_file) {
if (Is64BitInstructionSet(compiler->GetInstructionSet())) {
return ArtJniCompileMethodInternal<PointerSize::k64>(
- compiler, access_flags, method_idx, dex_file, optimization_flags);
+ compiler, access_flags, method_idx, dex_file);
} else {
return ArtJniCompileMethodInternal<PointerSize::k32>(
- compiler, access_flags, method_idx, dex_file, optimization_flags);
+ compiler, access_flags, method_idx, dex_file);
}
}
diff --git a/compiler/jni/quick/jni_compiler.h b/compiler/jni/quick/jni_compiler.h
index 26c32a3..3fcce55 100644
--- a/compiler/jni/quick/jni_compiler.h
+++ b/compiler/jni/quick/jni_compiler.h
@@ -28,8 +28,7 @@
CompiledMethod* ArtQuickJniCompileMethod(CompilerDriver* compiler,
uint32_t access_flags,
uint32_t method_idx,
- const DexFile& dex_file,
- Compiler::JniOptimizationFlags optimization_flags);
+ const DexFile& dex_file);
} // namespace art
diff --git a/compiler/optimizing/load_store_analysis.cc b/compiler/optimizing/load_store_analysis.cc
index 5a8ac59..8b1812a 100644
--- a/compiler/optimizing/load_store_analysis.cc
+++ b/compiler/optimizing/load_store_analysis.cc
@@ -22,111 +22,130 @@
// The number of heap locations for most of the methods stays below this threshold.
constexpr size_t kMaxNumberOfHeapLocations = 32;
-// Check if array indices array[idx1 +/- CONST] and array[idx2] MAY alias.
-static bool BinaryOpAndIndexMayAlias(const HBinaryOperation* idx1, const HInstruction* idx2) {
- DCHECK(idx1 != nullptr);
- DCHECK(idx2 != nullptr);
+// Test if two integer ranges [l1,h1] and [l2,h2] overlap.
+// Note that the ranges are inclusive on both ends.
+// l1|------|h1
+// l2|------|h2
+static bool CanIntegerRangesOverlap(int64_t l1, int64_t h1, int64_t l2, int64_t h2) {
+ return std::max(l1, l2) <= std::min(h1, h2);
+}
- if (!idx1->IsAdd() && !idx1->IsSub()) {
+static bool IsAddOrSub(const HInstruction* instruction) {
+ return instruction->IsAdd() || instruction->IsSub();
+}
+
+static bool CanBinaryOpAndIndexAlias(const HBinaryOperation* idx1,
+ const size_t vector_length1,
+ const HInstruction* idx2,
+ const size_t vector_length2) {
+ if (!IsAddOrSub(idx1)) {
// We currently only support Add and Sub operations.
return true;
}
-
- HConstant* cst = idx1->GetConstantRight();
- if (cst == nullptr || cst->IsArithmeticZero()) {
+ if (idx1->AsBinaryOperation()->GetLeastConstantLeft() != idx2) {
+ // Cannot analyze [i+CONST1] and [j].
+ return true;
+ }
+ if (!idx1->GetConstantRight()->IsIntConstant()) {
return true;
}
- if (idx1->GetLeastConstantLeft() == idx2) {
- // for example, array[idx1 + 1] and array[idx1]
- return false;
- }
-
- return true;
+ // Since 'i' is the same in [i+CONST] and [i],
+ // further compare [CONST] and [0].
+ int64_t l1 = idx1->IsAdd() ?
+ idx1->GetConstantRight()->AsIntConstant()->GetValue() :
+ -idx1->GetConstantRight()->AsIntConstant()->GetValue();
+ int64_t l2 = 0;
+ int64_t h1 = l1 + (vector_length1 - 1);
+ int64_t h2 = l2 + (vector_length2 - 1);
+ return CanIntegerRangesOverlap(l1, h1, l2, h2);
}
-// Check if Add and Sub MAY alias when used as indices in arrays.
-static bool BinaryOpsMayAlias(const HBinaryOperation* idx1, const HBinaryOperation* idx2) {
- DCHECK(idx1!= nullptr);
- DCHECK(idx2 != nullptr);
-
- HConstant* idx1_cst = idx1->GetConstantRight();
- HInstruction* idx1_other = idx1->GetLeastConstantLeft();
- HConstant* idx2_cst = idx2->GetConstantRight();
- HInstruction* idx2_other = idx2->GetLeastConstantLeft();
-
- if (idx1_cst == nullptr || idx1_other == nullptr ||
- idx2_cst == nullptr || idx2_other == nullptr) {
- // We only analyze patterns like [i +/- CONST].
+static bool CanBinaryOpsAlias(const HBinaryOperation* idx1,
+ const size_t vector_length1,
+ const HBinaryOperation* idx2,
+ const size_t vector_length2) {
+ if (!IsAddOrSub(idx1) || !IsAddOrSub(idx2)) {
+ // We currently only support Add and Sub operations.
+ return true;
+ }
+ if (idx1->AsBinaryOperation()->GetLeastConstantLeft() !=
+ idx2->AsBinaryOperation()->GetLeastConstantLeft()) {
+ // Cannot analyze [i+CONST1] and [j+CONST2].
+ return true;
+ }
+ if (!idx1->GetConstantRight()->IsIntConstant() ||
+ !idx2->GetConstantRight()->IsIntConstant()) {
return true;
}
- if (idx1_other != idx2_other) {
- // For example, [j+1] and [k+1] MAY alias.
- return true;
- }
-
- if ((idx1->IsAdd() && idx2->IsAdd()) ||
- (idx1->IsSub() && idx2->IsSub())) {
- return idx1_cst->AsIntConstant()->GetValue() == idx2_cst->AsIntConstant()->GetValue();
- }
-
- if ((idx1->IsAdd() && idx2->IsSub()) ||
- (idx1->IsSub() && idx2->IsAdd())) {
- // [i + CONST1] and [i - CONST2] MAY alias iff CONST1 == -CONST2.
- // By checking CONST1 == -CONST2, following cases are handled:
- // - Zero constants case [i+0] and [i-0] is handled.
- // - Overflow cases are handled, for example:
- // [i+0x80000000] and [i-0x80000000];
- // [i+0x10] and [i-0xFFFFFFF0].
- // - Other cases [i+CONST1] and [i-CONST2] without any overflow is handled.
- return idx1_cst->AsIntConstant()->GetValue() == -(idx2_cst->AsIntConstant()->GetValue());
- }
-
- // All other cases, MAY alias.
- return true;
+ // Since 'i' is the same in [i+CONST1] and [i+CONST2],
+ // further compare [CONST1] and [CONST2].
+ int64_t l1 = idx1->IsAdd() ?
+ idx1->GetConstantRight()->AsIntConstant()->GetValue() :
+ -idx1->GetConstantRight()->AsIntConstant()->GetValue();
+ int64_t l2 = idx2->IsAdd() ?
+ idx2->GetConstantRight()->AsIntConstant()->GetValue() :
+ -idx2->GetConstantRight()->AsIntConstant()->GetValue();
+ int64_t h1 = l1 + (vector_length1 - 1);
+ int64_t h2 = l2 + (vector_length2 - 1);
+ return CanIntegerRangesOverlap(l1, h1, l2, h2);
}
-// The following array index cases are handled:
-// [i] and [i]
-// [CONST1] and [CONST2]
-// [i] and [i+CONST]
-// [i] and [i-CONST]
-// [i+CONST1] and [i+CONST2]
-// [i-CONST1] and [i-CONST2]
-// [i+CONST1] and [i-CONST2]
-// [i-CONST1] and [i+CONST2]
-// For other complicated cases, we rely on other passes like GVN and simpilfier
-// to optimize these cases before this pass.
-// For example: [i+j+k+10] and [i+k+10+j] shall be optimized to [i7+10] and [i7+10].
-bool HeapLocationCollector::CanArrayIndicesAlias(const HInstruction* idx1,
- const HInstruction* idx2) const {
+bool HeapLocationCollector::CanArrayElementsAlias(const HInstruction* idx1,
+ const size_t vector_length1,
+ const HInstruction* idx2,
+ const size_t vector_length2) const {
DCHECK(idx1 != nullptr);
DCHECK(idx2 != nullptr);
+ DCHECK_GE(vector_length1, HeapLocation::kScalar);
+ DCHECK_GE(vector_length2, HeapLocation::kScalar);
+ // [i] and [i].
if (idx1 == idx2) {
- // [i] and [i]
return true;
}
+
+ // [CONST1] and [CONST2].
if (idx1->IsIntConstant() && idx2->IsIntConstant()) {
- // [CONST1] and [CONST2]
- return idx1->AsIntConstant()->GetValue() == idx2->AsIntConstant()->GetValue();
+ int64_t l1 = idx1->AsIntConstant()->GetValue();
+ int64_t l2 = idx2->AsIntConstant()->GetValue();
+ // To avoid any overflow in the following CONST+vector_length calculation,
+ // use int64_t instead of int32_t.
+ int64_t h1 = l1 + (vector_length1 - 1);
+ int64_t h2 = l2 + (vector_length2 - 1);
+ return CanIntegerRangesOverlap(l1, h1, l2, h2);
}
- if (idx1->IsBinaryOperation() && !BinaryOpAndIndexMayAlias(idx1->AsBinaryOperation(), idx2)) {
- // [i] and [i+/-CONST]
- return false;
- }
- if (idx2->IsBinaryOperation() && !BinaryOpAndIndexMayAlias(idx2->AsBinaryOperation(), idx1)) {
- // [i+/-CONST] and [i]
- return false;
+ // [i+CONST] and [i].
+ if (idx1->IsBinaryOperation() &&
+ idx1->AsBinaryOperation()->GetConstantRight() != nullptr &&
+ idx1->AsBinaryOperation()->GetLeastConstantLeft() == idx2) {
+ return CanBinaryOpAndIndexAlias(idx1->AsBinaryOperation(),
+ vector_length1,
+ idx2,
+ vector_length2);
}
- if (idx1->IsBinaryOperation() && idx2->IsBinaryOperation()) {
- // [i+/-CONST1] and [i+/-CONST2]
- if (!BinaryOpsMayAlias(idx1->AsBinaryOperation(), idx2->AsBinaryOperation())) {
- return false;
- }
+ // [i] and [i+CONST].
+ if (idx2->IsBinaryOperation() &&
+ idx2->AsBinaryOperation()->GetConstantRight() != nullptr &&
+ idx2->AsBinaryOperation()->GetLeastConstantLeft() == idx1) {
+ return CanBinaryOpAndIndexAlias(idx2->AsBinaryOperation(),
+ vector_length2,
+ idx1,
+ vector_length1);
+ }
+
+ // [i+CONST1] and [i+CONST2].
+ if (idx1->IsBinaryOperation() &&
+ idx1->AsBinaryOperation()->GetConstantRight() != nullptr &&
+ idx2->IsBinaryOperation() &&
+ idx2->AsBinaryOperation()->GetConstantRight() != nullptr) {
+ return CanBinaryOpsAlias(idx1->AsBinaryOperation(),
+ vector_length1,
+ idx2->AsBinaryOperation(),
+ vector_length2);
}
// By default, MAY alias.
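Reviewer note: a worked example may help review the new overlap rule. Two inclusive ranges [l1,h1] and [l2,h2] intersect iff max(l1,l2) <= min(h1,h2). The snippet below is self-contained; the function body is copied verbatim from this patch and the assertions mirror cases in the new ArrayAliasingTest.

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    // Copied from the patch: inclusive ranges overlap iff the larger of
    // the two low bounds does not exceed the smaller of the two highs.
    static bool CanIntegerRangesOverlap(int64_t l1, int64_t h1, int64_t l2, int64_t h2) {
      return std::max(l1, l2) <= std::min(h1, h2);
    }

    int main() {
      // array[i+6..i+7] vs array[i+8..i+11]: max(6,8)=8 > min(7,11)=7 -> disjoint.
      assert(!CanIntegerRangesOverlap(6, 7, 8, 11));
      // array[i+6..i+9] vs array[i+8..i+11]: overlap at i+8 and i+9.
      assert(CanIntegerRangesOverlap(6, 9, 8, 11));
      // Scalar array[1] vs vector array[0..3]: the scalar lies inside the vector.
      assert(CanIntegerRangesOverlap(1, 1, 0, 3));
      return 0;
    }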
diff --git a/compiler/optimizing/load_store_analysis.h b/compiler/optimizing/load_store_analysis.h
index 5a1df45..999026c 100644
--- a/compiler/optimizing/load_store_analysis.h
+++ b/compiler/optimizing/load_store_analysis.h
@@ -102,23 +102,26 @@
class HeapLocation : public ArenaObject<kArenaAllocLSA> {
public:
static constexpr size_t kInvalidFieldOffset = -1;
-
+ // Default value for heap locations which are not vector data.
+ static constexpr size_t kScalar = 1;
// TODO: more fine-grained array types.
static constexpr int16_t kDeclaringClassDefIndexForArrays = -1;
HeapLocation(ReferenceInfo* ref_info,
size_t offset,
HInstruction* index,
+ size_t vector_length,
int16_t declaring_class_def_index)
: ref_info_(ref_info),
offset_(offset),
index_(index),
+ vector_length_(vector_length),
declaring_class_def_index_(declaring_class_def_index),
value_killed_by_loop_side_effects_(true) {
DCHECK(ref_info != nullptr);
DCHECK((offset == kInvalidFieldOffset && index != nullptr) ||
(offset != kInvalidFieldOffset && index == nullptr));
- if (ref_info->IsSingleton() && !IsArrayElement()) {
+ if (ref_info->IsSingleton() && !IsArray()) {
// Assume this location's value cannot be killed by loop side effects
// until proven otherwise.
value_killed_by_loop_side_effects_ = false;
@@ -128,6 +131,7 @@
ReferenceInfo* GetReferenceInfo() const { return ref_info_; }
size_t GetOffset() const { return offset_; }
HInstruction* GetIndex() const { return index_; }
+ size_t GetVectorLength() const { return vector_length_; }
// Returns the definition of declaring class' dex index.
// It's kDeclaringClassDefIndexForArrays for an array element.
@@ -135,7 +139,7 @@
return declaring_class_def_index_;
}
- bool IsArrayElement() const {
+ bool IsArray() const {
return index_ != nullptr;
}
@@ -148,15 +152,26 @@
}
private:
- ReferenceInfo* const ref_info_; // reference for instance/static field or array access.
- const size_t offset_; // offset of static/instance field.
- HInstruction* const index_; // index of an array element.
- const int16_t declaring_class_def_index_; // declaring class's def's dex index.
- bool value_killed_by_loop_side_effects_; // value of this location may be killed by loop
- // side effects because this location is stored
- // into inside a loop. This gives
- // better info on whether a singleton's location
- // value may be killed by loop side effects.
+ // Reference for instance/static field, array element or vector data.
+ ReferenceInfo* const ref_info_;
+ // Offset of static/instance field.
+ // Invalid when this HeapLocation is not a field access.
+ const size_t offset_;
+ // Index of an array element or starting index of vector data.
+ // Invalid when this HeapLocation is not an array access.
+ HInstruction* const index_;
+ // Vector length of vector data.
+ // When this HeapLocation is not vector data, its value is kScalar.
+ const size_t vector_length_;
+ // Declaring class's def's dex index.
+ // Invalid when this HeapLocation is not field access.
+ const int16_t declaring_class_def_index_;
+
+ // Value of this location may be killed by loop side effects
+ // because this location is stored into inside a loop.
+ // This gives better info on whether a singleton's location
+ // value may be killed by loop side effects.
+ bool value_killed_by_loop_side_effects_;
DISALLOW_COPY_AND_ASSIGN(HeapLocation);
};
@@ -218,14 +233,26 @@
return nullptr;
}
- size_t GetArrayAccessHeapLocation(HInstruction* array, HInstruction* index) const {
+ size_t GetFieldHeapLocation(HInstruction* object, const FieldInfo* field) const {
+ DCHECK(object != nullptr);
+ DCHECK(field != nullptr);
+ return FindHeapLocationIndex(FindReferenceInfoOf(HuntForOriginalReference(object)),
+ field->GetFieldOffset().SizeValue(),
+ nullptr,
+ HeapLocation::kScalar,
+ field->GetDeclaringClassDefIndex());
+ }
+
+ size_t GetArrayHeapLocation(HInstruction* array,
+ HInstruction* index,
+ size_t vector_length = HeapLocation::kScalar) const {
DCHECK(array != nullptr);
DCHECK(index != nullptr);
- HInstruction* original_ref = HuntForOriginalReference(array);
- ReferenceInfo* ref_info = FindReferenceInfoOf(original_ref);
- return FindHeapLocationIndex(ref_info,
+ DCHECK_GE(vector_length, HeapLocation::kScalar);
+ return FindHeapLocationIndex(FindReferenceInfoOf(HuntForOriginalReference(array)),
HeapLocation::kInvalidFieldOffset,
index,
+ vector_length,
HeapLocation::kDeclaringClassDefIndexForArrays);
}
@@ -242,15 +269,26 @@
}
// Find and return the heap location index in heap_locations_.
+ // NOTE: When heap locations are created, potentially aliasing/overlapping
+ // accesses are given different indexes. This find function also
+ // doesn't take aliasing/overlapping into account. For example,
+ // this function returns three different indexes for:
+ // - ref_info=array, index=i, vector_length=kScalar;
+ // - ref_info=array, index=i, vector_length=2;
+ // - ref_info=array, index=i, vector_length=4;
+ // In later analysis, ComputeMayAlias() and MayAlias() determine whether
+ // these indexes alias.
size_t FindHeapLocationIndex(ReferenceInfo* ref_info,
size_t offset,
HInstruction* index,
+ size_t vector_length,
int16_t declaring_class_def_index) const {
for (size_t i = 0; i < heap_locations_.size(); i++) {
HeapLocation* loc = heap_locations_[i];
if (loc->GetReferenceInfo() == ref_info &&
loc->GetOffset() == offset &&
loc->GetIndex() == index &&
+ loc->GetVectorLength() == vector_length &&
loc->GetDeclaringClassDefIndex() == declaring_class_def_index) {
return i;
}
@@ -315,7 +353,10 @@
return true;
}
- bool CanArrayIndicesAlias(const HInstruction* i1, const HInstruction* i2) const;
+ bool CanArrayElementsAlias(const HInstruction* idx1,
+ const size_t vector_length1,
+ const HInstruction* idx2,
+ const size_t vector_length2) const;
// `index1` and `index2` are indices in the array of collected heap locations.
// Returns the position in the bit vector that tracks whether the two heap
@@ -340,7 +381,7 @@
HeapLocation* loc2 = heap_locations_[index2];
if (loc1->GetOffset() != loc2->GetOffset()) {
// Either two different instance fields, or one is an instance
- // field and the other is an array element.
+ // field and the other is array data.
return false;
}
if (loc1->GetDeclaringClassDefIndex() != loc2->GetDeclaringClassDefIndex()) {
@@ -350,10 +391,12 @@
if (!CanReferencesAlias(loc1->GetReferenceInfo(), loc2->GetReferenceInfo())) {
return false;
}
- if (loc1->IsArrayElement() && loc2->IsArrayElement()) {
- HInstruction* array_index1 = loc1->GetIndex();
- HInstruction* array_index2 = loc2->GetIndex();
- if (!CanArrayIndicesAlias(array_index1, array_index2)) {
+ if (loc1->IsArray() && loc2->IsArray()) {
+ HInstruction* idx1 = loc1->GetIndex();
+ HInstruction* idx2 = loc2->GetIndex();
+ size_t vector_length1 = loc1->GetVectorLength();
+ size_t vector_length2 = loc2->GetVectorLength();
+ if (!CanArrayElementsAlias(idx1, vector_length1, idx2, vector_length2)) {
return false;
}
ReferenceInfo* ref_info = loc1->GetReferenceInfo();
@@ -383,14 +426,15 @@
HeapLocation* GetOrCreateHeapLocation(HInstruction* ref,
size_t offset,
HInstruction* index,
+ size_t vector_length,
int16_t declaring_class_def_index) {
HInstruction* original_ref = HuntForOriginalReference(ref);
ReferenceInfo* ref_info = GetOrCreateReferenceInfo(original_ref);
size_t heap_location_idx = FindHeapLocationIndex(
- ref_info, offset, index, declaring_class_def_index);
+ ref_info, offset, index, vector_length, declaring_class_def_index);
if (heap_location_idx == kHeapLocationNotFound) {
HeapLocation* heap_loc = new (GetGraph()->GetAllocator())
- HeapLocation(ref_info, offset, index, declaring_class_def_index);
+ HeapLocation(ref_info, offset, index, vector_length, declaring_class_def_index);
heap_locations_.push_back(heap_loc);
return heap_loc;
}
@@ -403,12 +447,19 @@
}
const uint16_t declaring_class_def_index = field_info.GetDeclaringClassDefIndex();
const size_t offset = field_info.GetFieldOffset().SizeValue();
- return GetOrCreateHeapLocation(ref, offset, nullptr, declaring_class_def_index);
+ return GetOrCreateHeapLocation(ref,
+ offset,
+ nullptr,
+ HeapLocation::kScalar,
+ declaring_class_def_index);
}
- void VisitArrayAccess(HInstruction* array, HInstruction* index) {
- GetOrCreateHeapLocation(array, HeapLocation::kInvalidFieldOffset,
- index, HeapLocation::kDeclaringClassDefIndexForArrays);
+ void VisitArrayAccess(HInstruction* array, HInstruction* index, size_t vector_length) {
+ GetOrCreateHeapLocation(array,
+ HeapLocation::kInvalidFieldOffset,
+ index,
+ vector_length,
+ HeapLocation::kDeclaringClassDefIndexForArrays);
}
void VisitInstanceFieldGet(HInstanceFieldGet* instruction) OVERRIDE {
@@ -456,12 +507,30 @@
// since we cannot accurately track the fields.
void VisitArrayGet(HArrayGet* instruction) OVERRIDE {
- VisitArrayAccess(instruction->InputAt(0), instruction->InputAt(1));
+ HInstruction* array = instruction->InputAt(0);
+ HInstruction* index = instruction->InputAt(1);
+ VisitArrayAccess(array, index, HeapLocation::kScalar);
CreateReferenceInfoForReferenceType(instruction);
}
void VisitArraySet(HArraySet* instruction) OVERRIDE {
- VisitArrayAccess(instruction->InputAt(0), instruction->InputAt(1));
+ HInstruction* array = instruction->InputAt(0);
+ HInstruction* index = instruction->InputAt(1);
+ VisitArrayAccess(array, index, HeapLocation::kScalar);
+ has_heap_stores_ = true;
+ }
+
+ void VisitVecLoad(HVecLoad* instruction) OVERRIDE {
+ HInstruction* array = instruction->InputAt(0);
+ HInstruction* index = instruction->InputAt(1);
+ VisitArrayAccess(array, index, instruction->GetVectorLength());
+ CreateReferenceInfoForReferenceType(instruction);
+ }
+
+ void VisitVecStore(HVecStore* instruction) OVERRIDE {
+ HInstruction* array = instruction->InputAt(0);
+ HInstruction* index = instruction->InputAt(1);
+ VisitArrayAccess(array, index, instruction->GetVectorLength());
has_heap_stores_ = true;
}
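Reviewer note: with vector_length in the lookup key, a scalar access and a vector access at the same index are distinct heap locations whose aliasing is decided later. A usage sketch, assuming a HeapLocationCollector hlc already populated by LoadStoreAnalysis (only names from this patch are used):

    // Same base array and index, different vector lengths:
    size_t scalar_loc = hlc.GetArrayHeapLocation(array, index);     // kScalar
    size_t vector_loc = hlc.GetArrayHeapLocation(array, index, 4);  // e.g. an HVecStore
    // Distinct location indexes, but MayAlias() still reports the overlap
    // because the element ranges [i, i] and [i, i+3] intersect:
    DCHECK_NE(scalar_loc, vector_loc);
    DCHECK(hlc.MayAlias(scalar_loc, vector_loc));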
diff --git a/compiler/optimizing/load_store_analysis_test.cc b/compiler/optimizing/load_store_analysis_test.cc
index b41e1e4..56361a8 100644
--- a/compiler/optimizing/load_store_analysis_test.cc
+++ b/compiler/optimizing/load_store_analysis_test.cc
@@ -78,11 +78,12 @@
// Test queries on HeapLocationCollector's ref info and index records.
ReferenceInfo* ref = heap_location_collector.FindReferenceInfoOf(array);
- size_t field_off = HeapLocation::kInvalidFieldOffset;
+ size_t field = HeapLocation::kInvalidFieldOffset;
+ size_t vec = HeapLocation::kScalar;
size_t class_def = HeapLocation::kDeclaringClassDefIndexForArrays;
- size_t loc1 = heap_location_collector.FindHeapLocationIndex(ref, field_off, c1, class_def);
- size_t loc2 = heap_location_collector.FindHeapLocationIndex(ref, field_off, c2, class_def);
- size_t loc3 = heap_location_collector.FindHeapLocationIndex(ref, field_off, index, class_def);
+ size_t loc1 = heap_location_collector.FindHeapLocationIndex(ref, field, c1, vec, class_def);
+ size_t loc2 = heap_location_collector.FindHeapLocationIndex(ref, field, c2, vec, class_def);
+ size_t loc3 = heap_location_collector.FindHeapLocationIndex(ref, field, index, vec, class_def);
// must find this reference info for array in HeapLocationCollector.
ASSERT_TRUE(ref != nullptr);
// must find these heap locations;
@@ -167,10 +168,8 @@
// Test queries on HeapLocationCollector's ref info and index records.
ReferenceInfo* ref = heap_location_collector.FindReferenceInfoOf(object);
- size_t loc1 = heap_location_collector.FindHeapLocationIndex(
- ref, 10, nullptr, kUnknownClassDefIndex);
- size_t loc2 = heap_location_collector.FindHeapLocationIndex(
- ref, 20, nullptr, kUnknownClassDefIndex);
+ size_t loc1 = heap_location_collector.GetFieldHeapLocation(object, &get_field10->GetFieldInfo());
+ size_t loc2 = heap_location_collector.GetFieldHeapLocation(object, &get_field20->GetFieldInfo());
// must find reference info for object in HeapLocationCollector.
ASSERT_TRUE(ref != nullptr);
// must find these heap locations.
@@ -247,31 +246,236 @@
size_t loc2 = HeapLocationCollector::kHeapLocationNotFound;
// Test alias: array[0] and array[1]
- loc1 = heap_location_collector.GetArrayAccessHeapLocation(array, c0);
- loc2 = heap_location_collector.GetArrayAccessHeapLocation(array, c1);
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, c0);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, c1);
ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
// Test alias: array[i+0] and array[i-0]
- loc1 = heap_location_collector.GetArrayAccessHeapLocation(array, add0);
- loc2 = heap_location_collector.GetArrayAccessHeapLocation(array, sub0);
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, add0);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, sub0);
ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
// Test alias: array[i+1] and array[i-1]
- loc1 = heap_location_collector.GetArrayAccessHeapLocation(array, add1);
- loc2 = heap_location_collector.GetArrayAccessHeapLocation(array, sub1);
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, add1);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, sub1);
ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
// Test alias: array[i+1] and array[1-i]
- loc1 = heap_location_collector.GetArrayAccessHeapLocation(array, add1);
- loc2 = heap_location_collector.GetArrayAccessHeapLocation(array, rev_sub1);
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, add1);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, rev_sub1);
ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
// Test alias: array[i+1] and array[i-(-1)]
- loc1 = heap_location_collector.GetArrayAccessHeapLocation(array, add1);
- loc2 = heap_location_collector.GetArrayAccessHeapLocation(array, sub_neg1);
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, add1);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, sub_neg1);
ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
}
+TEST_F(LoadStoreAnalysisTest, ArrayAliasingTest) {
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
+ graph_->AddBlock(entry);
+ graph_->SetEntryBlock(entry);
+ graph_->BuildDominatorTree();
+
+ HInstruction* array = new (GetAllocator()) HParameterValue(
+ graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
+ HInstruction* index = new (GetAllocator()) HParameterValue(
+ graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kInt32);
+ HInstruction* c0 = graph_->GetIntConstant(0);
+ HInstruction* c1 = graph_->GetIntConstant(1);
+ HInstruction* c6 = graph_->GetIntConstant(6);
+ HInstruction* c8 = graph_->GetIntConstant(8);
+
+ HInstruction* arr_set_0 = new (GetAllocator()) HArraySet(array,
+ c0,
+ c0,
+ DataType::Type::kInt32,
+ 0);
+ HInstruction* arr_set_1 = new (GetAllocator()) HArraySet(array,
+ c1,
+ c0,
+ DataType::Type::kInt32,
+ 0);
+ HInstruction* arr_set_i = new (GetAllocator()) HArraySet(array,
+ index,
+ c0,
+ DataType::Type::kInt32,
+ 0);
+
+ HVecOperation* v1 = new (GetAllocator()) HVecReplicateScalar(GetAllocator(),
+ c1,
+ DataType::Type::kInt32,
+ 4,
+ kNoDexPc);
+ HVecOperation* v2 = new (GetAllocator()) HVecReplicateScalar(GetAllocator(),
+ c1,
+ DataType::Type::kInt32,
+ 2,
+ kNoDexPc);
+ HInstruction* i_add6 = new (GetAllocator()) HAdd(DataType::Type::kInt32, index, c6);
+ HInstruction* i_add8 = new (GetAllocator()) HAdd(DataType::Type::kInt32, index, c8);
+
+ HInstruction* vstore_0 = new (GetAllocator()) HVecStore(
+ GetAllocator(),
+ array,
+ c0,
+ v1,
+ DataType::Type::kInt32,
+ SideEffects::ArrayWriteOfType(DataType::Type::kInt32),
+ 4,
+ kNoDexPc);
+ HInstruction* vstore_1 = new (GetAllocator()) HVecStore(
+ GetAllocator(),
+ array,
+ c1,
+ v1,
+ DataType::Type::kInt32,
+ SideEffects::ArrayWriteOfType(DataType::Type::kInt32),
+ 4,
+ kNoDexPc);
+ HInstruction* vstore_8 = new (GetAllocator()) HVecStore(
+ GetAllocator(),
+ array,
+ c8,
+ v1,
+ DataType::Type::kInt32,
+ SideEffects::ArrayWriteOfType(DataType::Type::kInt32),
+ 4,
+ kNoDexPc);
+ HInstruction* vstore_i = new (GetAllocator()) HVecStore(
+ GetAllocator(),
+ array,
+ index,
+ v1,
+ DataType::Type::kInt32,
+ SideEffects::ArrayWriteOfType(DataType::Type::kInt32),
+ 4,
+ kNoDexPc);
+ HInstruction* vstore_i_add6 = new (GetAllocator()) HVecStore(
+ GetAllocator(),
+ array,
+ i_add6,
+ v1,
+ DataType::Type::kInt32,
+ SideEffects::ArrayWriteOfType(DataType::Type::kInt32),
+ 4,
+ kNoDexPc);
+ HInstruction* vstore_i_add8 = new (GetAllocator()) HVecStore(
+ GetAllocator(),
+ array,
+ i_add8,
+ v1,
+ DataType::Type::kInt32,
+ SideEffects::ArrayWriteOfType(DataType::Type::kInt32),
+ 4,
+ kNoDexPc);
+ HInstruction* vstore_i_add6_vlen2 = new (GetAllocator()) HVecStore(
+ GetAllocator(),
+ array,
+ i_add6,
+ v2,
+ DataType::Type::kInt32,
+ SideEffects::ArrayWriteOfType(DataType::Type::kInt32),
+ 2,
+ kNoDexPc);
+
+ entry->AddInstruction(array);
+ entry->AddInstruction(index);
+
+ entry->AddInstruction(arr_set_0);
+ entry->AddInstruction(arr_set_1);
+ entry->AddInstruction(arr_set_i);
+ entry->AddInstruction(v1);
+ entry->AddInstruction(v2);
+ entry->AddInstruction(i_add6);
+ entry->AddInstruction(i_add8);
+ entry->AddInstruction(vstore_0);
+ entry->AddInstruction(vstore_1);
+ entry->AddInstruction(vstore_8);
+ entry->AddInstruction(vstore_i);
+ entry->AddInstruction(vstore_i_add6);
+ entry->AddInstruction(vstore_i_add8);
+ entry->AddInstruction(vstore_i_add6_vlen2);
+
+ LoadStoreAnalysis lsa(graph_);
+ lsa.Run();
+ const HeapLocationCollector& heap_location_collector = lsa.GetHeapLocationCollector();
+
+ // LSA/HeapLocationCollector should see those instructions.
+ ASSERT_EQ(heap_location_collector.GetNumberOfHeapLocations(), 10U);
+ ASSERT_TRUE(heap_location_collector.HasHeapStores());
+
+ // Test queries on HeapLocationCollector's aliasing matrix after load store analysis.
+ size_t loc1, loc2;
+
+ // Test alias: array[0] and array[0,1,2,3]
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, c0);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, c0, 4);
+ ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
+
+ // Test alias: array[0] and array[8,9,10,11]
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, c0);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, c8, 4);
+ ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
+
+ // Test alias: array[1] and array[8,9,10,11]
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, c1);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, c8, 4);
+ ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
+
+ // Test alias: array[1] and array[0,1,2,3]
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, c1);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, c0, 4);
+ ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
+
+ // Test alias: array[0,1,2,3] and array[8,9,10,11]
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, c0, 4);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, c8, 4);
+ ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
+
+ // Test alias: array[0,1,2,3] and array[1,2,3,4]
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, c1, 4);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, c0, 4);
+ ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
+
+ // Test alias: array[0] and array[i,i+1,i+2,i+3]
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, c0);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, index, 4);
+ ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
+
+ // Test alias: array[i] and array[0,1,2,3]
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, index);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, c0, 4);
+ ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
+
+ // Test alias: array[i] and array[i,i+1,i+2,i+3]
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, index);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, index, 4);
+ ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
+
+ // Test alias: array[i] and array[i+8,i+9,i+10,i+11]
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, index);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, i_add8, 4);
+ ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
+
+ // Test alias: array[i+6,i+7,i+8,i+9] and array[i+8,i+9,i+10,i+11]
+ // Test partial overlap.
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, i_add6, 4);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, i_add8, 4);
+ ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
+
+ // Test alias: array[i+6,i+7] and array[i,i+1,i+2,i+3]
+ // Test different vector lengths.
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, i_add6, 2);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, index, 4);
+ ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
+
+ // Test alias: array[i+6,i+7] and array[i+8,i+9,i+10,i+11]
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, i_add6, 2);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, i_add8, 4);
+ ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
+}
+
TEST_F(LoadStoreAnalysisTest, ArrayIndexCalculationOverflowTest) {
HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry);
@@ -359,33 +563,33 @@
size_t loc2 = HeapLocationCollector::kHeapLocationNotFound;
// Test alias: array[i+0x80000000] and array[i-0x80000000]
- loc1 = heap_location_collector.GetArrayAccessHeapLocation(array, add_0x80000000);
- loc2 = heap_location_collector.GetArrayAccessHeapLocation(array, sub_0x80000000);
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, add_0x80000000);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, sub_0x80000000);
ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
// Test alias: array[i+0x10] and array[i-0xFFFFFFF0]
- loc1 = heap_location_collector.GetArrayAccessHeapLocation(array, add_0x10);
- loc2 = heap_location_collector.GetArrayAccessHeapLocation(array, sub_0xFFFFFFF0);
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, add_0x10);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, sub_0xFFFFFFF0);
ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
// Test alias: array[i+0x7FFFFFFF] and array[i-0x80000001]
- loc1 = heap_location_collector.GetArrayAccessHeapLocation(array, add_0x7FFFFFFF);
- loc2 = heap_location_collector.GetArrayAccessHeapLocation(array, sub_0x80000001);
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, add_0x7FFFFFFF);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, sub_0x80000001);
ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
// Test alias: array[i+0] and array[i-0]
- loc1 = heap_location_collector.GetArrayAccessHeapLocation(array, add_0);
- loc2 = heap_location_collector.GetArrayAccessHeapLocation(array, sub_0);
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, add_0);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, sub_0);
ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
// Should not alias:
- loc1 = heap_location_collector.GetArrayAccessHeapLocation(array, sub_0x80000000);
- loc2 = heap_location_collector.GetArrayAccessHeapLocation(array, sub_0x80000001);
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, sub_0x80000000);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, sub_0x80000001);
ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
// Should not alias:
- loc1 = heap_location_collector.GetArrayAccessHeapLocation(array, add_0);
- loc2 = heap_location_collector.GetArrayAccessHeapLocation(array, sub_0x80000000);
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, add_0);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, sub_0x80000000);
ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
}
@@ -443,10 +647,10 @@
// times the original reference has been transformed by BoundType,
// NullCheck, IntermediateAddress, etc.
ASSERT_EQ(heap_location_collector.GetNumberOfHeapLocations(), 1U);
- size_t loc1 = heap_location_collector.GetArrayAccessHeapLocation(array, c1);
- size_t loc2 = heap_location_collector.GetArrayAccessHeapLocation(bound_type, c1);
- size_t loc3 = heap_location_collector.GetArrayAccessHeapLocation(null_check, c1);
- size_t loc4 = heap_location_collector.GetArrayAccessHeapLocation(inter_addr, c1);
+ size_t loc1 = heap_location_collector.GetArrayHeapLocation(array, c1);
+ size_t loc2 = heap_location_collector.GetArrayHeapLocation(bound_type, c1);
+ size_t loc3 = heap_location_collector.GetArrayHeapLocation(null_check, c1);
+ size_t loc4 = heap_location_collector.GetArrayHeapLocation(inter_addr, c1);
ASSERT_TRUE(loc1 != HeapLocationCollector::kHeapLocationNotFound);
ASSERT_EQ(loc1, loc2);
ASSERT_EQ(loc1, loc3);
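
The alias expectations in the tests above all reduce to interval overlap on the index space: a scalar access covers one slot and a vector access of length n covers [start, start + n). Below is a minimal standalone sketch of that reasoning for indices that differ by known constants; it is an illustration only, not ART's HeapLocationCollector, which also has to handle unknown indices and integer overflow.

  #include <cstdint>
  #include <iostream>

  // May-alias check for two accesses into the same array whose start indices
  // are known constants: access 1 covers [s1, s1 + l1), access 2 covers
  // [s2, s2 + l2). Scalar accesses use length 1.
  bool MayAlias(int64_t s1, int64_t l1, int64_t s2, int64_t l2) {
    return s1 < s2 + l2 && s2 < s1 + l1;
  }

  int main() {
    std::cout << MayAlias(0, 4, 8, 4) << "\n";  // 0: a[0..3] vs a[8..11]
    std::cout << MayAlias(1, 4, 0, 4) << "\n";  // 1: partial overlap
    std::cout << MayAlias(6, 2, 0, 4) << "\n";  // 0: a[i+6,i+7] vs a[i..i+3]
    return 0;
  }
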
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index c899613..66806d8 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -83,7 +83,8 @@
DCHECK(load != nullptr);
DCHECK(load->IsInstanceFieldGet() ||
load->IsStaticFieldGet() ||
- load->IsArrayGet());
+ load->IsArrayGet() ||
+ load->IsVecLoad());
HInstruction* substitute = substitute_instructions_for_loads_[i];
DCHECK(substitute != nullptr);
// Keep tracing substitute till one that's not removed.
@@ -98,7 +99,10 @@
// At this point, stores in possibly_removed_stores_ can be safely removed.
for (HInstruction* store : possibly_removed_stores_) {
- DCHECK(store->IsInstanceFieldSet() || store->IsStaticFieldSet() || store->IsArraySet());
+ DCHECK(store->IsInstanceFieldSet() ||
+ store->IsStaticFieldSet() ||
+ store->IsArraySet() ||
+ store->IsVecStore());
store->GetBlock()->RemoveInstruction(store);
}
@@ -137,7 +141,9 @@
void KeepIfIsStore(HInstruction* heap_value) {
if (heap_value == kDefaultHeapValue ||
heap_value == kUnknownHeapValue ||
- !(heap_value->IsInstanceFieldSet() || heap_value->IsArraySet())) {
+ !(heap_value->IsInstanceFieldSet() ||
+ heap_value->IsArraySet() ||
+ heap_value->IsVecStore())) {
return;
}
auto idx = std::find(possibly_removed_stores_.begin(),
@@ -302,11 +308,12 @@
HInstruction* ref,
size_t offset,
HInstruction* index,
+ size_t vector_length,
int16_t declaring_class_def_index) {
HInstruction* original_ref = heap_location_collector_.HuntForOriginalReference(ref);
ReferenceInfo* ref_info = heap_location_collector_.FindReferenceInfoOf(original_ref);
size_t idx = heap_location_collector_.FindHeapLocationIndex(
- ref_info, offset, index, declaring_class_def_index);
+ ref_info, offset, index, vector_length, declaring_class_def_index);
DCHECK_NE(idx, HeapLocationCollector::kHeapLocationNotFound);
ScopedArenaVector<HInstruction*>& heap_values =
heap_values_for_[instruction->GetBlock()->GetBlockId()];
@@ -319,7 +326,9 @@
return;
}
if (heap_value != kUnknownHeapValue) {
- if (heap_value->IsInstanceFieldSet() || heap_value->IsArraySet()) {
+ if (heap_value->IsInstanceFieldSet() ||
+ heap_value->IsArraySet() ||
+ heap_value->IsVecStore()) {
HInstruction* store = heap_value;
// This load must be from a singleton since it's from the same
// field/element that a "removed" store puts the value. That store
@@ -367,12 +376,13 @@
HInstruction* ref,
size_t offset,
HInstruction* index,
+ size_t vector_length,
int16_t declaring_class_def_index,
HInstruction* value) {
HInstruction* original_ref = heap_location_collector_.HuntForOriginalReference(ref);
ReferenceInfo* ref_info = heap_location_collector_.FindReferenceInfoOf(original_ref);
size_t idx = heap_location_collector_.FindHeapLocationIndex(
- ref_info, offset, index, declaring_class_def_index);
+ ref_info, offset, index, vector_length, declaring_class_def_index);
DCHECK_NE(idx, HeapLocationCollector::kHeapLocationNotFound);
ScopedArenaVector<HInstruction*>& heap_values =
heap_values_for_[instruction->GetBlock()->GetBlockId()];
@@ -414,7 +424,9 @@
if (!same_value) {
if (possibly_redundant) {
- DCHECK(instruction->IsInstanceFieldSet() || instruction->IsArraySet());
+ DCHECK(instruction->IsInstanceFieldSet() ||
+ instruction->IsArraySet() ||
+ instruction->IsVecStore());
// Put the store as the heap value. If the value is loaded from heap
// by a load later, this store isn't really redundant.
heap_values[idx] = instruction;
@@ -427,8 +439,24 @@
if (i == idx) {
continue;
}
- if (heap_values[i] == value) {
- // Same value should be kept even if aliasing happens.
+ if (heap_values[i] == value && !instruction->IsVecOperation()) {
+ // For fields and arrays, the same value should be kept even if aliasing happens.
+ //
+ // For vector values, this is NOT safe. For example:
+ // packed_data = [0xA, 0xB, 0xC, 0xD]; <-- Different values in each lane.
+ // VecStore array[i ,i+1,i+2,i+3] = packed_data;
+ // VecStore array[i+1,i+2,i+3,i+4] = packed_data; <-- We are here (partial overlap).
+ // VecLoad vx = array[i,i+1,i+2,i+3]; <-- Cannot be eliminated.
+ //
+ // TODO: to allow this 'same value' optimization on vector data,
+ // LSA needs to report more fine-grained MAY alias information:
+ // (1) May alias due to partial overlap of two vector accesses,
+ // e.g. a[i..i+3] and a[i+1..i+4].
+ // (2) May alias because two vector accesses may completely overlap each other,
+ // e.g. a[i..i+3] and b[i..i+3].
+ // (3) May alias, but the exact relationship between the two locations is unknown,
+ // e.g. a[i..i+3] and b[j..j+3], where the values of a, b, i and j are all unknown.
+ // The 'same value' optimization can be applied only in case (2).
continue;
}
if (heap_values[i] == kUnknownHeapValue) {
@@ -446,7 +474,12 @@
HInstruction* obj = instruction->InputAt(0);
size_t offset = instruction->GetFieldInfo().GetFieldOffset().SizeValue();
int16_t declaring_class_def_index = instruction->GetFieldInfo().GetDeclaringClassDefIndex();
- VisitGetLocation(instruction, obj, offset, nullptr, declaring_class_def_index);
+ VisitGetLocation(instruction,
+ obj,
+ offset,
+ nullptr,
+ HeapLocation::kScalar,
+ declaring_class_def_index);
}
void VisitInstanceFieldSet(HInstanceFieldSet* instruction) OVERRIDE {
@@ -454,14 +487,25 @@
size_t offset = instruction->GetFieldInfo().GetFieldOffset().SizeValue();
int16_t declaring_class_def_index = instruction->GetFieldInfo().GetDeclaringClassDefIndex();
HInstruction* value = instruction->InputAt(1);
- VisitSetLocation(instruction, obj, offset, nullptr, declaring_class_def_index, value);
+ VisitSetLocation(instruction,
+ obj,
+ offset,
+ nullptr,
+ HeapLocation::kScalar,
+ declaring_class_def_index,
+ value);
}
void VisitStaticFieldGet(HStaticFieldGet* instruction) OVERRIDE {
HInstruction* cls = instruction->InputAt(0);
size_t offset = instruction->GetFieldInfo().GetFieldOffset().SizeValue();
int16_t declaring_class_def_index = instruction->GetFieldInfo().GetDeclaringClassDefIndex();
- VisitGetLocation(instruction, cls, offset, nullptr, declaring_class_def_index);
+ VisitGetLocation(instruction,
+ cls,
+ offset,
+ nullptr,
+ HeapLocation::kScalar,
+ declaring_class_def_index);
}
void VisitStaticFieldSet(HStaticFieldSet* instruction) OVERRIDE {
@@ -469,7 +513,13 @@
size_t offset = instruction->GetFieldInfo().GetFieldOffset().SizeValue();
int16_t declaring_class_def_index = instruction->GetFieldInfo().GetDeclaringClassDefIndex();
HInstruction* value = instruction->InputAt(1);
- VisitSetLocation(instruction, cls, offset, nullptr, declaring_class_def_index, value);
+ VisitSetLocation(instruction,
+ cls,
+ offset,
+ nullptr,
+ HeapLocation::kScalar,
+ declaring_class_def_index,
+ value);
}
void VisitArrayGet(HArrayGet* instruction) OVERRIDE {
@@ -479,6 +529,7 @@
array,
HeapLocation::kInvalidFieldOffset,
index,
+ HeapLocation::kScalar,
HeapLocation::kDeclaringClassDefIndexForArrays);
}
@@ -490,6 +541,33 @@
array,
HeapLocation::kInvalidFieldOffset,
index,
+ HeapLocation::kScalar,
+ HeapLocation::kDeclaringClassDefIndexForArrays,
+ value);
+ }
+
+ void VisitVecLoad(HVecLoad* instruction) OVERRIDE {
+ HInstruction* array = instruction->InputAt(0);
+ HInstruction* index = instruction->InputAt(1);
+ size_t vector_length = instruction->GetVectorLength();
+ VisitGetLocation(instruction,
+ array,
+ HeapLocation::kInvalidFieldOffset,
+ index,
+ vector_length,
+ HeapLocation::kDeclaringClassDefIndexForArrays);
+ }
+
+ void VisitVecStore(HVecStore* instruction) OVERRIDE {
+ HInstruction* array = instruction->InputAt(0);
+ HInstruction* index = instruction->InputAt(1);
+ HInstruction* value = instruction->InputAt(2);
+ size_t vector_length = instruction->GetVectorLength();
+ VisitSetLocation(instruction,
+ array,
+ HeapLocation::kInvalidFieldOffset,
+ index,
+ vector_length,
HeapLocation::kDeclaringClassDefIndexForArrays,
value);
}
@@ -503,7 +581,9 @@
continue;
}
// A store is kept as the heap value for possibly removed stores.
- if (heap_value->IsInstanceFieldSet() || heap_value->IsArraySet()) {
+ if (heap_value->IsInstanceFieldSet() ||
+ heap_value->IsArraySet() ||
+ heap_value->IsVecStore()) {
// Check whether the reference for a store is used by an environment local of
// HDeoptimize.
HInstruction* reference = heap_value->InputAt(0);
@@ -661,11 +741,6 @@
return;
}
- // TODO: analyze VecLoad/VecStore better.
- if (graph_->HasSIMD()) {
- return;
- }
-
LSEVisitor lse_visitor(graph_, heap_location_collector, side_effects_, stats_);
for (HBasicBlock* block : graph_->GetReversePostOrder()) {
lse_visitor.VisitBasicBlock(block);
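
The comment block in the hunk above argues that a "same value" vector store is not redundant under partial overlap. A tiny standalone simulation of the lanes makes the hazard concrete; it assumes nothing from ART beyond the example values used in that comment.

  #include <array>
  #include <cstdint>
  #include <iostream>

  int main() {
    std::array<uint32_t, 8> mem{};
    std::array<uint32_t, 4> packed = {0xA, 0xB, 0xC, 0xD};

    for (int lane = 0; lane < 4; ++lane) mem[0 + lane] = packed[lane];  // a[i..i+3] = packed
    for (int lane = 0; lane < 4; ++lane) mem[1 + lane] = packed[lane];  // a[i+1..i+4] = packed
    // a[i..i+3] is now {0xA, 0xA, 0xB, 0xC}, not the original packed data,
    // so a later VecLoad of a[i..i+3] cannot reuse `packed`.
    for (int lane = 0; lane < 4; ++lane) std::cout << std::hex << mem[lane] << " ";
    std::cout << "\n";  // prints: a a b c
    return 0;
  }
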
diff --git a/compiler/optimizing/load_store_elimination_test.cc b/compiler/optimizing/load_store_elimination_test.cc
new file mode 100644
index 0000000..6f42d96
--- /dev/null
+++ b/compiler/optimizing/load_store_elimination_test.cc
@@ -0,0 +1,406 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "side_effects_analysis.h"
+#include "load_store_analysis.h"
+#include "load_store_elimination.h"
+#include "nodes.h"
+#include "optimizing_unit_test.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+class LoadStoreEliminationTest : public OptimizingUnitTest {
+ public:
+ LoadStoreEliminationTest() : pool_() {}
+
+ void PerformLSE() {
+ graph_->BuildDominatorTree();
+ SideEffectsAnalysis side_effects(graph_);
+ side_effects.Run();
+ LoadStoreAnalysis lsa(graph_);
+ lsa.Run();
+ LoadStoreElimination lse(graph_, side_effects, lsa, nullptr);
+ lse.Run();
+ }
+
+ void CreateTestControlFlowGraph() {
+ graph_ = CreateGraph();
+
+ entry_ = new (GetAllocator()) HBasicBlock(graph_);
+ pre_header_ = new (GetAllocator()) HBasicBlock(graph_);
+ loop_header_ = new (GetAllocator()) HBasicBlock(graph_);
+ loop_body_ = new (GetAllocator()) HBasicBlock(graph_);
+ exit_ = new (GetAllocator()) HBasicBlock(graph_);
+
+ graph_->AddBlock(entry_);
+ graph_->AddBlock(pre_header_);
+ graph_->AddBlock(loop_header_);
+ graph_->AddBlock(loop_body_);
+ graph_->AddBlock(exit_);
+
+ graph_->SetEntryBlock(entry_);
+
+ // This common CFG is used by all the test cases in this file.
+ // entry
+ // |
+ // pre_header
+ // |
+ // loop_header <--+
+ // | |
+ // loop_body -----+
+ // |
+ // exit
+
+ entry_->AddSuccessor(pre_header_);
+ pre_header_->AddSuccessor(loop_header_);
+ loop_header_->AddSuccessor(exit_); // true successor
+ loop_header_->AddSuccessor(loop_body_); // false successor
+ loop_body_->AddSuccessor(loop_header_);
+
+ HInstruction* c0 = graph_->GetIntConstant(0);
+ HInstruction* c1 = graph_->GetIntConstant(1);
+ HInstruction* c4 = graph_->GetIntConstant(4);
+ HInstruction* c128 = graph_->GetIntConstant(128);
+
+ // The entry block has the following instructions:
+ // array, i, j, i+1, i+4.
+ array_ = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kReference);
+ i_ = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(1),
+ 1,
+ DataType::Type::kInt32);
+ j_ = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(1),
+ 2,
+ DataType::Type::kInt32);
+ i_add1_ = new (GetAllocator()) HAdd(DataType::Type::kInt32, i_, c1);
+ i_add4_ = new (GetAllocator()) HAdd(DataType::Type::kInt32, i_, c4);
+ entry_->AddInstruction(array_);
+ entry_->AddInstruction(i_);
+ entry_->AddInstruction(j_);
+ entry_->AddInstruction(i_add1_);
+ entry_->AddInstruction(i_add4_);
+ entry_->AddInstruction(new (GetAllocator()) HGoto());
+
+ // pre_header block
+ pre_header_->AddInstruction(new (GetAllocator()) HGoto());
+
+ // The loop header block has the following instructions:
+ // phi = 0;
+ // if (phi >= 128);
+ phi_ = new (GetAllocator()) HPhi(GetAllocator(), 0, 0, DataType::Type::kInt32);
+ cmp_ = new (GetAllocator()) HGreaterThanOrEqual(phi_, c128);
+ if_ = new (GetAllocator()) HIf(cmp_);
+ loop_header_->AddPhi(phi_);
+ loop_header_->AddInstruction(cmp_);
+ loop_header_->AddInstruction(if_);
+ phi_->AddInput(c0);
+
+ // The loop body block has the following instructions:
+ // phi++;
+ HInstruction* inc_phi = new (GetAllocator()) HAdd(DataType::Type::kInt32, phi_, c1);
+ loop_body_->AddInstruction(inc_phi);
+ loop_body_->AddInstruction(new (GetAllocator()) HGoto());
+ phi_->AddInput(inc_phi);
+
+ // exit block
+ exit_->AddInstruction(new (GetAllocator()) HExit());
+ }
+
+ // Helper functions to avoid tedious HIR assembly in the test bodies.
+ HInstruction* AddVecLoad(HBasicBlock* block, HInstruction* array, HInstruction* index) {
+ DCHECK(block != nullptr);
+ DCHECK(array != nullptr);
+ DCHECK(index != nullptr);
+ HInstruction* vload = new (GetAllocator()) HVecLoad(
+ GetAllocator(),
+ array,
+ index,
+ DataType::Type::kInt32,
+ SideEffects::ArrayReadOfType(DataType::Type::kInt32),
+ 4,
+ /*is_string_char_at*/ false,
+ kNoDexPc);
+ block->InsertInstructionBefore(vload, block->GetLastInstruction());
+ return vload;
+ }
+
+ HInstruction* AddVecStore(HBasicBlock* block,
+ HInstruction* array,
+ HInstruction* index,
+ HVecOperation* vdata = nullptr) {
+ DCHECK(block != nullptr);
+ DCHECK(array != nullptr);
+ DCHECK(index != nullptr);
+ if (vdata == nullptr) {
+ HInstruction* c1 = graph_->GetIntConstant(1);
+ vdata = new (GetAllocator()) HVecReplicateScalar(GetAllocator(),
+ c1,
+ DataType::Type::kInt32,
+ 4,
+ kNoDexPc);
+ block->InsertInstructionBefore(vdata, block->GetLastInstruction());
+ }
+ HInstruction* vstore = new (GetAllocator()) HVecStore(
+ GetAllocator(),
+ array,
+ index,
+ vdata,
+ DataType::Type::kInt32,
+ SideEffects::ArrayWriteOfType(DataType::Type::kInt32),
+ 4,
+ kNoDexPc);
+ block->InsertInstructionBefore(vstore, block->GetLastInstruction());
+ return vstore;
+ }
+
+ HInstruction* AddArrayGet(HBasicBlock* block, HInstruction* array, HInstruction* index) {
+ DCHECK(block != nullptr);
+ DCHECK(array != nullptr);
+ DCHECK(index != nullptr);
+ HInstruction* get = new (GetAllocator()) HArrayGet(array, index, DataType::Type::kInt32, 0);
+ block->InsertInstructionBefore(get, block->GetLastInstruction());
+ return get;
+ }
+
+ HInstruction* AddArraySet(HBasicBlock* block,
+ HInstruction* array,
+ HInstruction* index,
+ HInstruction* data = nullptr) {
+ DCHECK(block != nullptr);
+ DCHECK(array != nullptr);
+ DCHECK(index != nullptr);
+ if (data == nullptr) {
+ data = graph_->GetIntConstant(1);
+ }
+ HInstruction* store = new (GetAllocator()) HArraySet(array,
+ index,
+ data,
+ DataType::Type::kInt32,
+ 0);
+ block->InsertInstructionBefore(store, block->GetLastInstruction());
+ return store;
+ }
+
+ ArenaPool pool_;
+
+ HGraph* graph_;
+ HBasicBlock* entry_;
+ HBasicBlock* pre_header_;
+ HBasicBlock* loop_header_;
+ HBasicBlock* loop_body_;
+ HBasicBlock* exit_;
+
+ HInstruction* array_;
+ HInstruction* i_;
+ HInstruction* j_;
+ HInstruction* i_add1_;
+ HInstruction* i_add4_;
+
+ HPhi* phi_;
+ HInstruction* cmp_;
+ HInstruction* if_;
+};
+
+TEST_F(LoadStoreEliminationTest, ArrayGetSetElimination) {
+ CreateTestControlFlowGraph();
+
+ HInstruction* c1 = graph_->GetIntConstant(1);
+ HInstruction* c2 = graph_->GetIntConstant(2);
+ HInstruction* c3 = graph_->GetIntConstant(3);
+
+ // array[1] = 1;
+ // x = array[1]; <--- Remove.
+ // y = array[2];
+ // array[1] = 1; <--- Remove, since it stores same value.
+ // array[i] = 3; <--- MAY alias.
+ // array[1] = 1; <--- Cannot remove, even if it stores the same value.
+ AddArraySet(entry_, array_, c1, c1);
+ HInstruction* load1 = AddArrayGet(entry_, array_, c1);
+ HInstruction* load2 = AddArrayGet(entry_, array_, c2);
+ HInstruction* store1 = AddArraySet(entry_, array_, c1, c1);
+ AddArraySet(entry_, array_, i_, c3);
+ HInstruction* store2 = AddArraySet(entry_, array_, c1, c1);
+
+ PerformLSE();
+
+ ASSERT_TRUE(IsRemoved(load1));
+ ASSERT_FALSE(IsRemoved(load2));
+ ASSERT_TRUE(IsRemoved(store1));
+ ASSERT_FALSE(IsRemoved(store2));
+}
+
+TEST_F(LoadStoreEliminationTest, SameHeapValue) {
+ CreateTestControlFlowGraph();
+
+ HInstruction* c1 = graph_->GetIntConstant(1);
+ HInstruction* c2 = graph_->GetIntConstant(2);
+
+ // Test LSE handling same value stores on array.
+ // array[1] = 1;
+ // array[2] = 1;
+ // array[1] = 1; <--- Can remove.
+ // array[1] = 2; <--- Can NOT remove.
+ AddArraySet(entry_, array_, c1, c1);
+ AddArraySet(entry_, array_, c2, c1);
+ HInstruction* store1 = AddArraySet(entry_, array_, c1, c1);
+ HInstruction* store2 = AddArraySet(entry_, array_, c1, c2);
+
+ // Test LSE handling same value stores on vector.
+ // vdata = [0x1, 0x2, 0x3, 0x4, ...]
+ // VecStore array[i...] = vdata;
+ // VecStore array[j...] = vdata; <--- MAY ALIAS.
+ // VecStore array[i...] = vdata; <--- Cannot remove, even if it stores the same value.
+ AddVecStore(entry_, array_, i_);
+ AddVecStore(entry_, array_, j_);
+ HInstruction* vstore1 = AddVecStore(entry_, array_, i_);
+
+ // VecStore array[i...] = vdata;
+ // VecStore array[i+1...] = vdata; <--- MAY alias due to partial overlap.
+ // VecStore array[i...] = vdata; <--- Cannot remove, even if it stores the same value.
+ AddVecStore(entry_, array_, i_);
+ AddVecStore(entry_, array_, i_add1_);
+ HInstruction* vstore2 = AddVecStore(entry_, array_, i_);
+
+ PerformLSE();
+
+ ASSERT_TRUE(IsRemoved(store1));
+ ASSERT_FALSE(IsRemoved(store2));
+ ASSERT_FALSE(IsRemoved(vstore1));
+ ASSERT_FALSE(IsRemoved(vstore2));
+}
+
+TEST_F(LoadStoreEliminationTest, OverlappingLoadStore) {
+ CreateTestControlFlowGraph();
+
+ HInstruction* c1 = graph_->GetIntConstant(1);
+
+ // Test array load/store elimination when there is a vector store in between.
+ // a[i] = 1;
+ // .. = a[i]; <-- Remove.
+ // a[i,i+1,i+2,i+3] = data; <-- PARTIAL OVERLAP !
+ // .. = a[i]; <-- Cannot remove.
+ AddArraySet(entry_, array_, i_, c1);
+ HInstruction* load1 = AddArrayGet(entry_, array_, i_);
+ AddVecStore(entry_, array_, i_);
+ HInstruction* load2 = AddArrayGet(entry_, array_, i_);
+
+ // Test LSE on partially overlapping vector loads and stores.
+ // a[i,i+1,i+2,i+3] = data;
+ // a[i+4,i+5,i+6,i+7] = data;
+ // .. = a[i,i+1,i+2,i+3];
+ // .. = a[i+4,i+5,i+6,i+7];
+ // a[i+1,i+2,i+3,i+4] = data; <-- PARTIAL OVERLAP !
+ // .. = a[i,i+1,i+2,i+3];
+ // .. = a[i+4,i+5,i+6,i+7];
+ AddVecStore(entry_, array_, i_);
+ AddVecStore(entry_, array_, i_add4_);
+ HInstruction* vload1 = AddVecLoad(entry_, array_, i_);
+ HInstruction* vload2 = AddVecLoad(entry_, array_, i_add4_);
+ AddVecStore(entry_, array_, i_add1_);
+ HInstruction* vload3 = AddVecLoad(entry_, array_, i_);
+ HInstruction* vload4 = AddVecLoad(entry_, array_, i_add4_);
+
+ // Test vector load/store elimination when there is an array store in between.
+ // a[i,i+1,i+2,i+3] = data;
+ // a[i+1] = 1; <-- PARTIAL OVERLAP !
+ // .. = a[i,i+1,i+2,i+3];
+ AddVecStore(entry_, array_, i_);
+ AddArraySet(entry_, array_, i_, c1);
+ HInstruction* vload5 = AddVecLoad(entry_, array_, i_);
+
+ PerformLSE();
+
+ ASSERT_TRUE(IsRemoved(load1));
+ ASSERT_FALSE(IsRemoved(load2));
+
+ ASSERT_TRUE(IsRemoved(vload1));
+ ASSERT_TRUE(IsRemoved(vload2));
+ ASSERT_FALSE(IsRemoved(vload3));
+ ASSERT_FALSE(IsRemoved(vload4));
+
+ ASSERT_FALSE(IsRemoved(vload5));
+}
+
+// function (int[] a, int j) {
+// a[j] = 1;
+// for (int i=0; i<128; i++) {
+// /* doesn't do any write */
+// }
+//   a[j] = 1;
+// }
+TEST_F(LoadStoreEliminationTest, Loop1) {
+ CreateTestControlFlowGraph();
+
+ HInstruction* c1 = graph_->GetIntConstant(1);
+
+ // a[j] = 1;
+ AddArraySet(pre_header_, array_, j_, c1);
+
+ // LOOP BODY:
+ // .. = a[i,i+1,i+2,i+3];
+ AddVecLoad(loop_body_, array_, phi_);
+
+ // a[j] = 1;
+ HInstruction* array_set = AddArraySet(exit_, array_, j_, c1);
+
+ PerformLSE();
+
+ ASSERT_TRUE(IsRemoved(array_set));
+}
+
+// function (int[] a, int index) {
+// a[index] = 1;
+// int[] b = new int[128];
+// for (int i=0; i<128; i++) {
+// a[i,i+1,i+2,i+3] = vdata;
+// b[i,i+1,i+2,i+3] = a[i,i+1,i+2,i+3];
+// }
+// a[index] = 1;
+// }
+TEST_F(LoadStoreEliminationTest, Loop2) {
+ CreateTestControlFlowGraph();
+
+ HInstruction* c0 = graph_->GetIntConstant(0);
+ HInstruction* c1 = graph_->GetIntConstant(1);
+ HInstruction* c128 = graph_->GetIntConstant(128);
+
+ HInstruction* array_b = new (GetAllocator()) HNewArray(c0, c128, 0);
+ entry_->AddInstruction(array_b);
+
+ // a[index] = 1;
+ AddArraySet(pre_header_, array_, i_, c1);
+
+ // a[i,i+1,i+2,i+3] = vdata;
+ // b[i,i+1,i+2,i+3] = a[i,i+1,i+2,i+3];
+ AddVecStore(loop_body_, array_, phi_);
+ HInstruction* vload = AddVecLoad(loop_body_, array_, phi_);
+ AddVecStore(loop_body_, array_b, phi_, vload->AsVecLoad());
+
+ // a[index] = 1;
+ HInstruction* a_set = AddArraySet(exit_, array_, i_, c1);
+
+ PerformLSE();
+
+ ASSERT_TRUE(IsRemoved(vload));
+ ASSERT_FALSE(IsRemoved(a_set)); // Cannot remove due to side effects in the loop.
+}
+
+} // namespace art
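
The assertions in these tests rely on IsRemoved() from the optimizing test harness (optimizing_unit_test.h). A plausible shape for such a helper, given that RemoveInstruction() detaches an instruction from its block, is the sketch below; treat it as an assumption, not the harness's actual code.

  // Hedged sketch: an eliminated instruction no longer belongs to any block.
  bool IsRemoved(HInstruction* instruction) {
    return instruction->GetBlock() == nullptr;
  }
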
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 9233eb5..252d538 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -311,13 +311,11 @@
CompiledMethod* JniCompile(uint32_t access_flags,
uint32_t method_idx,
- const DexFile& dex_file,
- JniOptimizationFlags optimization_flags) const OVERRIDE {
+ const DexFile& dex_file) const OVERRIDE {
return ArtQuickJniCompileMethod(GetCompilerDriver(),
access_flags,
method_idx,
- dex_file,
- optimization_flags);
+ dex_file);
}
uintptr_t GetEntryPointOf(ArtMethod* method) const OVERRIDE
diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc
index 8cc376c..bb28d50 100644
--- a/compiler/optimizing/scheduler.cc
+++ b/compiler/optimizing/scheduler.cc
@@ -72,7 +72,7 @@
size_t SchedulingGraph::ArrayAccessHeapLocation(HInstruction* array, HInstruction* index) const {
DCHECK(heap_location_collector_ != nullptr);
- size_t heap_loc = heap_location_collector_->GetArrayAccessHeapLocation(array, index);
+ size_t heap_loc = heap_location_collector_->GetArrayHeapLocation(array, index);
// This array access should be analyzed and added to HeapLocationCollector before.
DCHECK(heap_loc != HeapLocationCollector::kHeapLocationNotFound);
return heap_loc;
@@ -153,12 +153,7 @@
DCHECK(field != nullptr);
DCHECK(heap_location_collector_ != nullptr);
- size_t heap_loc = heap_location_collector_->FindHeapLocationIndex(
- heap_location_collector_->FindReferenceInfoOf(
- heap_location_collector_->HuntForOriginalReference(obj)),
- field->GetFieldOffset().SizeValue(),
- nullptr,
- field->GetDeclaringClassDefIndex());
+ size_t heap_loc = heap_location_collector_->GetFieldHeapLocation(obj, field);
// This field access should be analyzed and added to HeapLocationCollector before.
DCHECK(heap_loc != HeapLocationCollector::kHeapLocationNotFound);
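
The removed lines show exactly what the new GetFieldHeapLocation() helper must compose. A hedged sketch of the helper, inferred from this call site and from the vector_length parameter added elsewhere in this change; the body below is an assumption, not copied from load_store_analysis.h.

  size_t HeapLocationCollector::GetFieldHeapLocation(HInstruction* obj,
                                                     const FieldInfo* field) const {
    DCHECK(obj != nullptr);
    DCHECK(field != nullptr);
    return FindHeapLocationIndex(
        FindReferenceInfoOf(HuntForOriginalReference(obj)),
        field->GetFieldOffset().SizeValue(),
        /* index */ nullptr,
        HeapLocation::kScalar,  // assumption: field accesses are scalar
        field->GetDeclaringClassDefIndex());
  }
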
diff --git a/compiler/optimizing/scheduler_test.cc b/compiler/optimizing/scheduler_test.cc
index 75dce81..104ebc7 100644
--- a/compiler/optimizing/scheduler_test.cc
+++ b/compiler/optimizing/scheduler_test.cc
@@ -294,38 +294,38 @@
size_t loc2 = HeapLocationCollector::kHeapLocationNotFound;
// Test side effect dependency: array[0] and array[1]
- loc1 = heap_location_collector.GetArrayAccessHeapLocation(arr, c0);
- loc2 = heap_location_collector.GetArrayAccessHeapLocation(arr, c1);
+ loc1 = heap_location_collector.GetArrayHeapLocation(arr, c0);
+ loc2 = heap_location_collector.GetArrayHeapLocation(arr, c1);
ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
ASSERT_FALSE(scheduling_graph.HasImmediateOtherDependency(arr_set_1, arr_set_0));
// Test side effect dependency based on LSA analysis: array[i] and array[j]
- loc1 = heap_location_collector.GetArrayAccessHeapLocation(arr, i);
- loc2 = heap_location_collector.GetArrayAccessHeapLocation(arr, j);
+ loc1 = heap_location_collector.GetArrayHeapLocation(arr, i);
+ loc2 = heap_location_collector.GetArrayHeapLocation(arr, j);
ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(arr_set_j, arr_set_i));
// Test side effect dependency based on LSA analysis: array[i] and array[i+0]
- loc1 = heap_location_collector.GetArrayAccessHeapLocation(arr, i);
- loc2 = heap_location_collector.GetArrayAccessHeapLocation(arr, add0);
+ loc1 = heap_location_collector.GetArrayHeapLocation(arr, i);
+ loc2 = heap_location_collector.GetArrayHeapLocation(arr, add0);
ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(arr_set_add0, arr_set_i));
// Test side effect dependency based on LSA analysis: array[i] and array[i-0]
- loc1 = heap_location_collector.GetArrayAccessHeapLocation(arr, i);
- loc2 = heap_location_collector.GetArrayAccessHeapLocation(arr, sub0);
+ loc1 = heap_location_collector.GetArrayHeapLocation(arr, i);
+ loc2 = heap_location_collector.GetArrayHeapLocation(arr, sub0);
ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(arr_set_sub0, arr_set_i));
// Test side effect dependency based on LSA analysis: array[i] and array[i+1]
- loc1 = heap_location_collector.GetArrayAccessHeapLocation(arr, i);
- loc2 = heap_location_collector.GetArrayAccessHeapLocation(arr, add1);
+ loc1 = heap_location_collector.GetArrayHeapLocation(arr, i);
+ loc2 = heap_location_collector.GetArrayHeapLocation(arr, add1);
ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
ASSERT_FALSE(scheduling_graph.HasImmediateOtherDependency(arr_set_add1, arr_set_i));
// Test side effect dependency based on LSA analysis: array[i+1] and array[i-1]
- loc1 = heap_location_collector.GetArrayAccessHeapLocation(arr, add1);
- loc2 = heap_location_collector.GetArrayAccessHeapLocation(arr, sub1);
+ loc1 = heap_location_collector.GetArrayHeapLocation(arr, add1);
+ loc2 = heap_location_collector.GetArrayHeapLocation(arr, sub1);
ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
ASSERT_FALSE(scheduling_graph.HasImmediateOtherDependency(arr_set_sub1, arr_set_add1));
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index afca26d..c2556aa 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -941,7 +941,7 @@
class_it.Next()) {
if (class_it.IsAtMethod() && class_it.GetMethodCodeItem() != nullptr) {
for (const DexInstructionPcPair& inst :
- class_it.GetMethodCodeItem()->Instructions()) {
+ class_it.GetMethodCodeItem()->Instructions()) {
ASSERT_FALSE(inst->IsQuickened());
}
}
diff --git a/openjdkjvmti/ti_redefine.cc b/openjdkjvmti/ti_redefine.cc
index c4f16f5..dcc237d 100644
--- a/openjdkjvmti/ti_redefine.cc
+++ b/openjdkjvmti/ti_redefine.cc
@@ -629,8 +629,8 @@
// Since direct methods have different flags than virtual ones (specifically direct methods must
// have kAccPrivate or kAccStatic or kAccConstructor flags) we can tell if a method changes from
// virtual to direct.
- uint32_t new_flags = new_iter.GetMethodAccessFlags() & ~art::kAccPreviouslyWarm;
- if (new_flags != (old_method->GetAccessFlags() & (art::kAccValidMethodFlags ^ art::kAccPreviouslyWarm))) {
+ uint32_t new_flags = new_iter.GetMethodAccessFlags();
+ if (new_flags != (old_method->GetAccessFlags() & art::kAccValidMethodFlags)) {
RecordFailure(ERR(UNSUPPORTED_REDEFINITION_METHOD_MODIFIERS_CHANGED),
StringPrintf("method '%s' (sig: %s) had different access flags",
new_method_name,
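
The comparison above only works because runtime-internal bits are masked off before the old and new flags are compared. A standalone illustration of that masking pattern follows; the constants are placeholders, not ART's real modifiers.h values.

  #include <cstdint>
  #include <iostream>

  constexpr uint32_t kAccPublic = 0x0001;            // placeholder dex-declared bit
  constexpr uint32_t kAccValidMethodFlags = 0xFFFF;  // placeholder mask of dex-visible bits
  constexpr uint32_t kAccRuntimeInternal = 0x10000;  // placeholder runtime-only bit

  int main() {
    uint32_t new_flags = kAccPublic;                        // parsed from the new dex file
    uint32_t old_flags = kAccPublic | kAccRuntimeInternal;  // runtime may have set extra bits
    // Masking drops runtime-internal bits so only dex-declared modifiers are compared.
    std::cout << (new_flags == (old_flags & kAccValidMethodFlags)) << "\n";  // 1
    return 0;
  }
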
diff --git a/profman/boot_image_profile.cc b/profman/boot_image_profile.cc
index 4092f6e..e5645d3 100644
--- a/profman/boot_image_profile.cc
+++ b/profman/boot_image_profile.cc
@@ -92,7 +92,7 @@
it.SkipInstanceFields();
while (it.HasNextDirectMethod() || it.HasNextVirtualMethod()) {
const uint32_t flags = it.GetMethodAccessFlags();
- if ((flags & kAccNative) != 0 || (flags & kAccFastNative) != 0) {
+ if ((flags & kAccNative) != 0) {
// Native method will get dirtied.
is_clean = false;
break;
diff --git a/runtime/Android.bp b/runtime/Android.bp
index e032238..69e4434 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -567,6 +567,7 @@
"class_linker_test.cc",
"class_loader_context_test.cc",
"class_table_test.cc",
+ "code_item_accessors_test.cc",
"compiler_filter_test.cc",
"dex_file_test.cc",
"dex_file_verifier_test.cc",
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 12b4d16..50913de 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -23,6 +23,7 @@
#include "base/callee_save_type.h"
#include "base/logging.h"
#include "class_linker-inl.h"
+#include "code_item_accessors-inl.h"
#include "common_throws.h"
#include "dex_file-inl.h"
#include "dex_file_annotations.h"
@@ -392,6 +393,7 @@
bool is_synchronized = IsSynchronized();
bool skip_access_checks = SkipAccessChecks();
bool is_fast_native = IsFastNative();
+ bool is_critical_native = IsCriticalNative();
bool is_copied = IsCopied();
bool is_miranda = IsMiranda();
bool is_default = IsDefault();
@@ -404,6 +406,7 @@
DCHECK_EQ(is_synchronized, IsSynchronized());
DCHECK_EQ(skip_access_checks, SkipAccessChecks());
DCHECK_EQ(is_fast_native, IsFastNative());
+ DCHECK_EQ(is_critical_native, IsCriticalNative());
DCHECK_EQ(is_copied, IsCopied());
DCHECK_EQ(is_miranda, IsMiranda());
DCHECK_EQ(is_default, IsDefault());
@@ -457,6 +460,18 @@
}
}
+inline IterationRange<DexInstructionIterator> ArtMethod::DexInstructions() {
+ CodeItemInstructionAccessor accessor(this);
+ return { accessor.begin(),
+ accessor.end() };
+}
+
+inline IterationRange<DexInstructionIterator> ArtMethod::NullableDexInstructions() {
+ CodeItemInstructionAccessor accessor(CodeItemInstructionAccessor::CreateNullable(this));
+ return { accessor.begin(),
+ accessor.end() };
+}
+
} // namespace art
#endif // ART_RUNTIME_ART_METHOD_INL_H_
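
A hedged usage sketch of the new iteration helpers, mirroring the range-for loop in the dex2oat_test.cc hunk earlier in this change; it assumes the mutator lock is held, as the REQUIRES_SHARED annotations demand, and that `method` may legitimately lack a code item.

  // NullableDexInstructions() yields an empty range for methods without a
  // code item (e.g. native or abstract methods); DexInstructions() must not
  // be called on those.
  for (const DexInstructionPcPair& inst : method->NullableDexInstructions()) {
    if (inst->IsQuickened()) {
      // Inspect quickened opcodes here.
    }
  }
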
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 8709643..0a108f9 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -26,7 +26,6 @@
#include "class_linker-inl.h"
#include "debugger.h"
#include "dex_file-inl.h"
-#include "dex_file_annotations.h"
#include "dex_instruction.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
@@ -392,13 +391,9 @@
self->PopManagedStackFragment(fragment);
}
-const void* ArtMethod::RegisterNative(const void* native_method, bool is_fast) {
+const void* ArtMethod::RegisterNative(const void* native_method) {
CHECK(IsNative()) << PrettyMethod();
- CHECK(!IsFastNative()) << PrettyMethod();
CHECK(native_method != nullptr) << PrettyMethod();
- if (is_fast) {
- AddAccessFlags(kAccFastNative);
- }
void* new_native_method = nullptr;
Runtime::Current()->GetRuntimeCallbacks()->RegisterNativeMethod(this,
native_method,
@@ -408,7 +403,7 @@
}
void ArtMethod::UnregisterNative() {
- CHECK(IsNative() && !IsFastNative()) << PrettyMethod();
+ CHECK(IsNative()) << PrettyMethod();
// restore stub to lookup native pointer via dlsym
SetEntryPointFromJni(GetJniDlsymLookupStub());
}
@@ -428,18 +423,6 @@
cls == WellKnownClasses::ToClass(WellKnownClasses::java_lang_invoke_VarHandle));
}
-bool ArtMethod::IsAnnotatedWithFastNative() {
- ScopedObjectAccess soa(Thread::Current());
- return annotations::HasFastNativeMethodBuildAnnotation(
- *GetDexFile(), GetClassDef(), GetDexMethodIndex());
-}
-
-bool ArtMethod::IsAnnotatedWithCriticalNative() {
- ScopedObjectAccess soa(Thread::Current());
- return annotations::HasCriticalNativeMethodBuildAnnotation(
- *GetDexFile(), GetClassDef(), GetDexMethodIndex());
-}
-
static uint32_t GetOatMethodIndexFromMethodIndex(const DexFile& dex_file,
uint16_t class_def_idx,
uint32_t method_idx) {
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 8927481..c17eef1 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -22,8 +22,10 @@
#include "base/bit_utils.h"
#include "base/casts.h"
#include "base/enums.h"
+#include "base/iteration_range.h"
#include "base/logging.h"
#include "dex_file.h"
+#include "dex_instruction_iterator.h"
#include "gc_root.h"
#include "modifiers.h"
#include "obj_ptr.h"
@@ -200,9 +202,9 @@
}
bool IsMiranda() {
- static_assert((kAccMiranda & (kAccIntrinsic | kAccIntrinsicBits)) == 0,
- "kAccMiranda conflicts with intrinsic modifier");
- return (GetAccessFlags() & kAccMiranda) != 0;
+ // The kAccMiranda flag value is used with a different meaning for native methods,
+ // so we need to check the kAccNative flag as well.
+ return (GetAccessFlags() & (kAccNative | kAccMiranda)) == kAccMiranda;
}
// Returns true if invoking this method will not throw an AbstractMethodError or
@@ -213,6 +215,7 @@
bool IsCompilable() {
if (IsIntrinsic()) {
+ // kAccCompileDontBother overlaps with kAccIntrinsicBits.
return true;
}
return (GetAccessFlags() & kAccCompileDontBother) == 0;
@@ -252,11 +255,24 @@
return (GetAccessFlags<kReadBarrierOption>() & kAccNative) != 0;
}
+ // Checks to see if the method was annotated with @dalvik.annotation.optimization.FastNative.
bool IsFastNative() {
+ // The presence of the annotation is checked by ClassLinker and recorded in access flags.
+ // The kAccFastNative flag value is used with a different meaning for non-native methods,
+ // so we need to check the kAccNative flag as well.
constexpr uint32_t mask = kAccFastNative | kAccNative;
return (GetAccessFlags() & mask) == mask;
}
+ // Checks to see if the method was annotated with @dalvik.annotation.optimization.CriticalNative.
+ bool IsCriticalNative() {
+ // The presence of the annotation is checked by ClassLinker and recorded in access flags.
+ // The kAccCriticalNative flag value is used with a different meaning for non-native methods,
+ // so we need to check the kAccNative flag as well.
+ constexpr uint32_t mask = kAccCriticalNative | kAccNative;
+ return (GetAccessFlags() & mask) == mask;
+ }
+
bool IsAbstract() {
return (GetAccessFlags() & kAccAbstract) != 0;
}
@@ -274,10 +290,14 @@
bool IsPolymorphicSignature() REQUIRES_SHARED(Locks::mutator_lock_);
bool SkipAccessChecks() {
- return (GetAccessFlags() & kAccSkipAccessChecks) != 0;
+ // The kAccSkipAccessChecks flag value is used with a different meaning for native methods,
+ // so we need to check the kAccNative flag as well.
+ return (GetAccessFlags() & (kAccSkipAccessChecks | kAccNative)) == kAccSkipAccessChecks;
}
void SetSkipAccessChecks() {
+ // SkipAccessChecks() is applicable only to non-native methods.
+ DCHECK(!IsNative<kWithoutReadBarrier>());
AddAccessFlags(kAccSkipAccessChecks);
}
@@ -310,14 +330,6 @@
AddAccessFlags(kAccMustCountLocks);
}
- // Checks to see if the method was annotated with @dalvik.annotation.optimization.FastNative
- // -- Independent of kAccFastNative access flags.
- bool IsAnnotatedWithFastNative();
-
- // Checks to see if the method was annotated with @dalvik.annotation.optimization.CriticalNative
- // -- Unrelated to the GC notion of "critical".
- bool IsAnnotatedWithCriticalNative();
-
// Returns true if this method could be overridden by a default method.
bool IsOverridableByDefaultMethod() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -417,7 +429,7 @@
// Registers the native method and returns the new entry point. NB The returned entry point might
// be different from the native_method argument if some MethodCallback modifies it.
- const void* RegisterNative(const void* native_method, bool is_fast)
+ const void* RegisterNative(const void* native_method)
REQUIRES_SHARED(Locks::mutator_lock_) WARN_UNUSED;
void UnregisterNative() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -452,7 +464,7 @@
// where the declaring class is treated as a weak reference (accessing it with
// a read barrier would either prevent unloading the class, or crash the runtime if
// the GC wants to unload it).
- DCHECK(!IsNative<kWithoutReadBarrier>());
+ DCHECK(!IsNative());
if (UNLIKELY(IsProxyMethod())) {
return nullptr;
}
@@ -700,6 +712,15 @@
"ptr_sized_fields_.entry_point_from_quick_compiled_code_");
}
+ // Returns the dex instructions of the code item for the art method. Must not be called on null
+ // code items.
+ ALWAYS_INLINE IterationRange<DexInstructionIterator> DexInstructions()
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ // Handles a null code item by returning iterators that have a null address.
+ ALWAYS_INLINE IterationRange<DexInstructionIterator> NullableDexInstructions()
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
protected:
// Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
// The class we are a part of.
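
IsFastNative(), IsCriticalNative(), IsMiranda() and SkipAccessChecks() above all use the same pattern: a reused flag bit is only meaningful in combination with (or in the absence of) kAccNative. A standalone illustration with placeholder constants, not ART's real modifiers.h values:

  #include <cstdint>
  #include <iostream>

  constexpr uint32_t kAccNative = 0x0100;          // placeholder
  constexpr uint32_t kAccFastNative = 0x00080000;  // placeholder; bit reused for non-native methods

  bool IsFastNative(uint32_t access_flags) {
    constexpr uint32_t mask = kAccFastNative | kAccNative;
    return (access_flags & mask) == mask;  // both bits must be set
  }

  int main() {
    std::cout << IsFastNative(kAccNative | kAccFastNative) << "\n";  // 1
    // A non-native method that happens to reuse the same bit is not fast native.
    std::cout << IsFastNative(kAccFastNative) << "\n";               // 0
    return 0;
  }
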
diff --git a/runtime/base/casts.h b/runtime/base/casts.h
index 0cbabba..92c493a 100644
--- a/runtime/base/casts.h
+++ b/runtime/base/casts.h
@@ -77,6 +77,14 @@
return static_cast<To>(f);
}
+template<typename To, typename From> // use like this: down_cast<T&>(foo);
+inline To down_cast(From& f) { // so we only accept references
+ static_assert(std::is_base_of<From, typename std::remove_reference<To>::type>::value,
+ "down_cast unsafe as To is not a subtype of From");
+
+ return static_cast<To>(f);
+}
+
template <class Dest, class Source>
inline Dest bit_cast(const Source& source) {
// Compile time assertion: sizeof(Dest) == sizeof(Source)
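
A brief usage sketch of the new reference overload; Base and Derived are hypothetical types. The static_assert rejects casts to unrelated types at compile time, while (unlike dynamic_cast) the cast itself performs no runtime check.

  struct Base { virtual ~Base() {} };
  struct Derived : Base {};

  void Use(Base& base) {
    // OK if `base` really is a Derived; undefined behavior otherwise.
    Derived& derived = down_cast<Derived&>(base);
    (void)derived;
  }
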
diff --git a/runtime/cdex/compact_dex_file.h b/runtime/cdex/compact_dex_file.h
index 8ab9247..f17f8cf 100644
--- a/runtime/cdex/compact_dex_file.h
+++ b/runtime/cdex/compact_dex_file.h
@@ -24,11 +24,18 @@
// CompactDex is a currently ART internal dex file format that aims to reduce storage/RAM usage.
class CompactDexFile : public DexFile {
public:
+ static constexpr uint8_t kDexMagic[kDexMagicSize] = { 'c', 'd', 'e', 'x' };
+ static constexpr uint8_t kDexMagicVersion[] = {'0', '0', '1', '\0'};
+
class Header : public DexFile::Header {
// Same for now.
};
- static constexpr uint8_t kDexMagic[kDexMagicSize] = { 'c', 'd', 'e', 'x' };
- static constexpr uint8_t kDexMagicVersion[] = {'0', '0', '1', '\0'};
+
+ struct CodeItem : public DexFile::CodeItem {
+ private:
+ // TODO: Insert compact dex specific fields here.
+ DISALLOW_COPY_AND_ASSIGN(CodeItem);
+ };
// Write the compact dex specific magic.
static void WriteMagic(uint8_t* magic);
@@ -44,10 +51,6 @@
static bool IsVersionValid(const uint8_t* magic);
virtual bool IsVersionValid() const OVERRIDE;
- bool IsCompactDexFile() const OVERRIDE {
- return true;
- }
-
private:
// Not supported yet.
CompactDexFile(const uint8_t* base,
@@ -56,7 +59,13 @@
uint32_t location_checksum,
const OatDexFile* oat_dex_file,
DexFileContainer* container)
- : DexFile(base, size, location, location_checksum, oat_dex_file, container) {}
+ : DexFile(base,
+ size,
+ location,
+ location_checksum,
+ oat_dex_file,
+ container,
+ /*is_compact_dex*/ true) {}
friend class DexFile;
friend class DexFileLoader;
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 28caf81..38dd761 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -3340,6 +3340,11 @@
}
}
}
+ if (UNLIKELY((access_flags & kAccNative) != 0u)) {
+ // Check if the native method is annotated with @FastNative or @CriticalNative.
+ access_flags |= annotations::GetNativeMethodAnnotationAccessFlags(
+ dex_file, dst->GetClassDef(), dex_method_idx);
+ }
dst->SetAccessFlags(access_flags);
}
@@ -7048,6 +7053,7 @@
// verified yet it shouldn't have methods that are skipping access checks.
// TODO This is rather arbitrary. We should maybe support classes where only some of its
// methods are skip_access_checks.
+ DCHECK_EQ(new_method.GetAccessFlags() & kAccNative, 0u);
constexpr uint32_t kSetFlags = kAccDefault | kAccCopied;
constexpr uint32_t kMaskFlags = ~kAccSkipAccessChecks;
new_method.SetAccessFlags((new_method.GetAccessFlags() | kSetFlags) & kMaskFlags);
@@ -7070,6 +7076,7 @@
// mark this as a default, non-abstract method, since thats what it is. Also clear the
// kAccSkipAccessChecks bit since this class hasn't been verified yet it shouldn't have
// methods that are skipping access checks.
+ DCHECK_EQ(new_method.GetAccessFlags() & kAccNative, 0u);
constexpr uint32_t kSetFlags = kAccDefault | kAccDefaultConflict | kAccCopied;
constexpr uint32_t kMaskFlags = ~(kAccAbstract | kAccSkipAccessChecks);
new_method.SetAccessFlags((new_method.GetAccessFlags() | kSetFlags) & kMaskFlags);
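
For reference, the contract of the helper used in the class_linker.cc hunk above, inferred from its call site and from the dex_file_annotations.cc changes at the end of this patch; the signature appears in the diff, but the return-value description is an inference.

  // Returns the access-flag bits implied by the method's optimization
  // annotations: kAccFastNative for @FastNative, kAccCriticalNative for
  // @CriticalNative, or 0 when neither annotation is present.
  uint32_t GetNativeMethodAnnotationAccessFlags(const DexFile& dex_file,
                                                const DexFile::ClassDef& class_def,
                                                uint32_t method_index);
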
diff --git a/runtime/code_item_accessors-inl.h b/runtime/code_item_accessors-inl.h
new file mode 100644
index 0000000..61b5175
--- /dev/null
+++ b/runtime/code_item_accessors-inl.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_CODE_ITEM_ACCESSORS_INL_H_
+#define ART_RUNTIME_CODE_ITEM_ACCESSORS_INL_H_
+
+#include "code_item_accessors.h"
+
+#include "art_method-inl.h"
+#include "cdex/compact_dex_file.h"
+#include "standard_dex_file.h"
+
+namespace art {
+
+inline void CodeItemInstructionAccessor::Init(const CompactDexFile::CodeItem& code_item) {
+ insns_size_in_code_units_ = code_item.insns_size_in_code_units_;
+ insns_ = code_item.insns_;
+}
+
+inline void CodeItemInstructionAccessor::Init(const StandardDexFile::CodeItem& code_item) {
+ insns_size_in_code_units_ = code_item.insns_size_in_code_units_;
+ insns_ = code_item.insns_;
+}
+
+inline void CodeItemInstructionAccessor::Init(const DexFile* dex_file,
+ const DexFile::CodeItem* code_item) {
+ DCHECK(dex_file != nullptr);
+ DCHECK(code_item != nullptr);
+ if (dex_file->IsCompactDexFile()) {
+ Init(down_cast<const CompactDexFile::CodeItem&>(*code_item));
+ } else {
+ DCHECK(dex_file->IsStandardDexFile());
+ Init(down_cast<const StandardDexFile::CodeItem&>(*code_item));
+ }
+}
+
+inline CodeItemInstructionAccessor::CodeItemInstructionAccessor(
+ const DexFile* dex_file,
+ const DexFile::CodeItem* code_item) {
+ Init(dex_file, code_item);
+}
+
+inline CodeItemInstructionAccessor::CodeItemInstructionAccessor(ArtMethod* method)
+ : CodeItemInstructionAccessor(method->GetDexFile(), method->GetCodeItem()) {}
+
+inline DexInstructionIterator CodeItemInstructionAccessor::begin() const {
+ return DexInstructionIterator(insns_, 0u);
+}
+
+inline DexInstructionIterator CodeItemInstructionAccessor::end() const {
+ return DexInstructionIterator(insns_, insns_size_in_code_units_);
+}
+
+inline CodeItemInstructionAccessor CodeItemInstructionAccessor::CreateNullable(
+ ArtMethod* method) {
+ DCHECK(method != nullptr);
+ CodeItemInstructionAccessor ret;
+ const DexFile::CodeItem* code_item = method->GetCodeItem();
+ if (code_item != nullptr) {
+ ret.Init(method->GetDexFile(), code_item);
+ } else {
+ DCHECK(!ret.HasCodeItem()) << "Should be null initialized";
+ }
+ return ret;
+}
+
+inline void CodeItemDataAccessor::Init(const CompactDexFile::CodeItem& code_item) {
+ CodeItemInstructionAccessor::Init(code_item);
+ registers_size_ = code_item.registers_size_;
+ ins_size_ = code_item.ins_size_;
+ outs_size_ = code_item.outs_size_;
+ tries_size_ = code_item.tries_size_;
+}
+
+inline void CodeItemDataAccessor::Init(const StandardDexFile::CodeItem& code_item) {
+ CodeItemInstructionAccessor::Init(code_item);
+ registers_size_ = code_item.registers_size_;
+ ins_size_ = code_item.ins_size_;
+ outs_size_ = code_item.outs_size_;
+ tries_size_ = code_item.tries_size_;
+}
+
+inline void CodeItemDataAccessor::Init(const DexFile* dex_file,
+ const DexFile::CodeItem* code_item) {
+ DCHECK(dex_file != nullptr);
+ DCHECK(code_item != nullptr);
+ if (dex_file->IsCompactDexFile()) {
+ CodeItemDataAccessor::Init(down_cast<const CompactDexFile::CodeItem&>(*code_item));
+ } else {
+ DCHECK(dex_file->IsStandardDexFile());
+ CodeItemDataAccessor::Init(down_cast<const StandardDexFile::CodeItem&>(*code_item));
+ }
+}
+
+inline CodeItemDataAccessor::CodeItemDataAccessor(const DexFile* dex_file,
+ const DexFile::CodeItem* code_item) {
+ Init(dex_file, code_item);
+}
+
+inline CodeItemDataAccessor::CodeItemDataAccessor(ArtMethod* method)
+ : CodeItemDataAccessor(method->GetDexFile(), method->GetCodeItem()) {}
+
+inline CodeItemDataAccessor CodeItemDataAccessor::CreateNullable(ArtMethod* method) {
+ DCHECK(method != nullptr);
+ CodeItemDataAccessor ret;
+ const DexFile::CodeItem* code_item = method->GetCodeItem();
+ if (code_item != nullptr) {
+ ret.Init(method->GetDexFile(), code_item);
+ } else {
+ DCHECK(!ret.HasCodeItem()) << "Should be null initialized";
+ }
+ return ret;
+}
+
+} // namespace art
+
+#endif // ART_RUNTIME_CODE_ITEM_ACCESSORS_INL_H_
diff --git a/runtime/code_item_accessors.h b/runtime/code_item_accessors.h
new file mode 100644
index 0000000..fcece3e
--- /dev/null
+++ b/runtime/code_item_accessors.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// TODO: Dex helpers have ART specific APIs, we may want to refactor these for use in dexdump.
+
+#ifndef ART_RUNTIME_CODE_ITEM_ACCESSORS_H_
+#define ART_RUNTIME_CODE_ITEM_ACCESSORS_H_
+
+#include "base/mutex.h"
+#include "cdex/compact_dex_file.h"
+#include "dex_file.h"
+#include "dex_instruction_iterator.h"
+#include "standard_dex_file.h"
+
+namespace art {
+
+class ArtMethod;
+
+// Abstracts accesses to the instruction fields of code items for CompactDexFile and
+// StandardDexFile.
+class CodeItemInstructionAccessor {
+ public:
+ ALWAYS_INLINE CodeItemInstructionAccessor(const DexFile* dex_file,
+ const DexFile::CodeItem* code_item);
+
+ ALWAYS_INLINE explicit CodeItemInstructionAccessor(ArtMethod* method);
+
+ ALWAYS_INLINE DexInstructionIterator begin() const;
+
+ ALWAYS_INLINE DexInstructionIterator end() const;
+
+ uint32_t InsnsSizeInCodeUnits() const {
+ return insns_size_in_code_units_;
+ }
+
+ const uint16_t* Insns() const {
+ return insns_;
+ }
+
+ // Return true if the accessor has a code item.
+ bool HasCodeItem() const {
+ return Insns() != nullptr;
+ }
+
+ // CreateNullable allows ArtMethods that have a null code item.
+ ALWAYS_INLINE static CodeItemInstructionAccessor CreateNullable(ArtMethod* method)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ protected:
+ CodeItemInstructionAccessor() = default;
+
+ ALWAYS_INLINE void Init(const CompactDexFile::CodeItem& code_item);
+ ALWAYS_INLINE void Init(const StandardDexFile::CodeItem& code_item);
+ ALWAYS_INLINE void Init(const DexFile* dex_file, const DexFile::CodeItem* code_item);
+
+ private:
+ // Size of the insns array, in 2-byte code units. 0 if there is no code item.
+ uint32_t insns_size_in_code_units_ = 0;
+
+ // Pointer to the instructions, null if there is no code item.
+ const uint16_t* insns_ = nullptr;
+};
+
+// Abstracts accesses to code item fields other than debug info for CompactDexFile and
+// StandardDexFile.
+class CodeItemDataAccessor : public CodeItemInstructionAccessor {
+ public:
+ ALWAYS_INLINE CodeItemDataAccessor(const DexFile* dex_file, const DexFile::CodeItem* code_item);
+
+ ALWAYS_INLINE explicit CodeItemDataAccessor(ArtMethod* method);
+
+ uint16_t RegistersSize() const {
+ return registers_size_;
+ }
+
+ uint16_t InsSize() const {
+ return ins_size_;
+ }
+
+ uint16_t OutsSize() const {
+ return outs_size_;
+ }
+
+ uint16_t TriesSize() const {
+ return tries_size_;
+ }
+
+ // CreateNullable allows ArtMethods that have a null code item.
+ ALWAYS_INLINE static CodeItemDataAccessor CreateNullable(ArtMethod* method)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ protected:
+ CodeItemDataAccessor() = default;
+
+ ALWAYS_INLINE void Init(const CompactDexFile::CodeItem& code_item);
+ ALWAYS_INLINE void Init(const StandardDexFile::CodeItem& code_item);
+ ALWAYS_INLINE void Init(const DexFile* dex_file, const DexFile::CodeItem* code_item);
+
+ private:
+ // Fields mirrored from the dex/cdex code item.
+ uint16_t registers_size_;
+ uint16_t ins_size_;
+ uint16_t outs_size_;
+ uint16_t tries_size_;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_CODE_ITEM_ACCESSORS_H_
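
A hedged usage sketch of the data accessor: it hides whether the code item came from a standard or a compact dex file, and CreateNullable() is the safe entry point for methods that may have no code item. `method` is an assumed ArtMethod*.

  CodeItemDataAccessor accessor = CodeItemDataAccessor::CreateNullable(method);
  if (accessor.HasCodeItem()) {
    uint16_t registers = accessor.RegistersSize();
    uint32_t code_units = accessor.InsnsSizeInCodeUnits();
    // ... use registers/code_units ...
  }
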
diff --git a/runtime/code_item_accessors_test.cc b/runtime/code_item_accessors_test.cc
new file mode 100644
index 0000000..ef5d246
--- /dev/null
+++ b/runtime/code_item_accessors_test.cc
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "code_item_accessors-inl.h"
+
+#include <memory>
+
+#include "common_runtime_test.h"
+#include "dex_file_loader.h"
+#include "mem_map.h"
+
+namespace art {
+
+class CodeItemAccessorsTest : public CommonRuntimeTest {};
+
+std::unique_ptr<const DexFile> CreateFakeDex(bool compact_dex) {
+ std::string error_msg;
+ std::unique_ptr<MemMap> map(
+ MemMap::MapAnonymous(/*name*/ "map",
+ /*addr*/ nullptr,
+ /*byte_count*/ kPageSize,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb*/ false,
+ /*reuse*/ false,
+ &error_msg));
+ CHECK(map != nullptr) << error_msg;
+ if (compact_dex) {
+ CompactDexFile::WriteMagic(map->Begin());
+ CompactDexFile::WriteCurrentVersion(map->Begin());
+ } else {
+ StandardDexFile::WriteMagic(map->Begin());
+ StandardDexFile::WriteCurrentVersion(map->Begin());
+ }
+ std::unique_ptr<const DexFile> dex(
+ DexFileLoader::Open("location",
+ /*location_checksum*/ 123,
+ std::move(map),
+ /*verify*/false,
+ /*verify_checksum*/false,
+ &error_msg));
+ CHECK(dex != nullptr) << error_msg;
+ return dex;
+}
+
+TEST(CodeItemAccessorsTest, TestDexInstructionsAccessor) {
+ MemMap::Init();
+ std::unique_ptr<const DexFile> standard_dex(CreateFakeDex(/*compact_dex*/false));
+ ASSERT_TRUE(standard_dex != nullptr);
+ std::unique_ptr<const DexFile> compact_dex(CreateFakeDex(/*compact_dex*/true));
+ ASSERT_TRUE(compact_dex != nullptr);
+ static constexpr uint16_t kRegisterSize = 1;
+ static constexpr uint16_t kInsSize = 2;
+ static constexpr uint16_t kOutsSize = 3;
+ static constexpr uint16_t kTriesSize = 4;
+ // debug_info_off_ is not accessible from the helpers yet.
+ static constexpr size_t kInsnsSizeInCodeUnits = 5;
+
+ auto verify_code_item = [&](const DexFile* dex,
+ const DexFile::CodeItem* item,
+ const uint16_t* insns) {
+ CodeItemInstructionAccessor insns_accessor(dex, item);
+ EXPECT_TRUE(insns_accessor.HasCodeItem());
+ ASSERT_EQ(insns_accessor.InsnsSizeInCodeUnits(), kInsnsSizeInCodeUnits);
+ EXPECT_EQ(insns_accessor.Insns(), insns);
+
+ CodeItemDataAccessor data_accessor(dex, item);
+ EXPECT_TRUE(data_accessor.HasCodeItem());
+ EXPECT_EQ(data_accessor.InsnsSizeInCodeUnits(), kInsnsSizeInCodeUnits);
+ EXPECT_EQ(data_accessor.Insns(), insns);
+ EXPECT_EQ(data_accessor.RegistersSize(), kRegisterSize);
+ EXPECT_EQ(data_accessor.InsSize(), kInsSize);
+ EXPECT_EQ(data_accessor.OutsSize(), kOutsSize);
+ EXPECT_EQ(data_accessor.TriesSize(), kTriesSize);
+ };
+
+ uint8_t buffer1[sizeof(StandardDexFile::CodeItem) + kInsnsSizeInCodeUnits * sizeof(uint16_t)] = {};
+ StandardDexFile::CodeItem* dex_code_item = reinterpret_cast<StandardDexFile::CodeItem*>(buffer1);
+ dex_code_item->registers_size_ = kRegisterSize;
+ dex_code_item->ins_size_ = kInsSize;
+ dex_code_item->outs_size_ = kOutsSize;
+ dex_code_item->tries_size_ = kTriesSize;
+ dex_code_item->insns_size_in_code_units_ = kInsnsSizeInCodeUnits;
+ verify_code_item(standard_dex.get(), dex_code_item, dex_code_item->insns_);
+
+ uint8_t buffer2[sizeof(CompactDexFile::CodeItem) + kInsnsSizeInCodeUnits * sizeof(uint16_t)] = {};
+ CompactDexFile::CodeItem* cdex_code_item = reinterpret_cast<CompactDexFile::CodeItem*>(buffer2);
+ cdex_code_item->registers_size_ = kRegisterSize;
+ cdex_code_item->ins_size_ = kInsSize;
+ cdex_code_item->outs_size_ = kOutsSize;
+ cdex_code_item->tries_size_ = kTriesSize;
+ cdex_code_item->insns_size_in_code_units_ = kInsnsSizeInCodeUnits;
+ verify_code_item(compact_dex.get(), cdex_code_item, cdex_code_item->insns_);
+}
+
+} // namespace art
diff --git a/runtime/dex_file-inl.h b/runtime/dex_file-inl.h
index 5dfbd9b..58cd486 100644
--- a/runtime/dex_file-inl.h
+++ b/runtime/dex_file-inl.h
@@ -21,9 +21,11 @@
#include "base/casts.h"
#include "base/logging.h"
#include "base/stringpiece.h"
+#include "cdex/compact_dex_file.h"
#include "dex_file.h"
#include "invoke_type.h"
#include "leb128.h"
+#include "standard_dex_file.h"
namespace art {
@@ -495,6 +497,16 @@
context);
}
+inline const CompactDexFile* DexFile::AsCompactDexFile() const {
+ DCHECK(IsCompactDexFile());
+ return down_cast<const CompactDexFile*>(this);
+}
+
+inline const StandardDexFile* DexFile::AsStandardDexFile() const {
+ DCHECK(IsStandardDexFile());
+ return down_cast<const StandardDexFile*>(this);
+}
+
} // namespace art
#endif // ART_RUNTIME_DEX_FILE_INL_H_
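
A short usage sketch of the non-virtual type test plus the checked downcasts added above; the DCHECKs inside the As*DexFile() helpers catch mismatched casts in debug builds.

  void Inspect(const DexFile* dex_file) {
    if (dex_file->IsCompactDexFile()) {
      const CompactDexFile* cdex = dex_file->AsCompactDexFile();
      (void)cdex;  // compact-dex-specific handling
    } else {
      const StandardDexFile* sdex = dex_file->AsStandardDexFile();
      (void)sdex;  // standard-dex-specific handling
    }
  }
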
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 974c7ac..7b0c46b 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -77,7 +77,8 @@
const std::string& location,
uint32_t location_checksum,
const OatDexFile* oat_dex_file,
- DexFileContainer* container)
+ DexFileContainer* container,
+ bool is_compact_dex)
: begin_(base),
size_(size),
location_(location),
@@ -94,7 +95,8 @@
call_site_ids_(nullptr),
num_call_site_ids_(0),
oat_dex_file_(oat_dex_file),
- container_(container) {
+ container_(container),
+ is_compact_dex_(is_compact_dex) {
CHECK(begin_ != nullptr) << GetLocation();
CHECK_GT(size_, 0U) << GetLocation();
// Check base (=header) alignment.
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 5c9b258..5c0093f 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -32,10 +32,12 @@
namespace art {
+class CompactDexFile;
enum InvokeType : uint32_t;
class MemMap;
class OatDexFile;
class Signature;
+class StandardDexFile;
class StringPiece;
class ZipArchive;
@@ -993,13 +995,15 @@
// Returns a human-readable form of the type at an index.
std::string PrettyType(dex::TypeIndex type_idx) const;
- // Helper functions.
- virtual bool IsCompactDexFile() const {
- return false;
+ // Not virtual for performance reasons.
+ ALWAYS_INLINE bool IsCompactDexFile() const {
+ return is_compact_dex_;
}
- virtual bool IsStandardDexFile() const {
- return false;
+ ALWAYS_INLINE bool IsStandardDexFile() const {
+ return !is_compact_dex_;
}
+ ALWAYS_INLINE const StandardDexFile* AsStandardDexFile() const;
+ ALWAYS_INLINE const CompactDexFile* AsCompactDexFile() const;
protected:
DexFile(const uint8_t* base,
@@ -1007,7 +1011,8 @@
const std::string& location,
uint32_t location_checksum,
const OatDexFile* oat_dex_file,
- DexFileContainer* container);
+ DexFileContainer* container,
+ bool is_compact_dex);
// Top-level initializer that calls other Init methods.
bool Init(std::string* error_msg);
@@ -1073,6 +1078,9 @@
// Manages the underlying memory allocation.
std::unique_ptr<DexFileContainer> container_;
+ // Whether the dex file is a compact dex file. If false, the dex file is a standard dex file.
+ const bool is_compact_dex_;
+
friend class DexFileLoader;
friend class DexFileVerifierTest;
friend class OatWriter;
diff --git a/runtime/dex_file_annotations.cc b/runtime/dex_file_annotations.cc
index 5496efd..27060ae 100644
--- a/runtime/dex_file_annotations.cc
+++ b/runtime/dex_file_annotations.cc
@@ -1239,8 +1239,11 @@
ScopedObjectAccess soa(Thread::Current());
ObjPtr<mirror::Class> klass = soa.Decode<mirror::Class>(cls);
ClassLinker* linker = Runtime::Current()->GetClassLinker();
- // Lookup using the boot class path loader should yield the annotation class.
- CHECK_EQ(klass, linker->LookupClass(soa.Self(), descriptor, /* class_loader */ nullptr));
+ // WellKnownClasses may not be initialized yet, so `klass` may be null.
+ if (klass != nullptr) {
+ // Lookup using the boot class path loader should yield the annotation class.
+ CHECK_EQ(klass, linker->LookupClass(soa.Self(), descriptor, /* class_loader */ nullptr));
+ }
}
}
@@ -1266,30 +1269,31 @@
return false;
}
-uint32_t HasFastNativeMethodBuildAnnotation(const DexFile& dex_file,
- const DexFile::ClassDef& class_def,
- uint32_t method_index) {
+uint32_t GetNativeMethodAnnotationAccessFlags(const DexFile& dex_file,
+ const DexFile::ClassDef& class_def,
+ uint32_t method_index) {
const DexFile::AnnotationSetItem* annotation_set =
FindAnnotationSetForMethod(dex_file, class_def, method_index);
- return annotation_set != nullptr &&
- IsMethodBuildAnnotationPresent(
- dex_file,
- *annotation_set,
- "Ldalvik/annotation/optimization/FastNative;",
- WellKnownClasses::dalvik_annotation_optimization_FastNative);
-}
-
-uint32_t HasCriticalNativeMethodBuildAnnotation(const DexFile& dex_file,
- const DexFile::ClassDef& class_def,
- uint32_t method_index) {
- const DexFile::AnnotationSetItem* annotation_set =
- FindAnnotationSetForMethod(dex_file, class_def, method_index);
- return annotation_set != nullptr &&
- IsMethodBuildAnnotationPresent(
- dex_file,
- *annotation_set,
- "Ldalvik/annotation/optimization/CriticalNative;",
- WellKnownClasses::dalvik_annotation_optimization_CriticalNative);
+ if (annotation_set == nullptr) {
+ return 0u;
+ }
+ uint32_t access_flags = 0u;
+ if (IsMethodBuildAnnotationPresent(
+ dex_file,
+ *annotation_set,
+ "Ldalvik/annotation/optimization/FastNative;",
+ WellKnownClasses::dalvik_annotation_optimization_FastNative)) {
+ access_flags |= kAccFastNative;
+ }
+ if (IsMethodBuildAnnotationPresent(
+ dex_file,
+ *annotation_set,
+ "Ldalvik/annotation/optimization/CriticalNative;",
+ WellKnownClasses::dalvik_annotation_optimization_CriticalNative)) {
+ access_flags |= kAccCriticalNative;
+ }
+ CHECK_NE(access_flags, kAccFastNative | kAccCriticalNative);
+ return access_flags;
}
mirror::Object* GetAnnotationForClass(Handle<mirror::Class> klass,
diff --git a/runtime/dex_file_annotations.h b/runtime/dex_file_annotations.h
index b1c6b77..a934a4f 100644
--- a/runtime/dex_file_annotations.h
+++ b/runtime/dex_file_annotations.h
@@ -75,15 +75,12 @@
uint32_t visibility = DexFile::kDexVisibilityRuntime)
REQUIRES_SHARED(Locks::mutator_lock_);
// Check whether a method from the `dex_file` with the given `method_index`
-// is annotated with @dalvik.annotation.optimization.FastNative with build visibility.
-uint32_t HasFastNativeMethodBuildAnnotation(const DexFile& dex_file,
- const DexFile::ClassDef& class_def,
- uint32_t method_index);
-// Check whether a method from the `dex_file` with the given `method_index`
-// is annotated with @dalvik.annotation.optimization.CriticalNative with build visibility.
-uint32_t HasCriticalNativeMethodBuildAnnotation(const DexFile& dex_file,
- const DexFile::ClassDef& class_def,
- uint32_t method_index);
+// is annotated with @dalvik.annotation.optimization.FastNative or
+// @dalvik.annotation.optimization.CriticalNative with build visibility.
+// If yes, return the associated access flags, i.e. kAccFastNative or kAccCriticalNative.
+uint32_t GetNativeMethodAnnotationAccessFlags(const DexFile& dex_file,
+ const DexFile::ClassDef& class_def,
+ uint32_t method_index);
// Class annotations.
mirror::Object* GetAnnotationForClass(Handle<mirror::Class> klass,
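The expected caller is the class linker when loading a native method: the returned bits are folded into the method's access flags once, instead of looking up annotations at invocation time. A sketch of such a call site (illustrative only; `access_flags`, `class_def`, and `dex_method_index` are assumed locals, and the function is assumed to live in the `annotations` namespace of this header):

    // At method load time, record @FastNative/@CriticalNative in the access flags.
    if ((access_flags & kAccNative) != 0u) {
      access_flags |= annotations::GetNativeMethodAnnotationAccessFlags(
          dex_file, class_def, dex_method_index);
    }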
diff --git a/runtime/entrypoints/jni/jni_entrypoints.cc b/runtime/entrypoints/jni/jni_entrypoints.cc
index dd0819e..7ec360a 100644
--- a/runtime/entrypoints/jni/jni_entrypoints.cc
+++ b/runtime/entrypoints/jni/jni_entrypoints.cc
@@ -46,7 +46,7 @@
return nullptr;
}
// Register so that future calls don't come here
- return method->RegisterNative(native_code, false);
+ return method->RegisterNative(native_code);
}
} // namespace art
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index a8d2a34..29a62c8 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -28,10 +28,7 @@
static_assert(sizeof(IRTSegmentState) == sizeof(uint32_t), "IRTSegmentState size unexpected");
static_assert(std::is_trivial<IRTSegmentState>::value, "IRTSegmentState not trivial");
-static bool kEnableAnnotationChecks = RegisterRuntimeDebugFlag(&kEnableAnnotationChecks);
-
-template <bool kDynamicFast>
-static inline void GoToRunnableFast(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
+static inline void GoToRunnableFast(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
extern void ReadBarrierJni(mirror::CompressedReference<mirror::Object>* handle_on_stack,
Thread* self ATTRIBUTE_UNUSED) {
@@ -56,9 +53,9 @@
uint32_t saved_local_ref_cookie = bit_cast<uint32_t>(env->local_ref_cookie);
env->local_ref_cookie = env->locals.GetSegmentState();
- if (kIsDebugBuild && kEnableAnnotationChecks) {
+ if (kIsDebugBuild) {
ArtMethod* native_method = *self->GetManagedStack()->GetTopQuickFrame();
- CHECK(native_method->IsAnnotatedWithFastNative()) << native_method->PrettyMethod();
+ CHECK(native_method->IsFastNative()) << native_method->PrettyMethod();
}
return saved_local_ref_cookie;
@@ -71,6 +68,9 @@
uint32_t saved_local_ref_cookie = bit_cast<uint32_t>(env->local_ref_cookie);
env->local_ref_cookie = env->locals.GetSegmentState();
ArtMethod* native_method = *self->GetManagedStack()->GetTopQuickFrame();
+ // TODO: Introduce special entrypoint for synchronized @FastNative methods?
+ // Or ban synchronized @FastNative outright to avoid the extra check here?
+ DCHECK(!native_method->IsFastNative() || native_method->IsSynchronized());
if (!native_method->IsFastNative()) {
// When not fast JNI we transition out of runnable.
self->TransitionFromRunnableToSuspended(kNative);
@@ -90,25 +90,18 @@
if (!is_fast) {
self->TransitionFromSuspendedToRunnable();
} else {
- GoToRunnableFast</*kDynamicFast*/true>(self);
+ GoToRunnableFast(self);
}
}
-// TODO: NO_THREAD_SAFETY_ANALYSIS due to different control paths depending on fast JNI.
-template <bool kDynamicFast>
-ALWAYS_INLINE static inline void GoToRunnableFast(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
- if (kIsDebugBuild && kEnableAnnotationChecks) {
- // Should only enter here if the method is !Fast JNI or @FastNative.
+ALWAYS_INLINE static inline void GoToRunnableFast(Thread* self) {
+ if (kIsDebugBuild) {
+ // Should only enter here if the method is @FastNative.
ArtMethod* native_method = *self->GetManagedStack()->GetTopQuickFrame();
-
- if (kDynamicFast) {
- CHECK(native_method->IsFastNative()) << native_method->PrettyMethod();
- } else {
- CHECK(native_method->IsAnnotatedWithFastNative()) << native_method->PrettyMethod();
- }
+ CHECK(native_method->IsFastNative()) << native_method->PrettyMethod();
}
- // When we are in "fast" JNI or @FastNative, we are already Runnable.
+ // When we are in @FastNative, we are already Runnable.
// Only do a suspend check on the way out of JNI.
if (UNLIKELY(self->TestAllFlags())) {
// In fast JNI mode we never transitioned out of runnable. Perform a suspend check if there
@@ -138,7 +131,7 @@
}
extern void JniMethodFastEnd(uint32_t saved_local_ref_cookie, Thread* self) {
- GoToRunnableFast</*kDynamicFast*/false>(self);
+ GoToRunnableFast(self);
PopLocalReferences(saved_local_ref_cookie, self);
}
@@ -175,7 +168,7 @@
extern mirror::Object* JniMethodFastEndWithReference(jobject result,
uint32_t saved_local_ref_cookie,
Thread* self) {
- GoToRunnableFast</*kDynamicFast*/false>(self);
+ GoToRunnableFast(self);
return JniMethodEndWithReferenceHandleResult(result, saved_local_ref_cookie, self);
}
@@ -203,8 +196,8 @@
HandleScope* handle_scope)
// TODO: NO_THREAD_SAFETY_ANALYSIS as GoToRunnable() is NO_THREAD_SAFETY_ANALYSIS
NO_THREAD_SAFETY_ANALYSIS {
- bool critical_native = called->IsAnnotatedWithCriticalNative();
- bool fast_native = called->IsAnnotatedWithFastNative();
+ bool critical_native = called->IsCriticalNative();
+ bool fast_native = called->IsFastNative();
bool normal_native = !critical_native && !fast_native;
// @Fast and @CriticalNative do not do a state transition.
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index a4a8c34..127b5d7 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -2171,32 +2171,14 @@
*/
extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, ArtMethod** sp)
REQUIRES_SHARED(Locks::mutator_lock_) {
+ // Note: We cannot walk the stack properly until fixed up below.
ArtMethod* called = *sp;
DCHECK(called->IsNative()) << called->PrettyMethod(true);
- // Fix up a callee-save frame at the bottom of the stack (at `*sp`,
- // above the alloca region) while we check for optimization
- // annotations, thus allowing stack walking until the completion of
- // the JNI frame creation.
- //
- // Note however that the Generic JNI trampoline does not expect
- // exception being thrown at that stage.
- *sp = Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs);
- self->SetTopOfStack(sp);
uint32_t shorty_len = 0;
const char* shorty = called->GetShorty(&shorty_len);
- // Optimization annotations lookup does not try to resolve classes,
- // as this may throw an exception, which is not supported by the
- // Generic JNI trampoline at this stage; instead, method's
- // annotations' classes are looked up in the bootstrap class
- // loader's resolved types (which won't trigger an exception).
- CHECK(!self->IsExceptionPending());
- bool critical_native = called->IsAnnotatedWithCriticalNative();
- CHECK(!self->IsExceptionPending());
- bool fast_native = called->IsAnnotatedWithFastNative();
- CHECK(!self->IsExceptionPending());
+ bool critical_native = called->IsCriticalNative();
+ bool fast_native = called->IsFastNative();
bool normal_native = !critical_native && !fast_native;
- // Restore the initial ArtMethod pointer at `*sp`.
- *sp = called;
// Run the visitor and update sp.
BuildGenericJniFrameVisitor visitor(self,
@@ -2212,7 +2194,7 @@
visitor.FinalizeHandleScope(self);
}
- // Fix up managed-stack things in Thread.
+ // Fix up managed-stack things in Thread. After this we can walk the stack.
self->SetTopOfStack(sp);
self->VerifyStack();
diff --git a/runtime/image.cc b/runtime/image.cc
index cf5feac..8f35d84 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -26,7 +26,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '5', '0', '\0' }; // strcmp() @FastNative.
+const uint8_t ImageHeader::kImageVersion[] = { '0', '5', '1', '\0' }; // @FastNative access flags.
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
diff --git a/runtime/jit/profiling_info.cc b/runtime/jit/profiling_info.cc
index 1344ca0..e54a017 100644
--- a/runtime/jit/profiling_info.cc
+++ b/runtime/jit/profiling_info.cc
@@ -44,8 +44,7 @@
DCHECK(!method->IsNative());
std::vector<uint32_t> entries;
-
- for (const DexInstructionPcPair& inst : method->GetCodeItem()->Instructions()) {
+ for (const DexInstructionPcPair& inst : method->DexInstructions()) {
switch (inst->Opcode()) {
case Instruction::INVOKE_VIRTUAL:
case Instruction::INVOKE_VIRTUAL_RANGE:
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 5164c85..1e55158 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -2364,7 +2364,7 @@
// TODO: make this a hard register error in the future.
}
- const void* final_function_ptr = m->RegisterNative(fnPtr, is_fast);
+ const void* final_function_ptr = m->RegisterNative(fnPtr);
UNUSED(final_function_ptr);
}
return JNI_OK;
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 4d810db..892c039 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -1244,7 +1244,6 @@
// still return a synthetic method to handle situations like
// escalated visibility. We never return miranda methods that
// were synthesized by the runtime.
- constexpr uint32_t kSkipModifiers = kAccMiranda | kAccSynthetic;
StackHandleScope<3> hs(self);
auto h_method_name = hs.NewHandle(name);
if (UNLIKELY(h_method_name == nullptr)) {
@@ -1264,11 +1263,10 @@
}
continue;
}
- auto modifiers = m.GetAccessFlags();
- if ((modifiers & kSkipModifiers) == 0) {
- return Method::CreateFromArtMethod<kPointerSize, kTransactionActive>(self, &m);
- }
- if ((modifiers & kAccMiranda) == 0) {
+ if (!m.IsMiranda()) {
+ if (!m.IsSynthetic()) {
+ return Method::CreateFromArtMethod<kPointerSize, kTransactionActive>(self, &m);
+ }
result = &m; // Remember as potential result if it's not a miranda method.
}
}
@@ -1291,11 +1289,11 @@
}
continue;
}
- if ((modifiers & kSkipModifiers) == 0) {
+ DCHECK(!m.IsMiranda()); // Direct methods cannot be miranda methods.
+ if ((modifiers & kAccSynthetic) == 0) {
return Method::CreateFromArtMethod<kPointerSize, kTransactionActive>(self, &m);
}
- // Direct methods cannot be miranda methods, so this potential result must be synthetic.
- result = &m;
+ result = &m; // Remember as potential result.
}
}
return result != nullptr
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index bf49f51..c545a9b 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -286,7 +286,7 @@
// This does not necessarily mean that access checks are avoidable,
// since the class methods might still need to be run with access checks.
bool WasVerificationAttempted() REQUIRES_SHARED(Locks::mutator_lock_) {
- return (GetAccessFlags() & kAccSkipAccessChecks) != 0;
+ return (GetAccessFlags() & kAccVerificationAttempted) != 0;
}
// Mark the class as having gone through a verification attempt.
diff --git a/runtime/modifiers.h b/runtime/modifiers.h
index 4b790a0..d7d647b 100644
--- a/runtime/modifiers.h
+++ b/runtime/modifiers.h
@@ -49,17 +49,21 @@
// declaring class. This flag may only be applied to methods.
static constexpr uint32_t kAccObsoleteMethod = 0x00040000; // method (runtime)
// Used by a method to denote that its execution does not need to go through slow path interpreter.
-static constexpr uint32_t kAccSkipAccessChecks = 0x00080000; // method (dex only)
+static constexpr uint32_t kAccSkipAccessChecks = 0x00080000; // method (runtime, not native)
// Used by a class to denote that the verifier has attempted to check it at least once.
static constexpr uint32_t kAccVerificationAttempted = 0x00080000; // class (runtime)
-static constexpr uint32_t kAccFastNative = 0x00080000; // method (dex only)
// This is set by the class linker during LinkInterfaceMethods. It is used by a method to represent
// that it was copied from its declaring class into another class. All methods marked kAccMiranda
// and kAccDefaultConflict will have this bit set. Any kAccDefault method contained in the methods_
// array of a concrete class will also have this bit set.
static constexpr uint32_t kAccCopied = 0x00100000; // method (runtime)
-static constexpr uint32_t kAccMiranda = 0x00200000; // method (dex only)
+static constexpr uint32_t kAccMiranda = 0x00200000; // method (runtime, not native)
static constexpr uint32_t kAccDefault = 0x00400000; // method (runtime)
+// Native method flags are set when linking the methods based on the presence of the
+// @dalvik.annotation.optimization.{Fast,Critical}Native annotations with build visibility.
+// Reuse the values of kAccSkipAccessChecks and kAccMiranda, which are not used for native methods.
+static constexpr uint32_t kAccFastNative = 0x00080000; // method (runtime; native only)
+static constexpr uint32_t kAccCriticalNative = 0x00200000; // method (runtime; native only)
// Set by the JIT when clearing profiling infos to denote that a method was previously warm.
static constexpr uint32_t kAccPreviouslyWarm = 0x00800000; // method (runtime)
@@ -106,8 +110,9 @@
// Valid (meaningful) bits for a method.
static constexpr uint32_t kAccValidMethodFlags = kAccPublic | kAccPrivate | kAccProtected |
kAccStatic | kAccFinal | kAccSynchronized | kAccBridge | kAccVarargs | kAccNative |
- kAccAbstract | kAccStrict | kAccSynthetic | kAccMiranda | kAccConstructor |
- kAccDeclaredSynchronized | kAccPreviouslyWarm;
+ kAccAbstract | kAccStrict | kAccSynthetic | kAccConstructor | kAccDeclaredSynchronized;
+static_assert(((kAccIntrinsic | kAccIntrinsicBits) & kAccValidMethodFlags) == 0,
+ "Intrinsic bits and valid dex file method access flags must not overlap.");
// Valid (meaningful) bits for a class (not interface).
// Note 1. These are positive bits. Other bits may have to be zero.
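Since kAccFastNative aliases kAccSkipAccessChecks and kAccCriticalNative aliases kAccMiranda, any test of these bits must first qualify on kAccNative. A sketch of the required disambiguation, using hypothetical free functions that mirror the ArtMethod accessors:

    // The same bit value means different things for native and non-native methods.
    constexpr bool IsFastNative(uint32_t access_flags) {
      constexpr uint32_t kMask = kAccNative | kAccFastNative;
      return (access_flags & kMask) == kMask;
    }
    constexpr bool IsMiranda(uint32_t access_flags) {
      // kAccMiranda is only meaningful when kAccNative is not set.
      return (access_flags & (kAccNative | kAccMiranda)) == kAccMiranda;
    }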
diff --git a/runtime/native/scoped_fast_native_object_access-inl.h b/runtime/native/scoped_fast_native_object_access-inl.h
index b2abc46..20ff76e 100644
--- a/runtime/native/scoped_fast_native_object_access-inl.h
+++ b/runtime/native/scoped_fast_native_object_access-inl.h
@@ -27,7 +27,7 @@
inline ScopedFastNativeObjectAccess::ScopedFastNativeObjectAccess(JNIEnv* env)
: ScopedObjectAccessAlreadyRunnable(env) {
Locks::mutator_lock_->AssertSharedHeld(Self());
- DCHECK((*Self()->GetManagedStack()->GetTopQuickFrame())->IsAnnotatedWithFastNative());
+ DCHECK((*Self()->GetManagedStack()->GetTopQuickFrame())->IsFastNative());
// Don't work with raw objects in non-runnable states.
DCHECK_EQ(Self()->GetState(), kRunnable);
}
diff --git a/runtime/standard_dex_file.cc b/runtime/standard_dex_file.cc
index 36bb37a..4c1d308 100644
--- a/runtime/standard_dex_file.cc
+++ b/runtime/standard_dex_file.cc
@@ -31,6 +31,16 @@
{'0', '3', '9', '\0'},
};
+void StandardDexFile::WriteMagic(uint8_t* magic) {
+ std::copy_n(kDexMagic, kDexMagicSize, magic);
+}
+
+void StandardDexFile::WriteCurrentVersion(uint8_t* magic) {
+ std::copy_n(kDexMagicVersions[StandardDexFile::kNumDexVersions - 1],
+ kDexVersionLen,
+ magic + kDexMagicSize);
+}
+
bool StandardDexFile::IsMagicValid(const uint8_t* magic) {
return (memcmp(magic, kDexMagic, sizeof(kDexMagic)) == 0);
}
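These writers let code that synthesizes dex files in memory (such as the CreateFakeDex test helper above) stamp a valid header without hard-coding the magic bytes. A minimal sketch, assuming a zero-initialized header buffer:

    // Stamp the standard dex magic followed by the current version string.
    std::vector<uint8_t> header(sizeof(DexFile::Header), 0u);
    StandardDexFile::WriteMagic(header.data());
    StandardDexFile::WriteCurrentVersion(header.data());
    CHECK(StandardDexFile::IsMagicValid(header.data()));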
diff --git a/runtime/standard_dex_file.h b/runtime/standard_dex_file.h
index 784ab31..5d53597 100644
--- a/runtime/standard_dex_file.h
+++ b/runtime/standard_dex_file.h
@@ -32,6 +32,18 @@
// Same for now.
};
+ struct CodeItem : public DexFile::CodeItem {
+ private:
+ // TODO: Insert standard dex specific fields here.
+ DISALLOW_COPY_AND_ASSIGN(CodeItem);
+ };
+
+ // Write the standard dex specific magic.
+ static void WriteMagic(uint8_t* magic);
+
+ // Write the current version; note that the input is the address of the magic, not of the version field.
+ static void WriteCurrentVersion(uint8_t* magic);
+
static const uint8_t kDexMagic[kDexMagicSize];
static constexpr size_t kNumDexVersions = 4;
static const uint8_t kDexMagicVersions[kNumDexVersions][kDexVersionLen];
@@ -44,10 +56,6 @@
static bool IsVersionValid(const uint8_t* magic);
virtual bool IsVersionValid() const OVERRIDE;
- bool IsStandardDexFile() const OVERRIDE {
- return true;
- }
-
private:
StandardDexFile(const uint8_t* base,
size_t size,
@@ -55,7 +63,13 @@
uint32_t location_checksum,
const OatDexFile* oat_dex_file,
DexFileContainer* container)
- : DexFile(base, size, location, location_checksum, oat_dex_file, container) {}
+ : DexFile(base,
+ size,
+ location,
+ location_checksum,
+ oat_dex_file,
+ container,
+ /*is_compact_dex*/ false) {}
friend class DexFileLoader;
friend class DexFileVerifierTest;
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 88f1fc6..9f55314 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -288,12 +288,17 @@
#if HAVE_TIMED_RWLOCK
// Attempt to rectify locks so that we dump thread list with required locks before exiting.
NO_RETURN static void UnsafeLogFatalForThreadSuspendAllTimeout() {
+ // Increment gAborting before doing the thread list dump since we don't want any failures from
+ // AssertThreadSuspensionIsAllowable in cases where thread suspension is not allowed.
+ // See b/69044468.
+ ++gAborting;
Runtime* runtime = Runtime::Current();
std::ostringstream ss;
ss << "Thread suspend timeout\n";
Locks::mutator_lock_->Dump(ss);
ss << "\n";
runtime->GetThreadList()->Dump(ss);
+ --gAborting;
LOG(FATAL) << ss.str();
exit(0);
}
@@ -302,6 +307,8 @@
// Unlike suspending all threads where we can wait to acquire the mutator_lock_, suspending an
// individual thread requires polling. delay_us is the requested sleep wait. If delay_us is 0 then
// we use sched_yield instead of calling usleep.
+// Here and elsewhere, usleep may return -1 with errno set to EINTR, but being interrupted
+// is harmless for these sleeps, so we do not check the return value.
static void ThreadSuspendSleep(useconds_t delay_us) {
if (delay_us == 0) {
sched_yield();
diff --git a/test/669-moveable-string-class-equals/run b/test/669-moveable-string-class-equals/run
index d0ab6f8..7c74d8c 100755
--- a/test/669-moveable-string-class-equals/run
+++ b/test/669-moveable-string-class-equals/run
@@ -16,4 +16,4 @@
# Run without image, so that String.class is moveable.
# Reduce heap size to force more frequent GCs.
-${RUN} --no-image --no-dex2oat --runtime-option -Xmx16m "$@"
+${RUN} --no-image --runtime-option -Xmx16m "$@"
diff --git a/test/669-moveable-string-class-equals/src/Main.java b/test/669-moveable-string-class-equals/src/Main.java
index 4badade..d182d51 100644
--- a/test/669-moveable-string-class-equals/src/Main.java
+++ b/test/669-moveable-string-class-equals/src/Main.java
@@ -43,6 +43,10 @@
array[i] = "V" + i;
}
+ // Continually check string equality between a newly allocated String and an
+ // already allocated String with the same contents while allocating over 128MiB
+ // of memory (with the heap size limited to 16MiB), ensuring we run GC and stress the
+ // instanceof check in the String.equals() implementation.
for (int count = 0; count != 128 * 1024; ++count) {
for (int i = 0; i != length; ++i) {
allocateAtLeast1KiB();
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 69e4b87..8755f04 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -40,46 +40,22 @@
TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_EXECUTABLES) $(TARGET_CORE_IMG_OUTS)
# Also need libartagent.
-TEST_ART_TARGET_SYNC_DEPS += $(OUT_DIR)/$(ART_TEST_LIST_device_$(TARGET_ARCH)_libartagent)
-TEST_ART_TARGET_SYNC_DEPS += $(OUT_DIR)/$(ART_TEST_LIST_device_$(TARGET_ARCH)_libartagentd)
-ifdef TARGET_2ND_ARCH
-TEST_ART_TARGET_SYNC_DEPS += $(OUT_DIR)/$(ART_TEST_LIST_device_$(TARGET_2ND_ARCH)_libartagent)
-TEST_ART_TARGET_SYNC_DEPS += $(OUT_DIR)/$(ART_TEST_LIST_device_$(TARGET_2ND_ARCH)_libartagentd)
-endif
+TEST_ART_TARGET_SYNC_DEPS += libartagent-target libartagentd-target
# Also need libtiagent.
-TEST_ART_TARGET_SYNC_DEPS += $(OUT_DIR)/$(ART_TEST_LIST_device_$(TARGET_ARCH)_libtiagent)
-TEST_ART_TARGET_SYNC_DEPS += $(OUT_DIR)/$(ART_TEST_LIST_device_$(TARGET_ARCH)_libtiagentd)
-ifdef TARGET_2ND_ARCH
-TEST_ART_TARGET_SYNC_DEPS += $(OUT_DIR)/$(ART_TEST_LIST_device_$(TARGET_2ND_ARCH)_libtiagent)
-TEST_ART_TARGET_SYNC_DEPS += $(OUT_DIR)/$(ART_TEST_LIST_device_$(TARGET_2ND_ARCH)_libtiagentd)
-endif
+TEST_ART_TARGET_SYNC_DEPS += libtiagent-target libtiagentd-target
# Also need libtistress.
-TEST_ART_TARGET_SYNC_DEPS += $(OUT_DIR)/$(ART_TEST_LIST_device_$(TARGET_ARCH)_libtistress)
-TEST_ART_TARGET_SYNC_DEPS += $(OUT_DIR)/$(ART_TEST_LIST_device_$(TARGET_ARCH)_libtistressd)
-ifdef TARGET_2ND_ARCH
-TEST_ART_TARGET_SYNC_DEPS += $(OUT_DIR)/$(ART_TEST_LIST_device_$(TARGET_2ND_ARCH)_libtistress)
-TEST_ART_TARGET_SYNC_DEPS += $(OUT_DIR)/$(ART_TEST_LIST_device_$(TARGET_2ND_ARCH)_libtistressd)
-endif
+TEST_ART_TARGET_SYNC_DEPS += libtistress-target libtistressd-target
# Also need libarttest.
-TEST_ART_TARGET_SYNC_DEPS += $(OUT_DIR)/$(ART_TEST_LIST_device_$(TARGET_ARCH)_libarttest)
-TEST_ART_TARGET_SYNC_DEPS += $(OUT_DIR)/$(ART_TEST_LIST_device_$(TARGET_ARCH)_libarttestd)
-ifdef TARGET_2ND_ARCH
-TEST_ART_TARGET_SYNC_DEPS += $(OUT_DIR)/$(ART_TEST_LIST_device_$(TARGET_2ND_ARCH)_libarttest)
-TEST_ART_TARGET_SYNC_DEPS += $(OUT_DIR)/$(ART_TEST_LIST_device_$(TARGET_2ND_ARCH)_libarttestd)
-endif
+TEST_ART_TARGET_SYNC_DEPS += libarttest-target libarttestd-target
# Also need libnativebridgetest.
-TEST_ART_TARGET_SYNC_DEPS += $(OUT_DIR)/$(ART_TEST_LIST_device_$(TARGET_ARCH)_libnativebridgetest)
-ifdef TARGET_2ND_ARCH
-TEST_ART_TARGET_SYNC_DEPS += $(OUT_DIR)/$(ART_TEST_LIST_device_$(TARGET_2ND_ARCH)_libnativebridgetest)
-endif
+TEST_ART_TARGET_SYNC_DEPS += libnativebridgetest-target
# Also need libopenjdkjvmti.
-TEST_ART_TARGET_SYNC_DEPS += libopenjdkjvmti
-TEST_ART_TARGET_SYNC_DEPS += libopenjdkjvmtid
+TEST_ART_TARGET_SYNC_DEPS += libopenjdkjvmti-target libopenjdkjvmtid-target
TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/core-libart-testdex.jar
TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/core-oj-testdex.jar
diff --git a/tools/buildbot-build.sh b/tools/buildbot-build.sh
index 53b5093..fd9ad0b 100755
--- a/tools/buildbot-build.sh
+++ b/tools/buildbot-build.sh
@@ -35,7 +35,7 @@
using_jack=$(get_build_var ANDROID_COMPILE_WITH_JACK)
java_libraries_dir=${out_dir}/target/common/obj/JAVA_LIBRARIES
-common_targets="vogar core-tests apache-harmony-jdwp-tests-hostdex jsr166-tests mockito-target libjdwp"
+common_targets="vogar core-tests apache-harmony-jdwp-tests-hostdex jsr166-tests mockito-target"
mode="target"
j_arg="-j$(nproc)"
showcommands=
@@ -70,16 +70,23 @@
extra_args=SOONG_ALLOW_MISSING_DEPENDENCIES=true
if [[ $mode == "host" ]]; then
- make_command="make $j_arg $extra_args $showcommands build-art-host-tests $common_targets dx-tests"
- make_command+=" ${out_dir}/host/linux-x86/lib/libjavacoretests.so "
- make_command+=" ${out_dir}/host/linux-x86/lib64/libjavacoretests.so"
- make_command+=" libwrapagentpropertiesd libwrapagentproperties"
+ make_command="make $j_arg $extra_args $showcommands build-art-host-tests $common_targets"
+ make_command+=" dx-tests"
+ mode_suffix="-host"
elif [[ $mode == "target" ]]; then
make_command="make $j_arg $extra_args $showcommands build-art-target-tests $common_targets"
- make_command+=" libjavacrypto libjavacoretests libnetd_client linker toybox toolbox sh"
+ make_command+=" libjavacrypto-target libnetd_client-target linker toybox toolbox sh"
make_command+=" ${out_dir}/host/linux-x86/bin/adb libstdc++ "
make_command+=" ${out_dir}/target/product/${TARGET_PRODUCT}/system/etc/public.libraries.txt"
+ mode_suffix="-target"
fi
+mode_specific_libraries="libjavacoretests libjdwp libwrapagentproperties libwrapagentpropertiesd"
+for LIB in ${mode_specific_libraries} ; do
+ make_command+=" $LIB${mode_suffix}"
+done
+
+
+
echo "Executing $make_command"
$make_command
diff --git a/tools/libjdwp_art_failures.txt b/tools/libjdwp_art_failures.txt
index 646a96a..354bee8 100644
--- a/tools/libjdwp_art_failures.txt
+++ b/tools/libjdwp_art_failures.txt
@@ -91,5 +91,11 @@
"org.apache.harmony.jpda.tests.jdwp.ThreadGroupReference.NameTest#testName001_NullObject",
"org.apache.harmony.jpda.tests.jdwp.ThreadGroupReference.ParentTest#testParent_NullObject",
"org.apache.harmony.jpda.tests.jdwp.VirtualMachine.CapabilitiesNewTest#testCapabilitiesNew001" ]
+},
+{
+ description: "Test is flaky",
+ result: EXEC_FAILED,
+ bug: 69121056,
+ name: "org.apache.harmony.jpda.tests.jdwp.ObjectReference.IsCollectedTest#testIsCollected001"
}
]
diff --git a/tools/libjdwp_oj_art_failures.txt b/tools/libjdwp_oj_art_failures.txt
index e0f243c..787c4d2 100644
--- a/tools/libjdwp_oj_art_failures.txt
+++ b/tools/libjdwp_oj_art_failures.txt
@@ -61,5 +61,11 @@
"org.apache.harmony.jpda.tests.jdwp.ThreadGroupReference.NameTest#testName001_NullObject",
"org.apache.harmony.jpda.tests.jdwp.ThreadGroupReference.ParentTest#testParent_NullObject",
"org.apache.harmony.jpda.tests.jdwp.VirtualMachine.CapabilitiesNewTest#testCapabilitiesNew001" ]
+},
+{
+ description: "Test is flaky",
+ result: EXEC_FAILED,
+ bug: 69121056,
+ name: "org.apache.harmony.jpda.tests.jdwp.ObjectReference.IsCollectedTest#testIsCollected001"
}
]