-rw-r--r--  compiler/compiler.h | 9
-rw-r--r--  compiler/driver/compiler_driver.cc | 36
-rw-r--r--  compiler/jni/jni_compiler_test.cc | 20
-rw-r--r--  compiler/jni/quick/jni_compiler.cc | 16
-rw-r--r--  compiler/jni/quick/jni_compiler.h | 3
-rw-r--r--  compiler/optimizing/load_store_analysis.cc | 179
-rw-r--r--  compiler/optimizing/load_store_analysis.h | 129
-rw-r--r--  compiler/optimizing/load_store_analysis_test.cc | 272
-rw-r--r--  compiler/optimizing/load_store_elimination.cc | 38
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc | 6
-rw-r--r--  compiler/optimizing/scheduler.cc | 9
-rw-r--r--  compiler/optimizing/scheduler_test.cc | 24
-rw-r--r--  openjdkjvmti/ti_redefine.cc | 4
-rw-r--r--  profman/boot_image_profile.cc | 2
-rw-r--r--  runtime/art_method-inl.h | 2
-rw-r--r--  runtime/art_method.cc | 21
-rw-r--r--  runtime/art_method.h | 38
-rw-r--r--  runtime/class_linker.cc | 7
-rw-r--r--  runtime/dex_file_annotations.cc | 52
-rw-r--r--  runtime/dex_file_annotations.h | 15
-rw-r--r--  runtime/entrypoints/jni/jni_entrypoints.cc | 2
-rw-r--r--  runtime/entrypoints/quick/quick_jni_entrypoints.cc | 39
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc | 26
-rw-r--r--  runtime/image.cc | 2
-rw-r--r--  runtime/jni_internal.cc | 2
-rw-r--r--  runtime/mirror/class.cc | 16
-rw-r--r--  runtime/mirror/class.h | 2
-rw-r--r--  runtime/modifiers.h | 15
-rw-r--r--  runtime/native/scoped_fast_native_object_access-inl.h | 2
-rw-r--r--  runtime/thread_list.cc | 7
-rwxr-xr-x  test/669-moveable-string-class-equals/run | 2
-rw-r--r--  test/669-moveable-string-class-equals/src/Main.java | 4
-rwxr-xr-x  tools/buildbot-build.sh | 19
-rw-r--r--  tools/libjdwp_art_failures.txt | 6
-rw-r--r--  tools/libjdwp_oj_art_failures.txt | 6
35 files changed, 657 insertions(+), 375 deletions(-)
diff --git a/compiler/compiler.h b/compiler/compiler.h
index cfed6d5a8e..3aa84f8e2b 100644
--- a/compiler/compiler.h
+++ b/compiler/compiler.h
@@ -46,12 +46,6 @@ class Compiler {
kOptimizing
};
- enum JniOptimizationFlags {
- kNone = 0x0,
- kFastNative = 0x1,
- kCriticalNative = 0x2,
- };
-
static Compiler* Create(CompilerDriver* driver, Kind kind);
virtual void Init() = 0;
@@ -71,8 +65,7 @@ class Compiler {
virtual CompiledMethod* JniCompile(uint32_t access_flags,
uint32_t method_idx,
- const DexFile& dex_file,
- JniOptimizationFlags optimization_flags) const = 0;
+ const DexFile& dex_file) const = 0;
virtual bool JitCompile(Thread* self ATTRIBUTE_UNUSED,
jit::JitCodeCache* code_cache ATTRIBUTE_UNUSED,
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index a9d27ef0cc..32d0bbe495 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -46,6 +46,7 @@
#include "dex/verified_method.h"
#include "dex_compilation_unit.h"
#include "dex_file-inl.h"
+#include "dex_file_annotations.h"
#include "dex_instruction-inl.h"
#include "driver/compiler_options.h"
#include "gc/accounting/card_table-inl.h"
@@ -511,40 +512,11 @@ static void CompileMethod(Thread* self,
InstructionSetHasGenericJniStub(driver->GetInstructionSet())) {
// Leaving this empty will trigger the generic JNI version
} else {
- // Look-up the ArtMethod associated with this code_item (if any)
- // -- It is later used to lookup any [optimization] annotations for this method.
- ScopedObjectAccess soa(self);
-
- // TODO: Lookup annotation from DexFile directly without resolving method.
- ArtMethod* method =
- Runtime::Current()->GetClassLinker()->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
- dex_file,
- method_idx,
- dex_cache,
- class_loader,
- /* referrer */ nullptr,
- invoke_type);
-
// Query any JNI optimization annotations such as @FastNative or @CriticalNative.
- Compiler::JniOptimizationFlags optimization_flags = Compiler::kNone;
- if (UNLIKELY(method == nullptr)) {
- // Failed method resolutions happen very rarely, e.g. ancestor class cannot be resolved.
- DCHECK(self->IsExceptionPending());
- self->ClearException();
- } else if (method->IsAnnotatedWithFastNative()) {
- // TODO: Will no longer need this CHECK once we have verifier checking this.
- CHECK(!method->IsAnnotatedWithCriticalNative());
- optimization_flags = Compiler::kFastNative;
- } else if (method->IsAnnotatedWithCriticalNative()) {
- // TODO: Will no longer need this CHECK once we have verifier checking this.
- CHECK(!method->IsAnnotatedWithFastNative());
- optimization_flags = Compiler::kCriticalNative;
- }
+ access_flags |= annotations::GetNativeMethodAnnotationAccessFlags(
+ dex_file, dex_file.GetClassDef(class_def_idx), method_idx);
- compiled_method = driver->GetCompiler()->JniCompile(access_flags,
- method_idx,
- dex_file,
- optimization_flags);
+ compiled_method = driver->GetCompiler()->JniCompile(access_flags, method_idx, dex_file);
CHECK(compiled_method != nullptr);
}
} else if ((access_flags & kAccAbstract) != 0) {
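Note: the driver no longer resolves an ArtMethod just to read the @FastNative/@CriticalNative build annotations; annotations::GetNativeMethodAnnotationAccessFlags() reads them straight from the dex file and the result is ORed into access_flags, so everything downstream only tests flag bits. Below is a minimal standalone sketch of that dispatch (not ART code); kAccNative is the standard 0x0100 access flag, while the fast/critical bit values are placeholders standing in for the definitions in runtime/modifiers.h.

```cpp
#include <cstdint>
#include <cstdio>

// Placeholder values for illustration only; the authoritative definitions live in
// runtime/modifiers.h.
constexpr uint32_t kAccNative         = 0x00000100;
constexpr uint32_t kAccFastNative     = 0x00080000;  // placeholder bit
constexpr uint32_t kAccCriticalNative = 0x00200000;  // placeholder bit

// Once the annotation-derived bits are folded into access_flags, picking the JNI
// calling convention is a plain bit test, mirroring what jni_compiler.cc now does.
const char* JniKindOf(uint32_t access_flags) {
  if ((access_flags & kAccNative) == 0u) return "not native";
  if ((access_flags & kAccCriticalNative) != 0u) return "@CriticalNative";
  if ((access_flags & kAccFastNative) != 0u) return "@FastNative";
  return "normal native";
}

int main() {
  uint32_t access_flags = kAccNative;
  access_flags |= kAccFastNative;  // stands in for the annotation query above
  std::printf("%s\n", JniKindOf(access_flags));  // prints "@FastNative"
  return 0;
}
```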
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 3460efe474..daf64d1298 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -55,10 +55,10 @@ extern "C" JNIEXPORT jint JNICALL Java_MyClassNatives_sbar(JNIEnv*, jclass, jint
namespace art {
enum class JniKind {
- kNormal = Compiler::kNone, // Regular kind of un-annotated natives.
- kFast = Compiler::kFastNative, // Native method annotated with @FastNative.
- kCritical = Compiler::kCriticalNative, // Native method annotated with @CriticalNative.
- kCount = Compiler::kCriticalNative + 1 // How many different types of JNIs we can have.
+ kNormal, // Regular kind of un-annotated natives.
+ kFast, // Native method annotated with @FastNative.
+ kCritical, // Native method annotated with @CriticalNative.
+ kCount // How many different types of JNIs we can have.
};
// Used to initialize array sizes that want to have different state per current jni.
@@ -2205,8 +2205,8 @@ void JniCompilerTest::NormalNativeImpl() {
ArtMethod* method = jni::DecodeArtMethod(jmethod_);
ASSERT_TRUE(method != nullptr);
- EXPECT_FALSE(method->IsAnnotatedWithCriticalNative());
- EXPECT_FALSE(method->IsAnnotatedWithFastNative());
+ EXPECT_FALSE(method->IsCriticalNative());
+ EXPECT_FALSE(method->IsFastNative());
}
// TODO: just rename the java functions to the standard convention and remove duplicated tests
@@ -2227,8 +2227,8 @@ void JniCompilerTest::FastNativeImpl() {
ArtMethod* method = jni::DecodeArtMethod(jmethod_);
ASSERT_TRUE(method != nullptr);
- EXPECT_FALSE(method->IsAnnotatedWithCriticalNative());
- EXPECT_TRUE(method->IsAnnotatedWithFastNative());
+ EXPECT_FALSE(method->IsCriticalNative());
+ EXPECT_TRUE(method->IsFastNative());
}
// TODO: just rename the java functions to the standard convention and remove duplicated tests
@@ -2256,8 +2256,8 @@ void JniCompilerTest::CriticalNativeImpl() {
ArtMethod* method = jni::DecodeArtMethod(jmethod_);
ASSERT_TRUE(method != nullptr);
- EXPECT_TRUE(method->IsAnnotatedWithCriticalNative());
- EXPECT_FALSE(method->IsAnnotatedWithFastNative());
+ EXPECT_TRUE(method->IsCriticalNative());
+ EXPECT_FALSE(method->IsFastNative());
EXPECT_EQ(0, gJava_myClassNatives_criticalNative_calls[gCurrentJni]);
env_->CallStaticVoidMethod(jklass_, jmethod_);
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index b3177aa471..b93b05cbd4 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -52,8 +52,6 @@
namespace art {
-using JniOptimizationFlags = Compiler::JniOptimizationFlags;
-
template <PointerSize kPointerSize>
static void CopyParameter(JNIMacroAssembler<kPointerSize>* jni_asm,
ManagedRuntimeCallingConvention* mr_conv,
@@ -120,8 +118,7 @@ template <PointerSize kPointerSize>
static CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
uint32_t access_flags,
uint32_t method_idx,
- const DexFile& dex_file,
- JniOptimizationFlags optimization_flags) {
+ const DexFile& dex_file) {
const bool is_native = (access_flags & kAccNative) != 0;
CHECK(is_native);
const bool is_static = (access_flags & kAccStatic) != 0;
@@ -131,10 +128,10 @@ static CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
const InstructionSetFeatures* instruction_set_features = driver->GetInstructionSetFeatures();
// i.e. if the method was annotated with @FastNative
- const bool is_fast_native = (optimization_flags == Compiler::kFastNative);
+ const bool is_fast_native = (access_flags & kAccFastNative) != 0u;
// i.e. if the method was annotated with @CriticalNative
- bool is_critical_native = (optimization_flags == Compiler::kCriticalNative);
+ bool is_critical_native = (access_flags & kAccCriticalNative) != 0u;
VLOG(jni) << "JniCompile: Method :: "
<< dex_file.PrettyMethod(method_idx, /* with signature */ true)
@@ -781,14 +778,13 @@ static void SetNativeParameter(JNIMacroAssembler<kPointerSize>* jni_asm,
CompiledMethod* ArtQuickJniCompileMethod(CompilerDriver* compiler,
uint32_t access_flags,
uint32_t method_idx,
- const DexFile& dex_file,
- Compiler::JniOptimizationFlags optimization_flags) {
+ const DexFile& dex_file) {
if (Is64BitInstructionSet(compiler->GetInstructionSet())) {
return ArtJniCompileMethodInternal<PointerSize::k64>(
- compiler, access_flags, method_idx, dex_file, optimization_flags);
+ compiler, access_flags, method_idx, dex_file);
} else {
return ArtJniCompileMethodInternal<PointerSize::k32>(
- compiler, access_flags, method_idx, dex_file, optimization_flags);
+ compiler, access_flags, method_idx, dex_file);
}
}
diff --git a/compiler/jni/quick/jni_compiler.h b/compiler/jni/quick/jni_compiler.h
index 26c32a31b8..3fcce55b5a 100644
--- a/compiler/jni/quick/jni_compiler.h
+++ b/compiler/jni/quick/jni_compiler.h
@@ -28,8 +28,7 @@ class CompiledMethod;
CompiledMethod* ArtQuickJniCompileMethod(CompilerDriver* compiler,
uint32_t access_flags,
uint32_t method_idx,
- const DexFile& dex_file,
- Compiler::JniOptimizationFlags optimization_flags);
+ const DexFile& dex_file);
} // namespace art
diff --git a/compiler/optimizing/load_store_analysis.cc b/compiler/optimizing/load_store_analysis.cc
index 5a8ac59195..8b1812a6de 100644
--- a/compiler/optimizing/load_store_analysis.cc
+++ b/compiler/optimizing/load_store_analysis.cc
@@ -22,111 +22,130 @@ namespace art {
// The number of heap locations for most of the methods stays below this threshold.
constexpr size_t kMaxNumberOfHeapLocations = 32;
-// Check if array indices array[idx1 +/- CONST] and array[idx2] MAY alias.
-static bool BinaryOpAndIndexMayAlias(const HBinaryOperation* idx1, const HInstruction* idx2) {
- DCHECK(idx1 != nullptr);
- DCHECK(idx2 != nullptr);
+// Test if two integer ranges [l1,h1] and [l2,h2] overlap.
+// Note that the ranges are inclusive on both ends.
+// l1|------|h1
+// l2|------|h2
+static bool CanIntegerRangesOverlap(int64_t l1, int64_t h1, int64_t l2, int64_t h2) {
+ return std::max(l1, l2) <= std::min(h1, h2);
+}
- if (!idx1->IsAdd() && !idx1->IsSub()) {
+static bool IsAddOrSub(const HInstruction* instruction) {
+ return instruction->IsAdd() || instruction->IsSub();
+}
+
+static bool CanBinaryOpAndIndexAlias(const HBinaryOperation* idx1,
+ const size_t vector_length1,
+ const HInstruction* idx2,
+ const size_t vector_length2) {
+ if (!IsAddOrSub(idx1)) {
// We currently only support Add and Sub operations.
return true;
}
-
- HConstant* cst = idx1->GetConstantRight();
- if (cst == nullptr || cst->IsArithmeticZero()) {
+ if (idx1->AsBinaryOperation()->GetLeastConstantLeft() != idx2) {
+ // Cannot analyze [i+CONST1] and [j].
return true;
}
-
- if (idx1->GetLeastConstantLeft() == idx2) {
- // for example, array[idx1 + 1] and array[idx1]
- return false;
+ if (!idx1->GetConstantRight()->IsIntConstant()) {
+ return true;
}
- return true;
+ // Since 'i' are the same in [i+CONST] and [i],
+ // further compare [CONST] and [0].
+ int64_t l1 = idx1->IsAdd() ?
+ idx1->GetConstantRight()->AsIntConstant()->GetValue() :
+ -idx1->GetConstantRight()->AsIntConstant()->GetValue();
+ int64_t l2 = 0;
+ int64_t h1 = l1 + (vector_length1 - 1);
+ int64_t h2 = l2 + (vector_length2 - 1);
+ return CanIntegerRangesOverlap(l1, h1, l2, h2);
}
-// Check if Add and Sub MAY alias when used as indices in arrays.
-static bool BinaryOpsMayAlias(const HBinaryOperation* idx1, const HBinaryOperation* idx2) {
- DCHECK(idx1!= nullptr);
- DCHECK(idx2 != nullptr);
-
- HConstant* idx1_cst = idx1->GetConstantRight();
- HInstruction* idx1_other = idx1->GetLeastConstantLeft();
- HConstant* idx2_cst = idx2->GetConstantRight();
- HInstruction* idx2_other = idx2->GetLeastConstantLeft();
-
- if (idx1_cst == nullptr || idx1_other == nullptr ||
- idx2_cst == nullptr || idx2_other == nullptr) {
- // We only analyze patterns like [i +/- CONST].
+static bool CanBinaryOpsAlias(const HBinaryOperation* idx1,
+ const size_t vector_length1,
+ const HBinaryOperation* idx2,
+ const size_t vector_length2) {
+ if (!IsAddOrSub(idx1) || !IsAddOrSub(idx2)) {
+ // We currently only support Add and Sub operations.
return true;
}
-
- if (idx1_other != idx2_other) {
- // For example, [j+1] and [k+1] MAY alias.
+ if (idx1->AsBinaryOperation()->GetLeastConstantLeft() !=
+ idx2->AsBinaryOperation()->GetLeastConstantLeft()) {
+ // Cannot analyze [i+CONST1] and [j+CONST2].
return true;
}
-
- if ((idx1->IsAdd() && idx2->IsAdd()) ||
- (idx1->IsSub() && idx2->IsSub())) {
- return idx1_cst->AsIntConstant()->GetValue() == idx2_cst->AsIntConstant()->GetValue();
- }
-
- if ((idx1->IsAdd() && idx2->IsSub()) ||
- (idx1->IsSub() && idx2->IsAdd())) {
- // [i + CONST1] and [i - CONST2] MAY alias iff CONST1 == -CONST2.
- // By checking CONST1 == -CONST2, following cases are handled:
- // - Zero constants case [i+0] and [i-0] is handled.
- // - Overflow cases are handled, for example:
- // [i+0x80000000] and [i-0x80000000];
- // [i+0x10] and [i-0xFFFFFFF0].
- // - Other cases [i+CONST1] and [i-CONST2] without any overflow is handled.
- return idx1_cst->AsIntConstant()->GetValue() == -(idx2_cst->AsIntConstant()->GetValue());
+ if (!idx1->GetConstantRight()->IsIntConstant() ||
+ !idx2->GetConstantRight()->IsIntConstant()) {
+ return true;
}
- // All other cases, MAY alias.
- return true;
+ // Since 'i' are the same in [i+CONST1] and [i+CONST2],
+ // further compare [CONST1] and [CONST2].
+ int64_t l1 = idx1->IsAdd() ?
+ idx1->GetConstantRight()->AsIntConstant()->GetValue() :
+ -idx1->GetConstantRight()->AsIntConstant()->GetValue();
+ int64_t l2 = idx2->IsAdd() ?
+ idx2->GetConstantRight()->AsIntConstant()->GetValue() :
+ -idx2->GetConstantRight()->AsIntConstant()->GetValue();
+ int64_t h1 = l1 + (vector_length1 - 1);
+ int64_t h2 = l2 + (vector_length2 - 1);
+ return CanIntegerRangesOverlap(l1, h1, l2, h2);
}
-// The following array index cases are handled:
-// [i] and [i]
-// [CONST1] and [CONST2]
-// [i] and [i+CONST]
-// [i] and [i-CONST]
-// [i+CONST1] and [i+CONST2]
-// [i-CONST1] and [i-CONST2]
-// [i+CONST1] and [i-CONST2]
-// [i-CONST1] and [i+CONST2]
-// For other complicated cases, we rely on other passes like GVN and simpilfier
-// to optimize these cases before this pass.
-// For example: [i+j+k+10] and [i+k+10+j] shall be optimized to [i7+10] and [i7+10].
-bool HeapLocationCollector::CanArrayIndicesAlias(const HInstruction* idx1,
- const HInstruction* idx2) const {
+bool HeapLocationCollector::CanArrayElementsAlias(const HInstruction* idx1,
+ const size_t vector_length1,
+ const HInstruction* idx2,
+ const size_t vector_length2) const {
DCHECK(idx1 != nullptr);
DCHECK(idx2 != nullptr);
+ DCHECK_GE(vector_length1, HeapLocation::kScalar);
+ DCHECK_GE(vector_length2, HeapLocation::kScalar);
+ // [i] and [i].
if (idx1 == idx2) {
- // [i] and [i]
return true;
}
- if (idx1->IsIntConstant() && idx2->IsIntConstant()) {
- // [CONST1] and [CONST2]
- return idx1->AsIntConstant()->GetValue() == idx2->AsIntConstant()->GetValue();
- }
-
- if (idx1->IsBinaryOperation() && !BinaryOpAndIndexMayAlias(idx1->AsBinaryOperation(), idx2)) {
- // [i] and [i+/-CONST]
- return false;
- }
- if (idx2->IsBinaryOperation() && !BinaryOpAndIndexMayAlias(idx2->AsBinaryOperation(), idx1)) {
- // [i+/-CONST] and [i]
- return false;
- }
- if (idx1->IsBinaryOperation() && idx2->IsBinaryOperation()) {
- // [i+/-CONST1] and [i+/-CONST2]
- if (!BinaryOpsMayAlias(idx1->AsBinaryOperation(), idx2->AsBinaryOperation())) {
- return false;
- }
+ // [CONST1] and [CONST2].
+ if (idx1->IsIntConstant() && idx2->IsIntConstant()) {
+ int64_t l1 = idx1->AsIntConstant()->GetValue();
+ int64_t l2 = idx2->AsIntConstant()->GetValue();
+ // To avoid any overflow in following CONST+vector_length calculation,
+ // use int64_t instead of int32_t.
+ int64_t h1 = l1 + (vector_length1 - 1);
+ int64_t h2 = l2 + (vector_length2 - 1);
+ return CanIntegerRangesOverlap(l1, h1, l2, h2);
+ }
+
+ // [i+CONST] and [i].
+ if (idx1->IsBinaryOperation() &&
+ idx1->AsBinaryOperation()->GetConstantRight() != nullptr &&
+ idx1->AsBinaryOperation()->GetLeastConstantLeft() == idx2) {
+ return CanBinaryOpAndIndexAlias(idx1->AsBinaryOperation(),
+ vector_length1,
+ idx2,
+ vector_length2);
+ }
+
+ // [i] and [i+CONST].
+ if (idx2->IsBinaryOperation() &&
+ idx2->AsBinaryOperation()->GetConstantRight() != nullptr &&
+ idx2->AsBinaryOperation()->GetLeastConstantLeft() == idx1) {
+ return CanBinaryOpAndIndexAlias(idx2->AsBinaryOperation(),
+ vector_length2,
+ idx1,
+ vector_length1);
+ }
+
+ // [i+CONST1] and [i+CONST2].
+ if (idx1->IsBinaryOperation() &&
+ idx1->AsBinaryOperation()->GetConstantRight() != nullptr &&
+ idx2->IsBinaryOperation() &&
+ idx2->AsBinaryOperation()->GetConstantRight() != nullptr) {
+ return CanBinaryOpsAlias(idx1->AsBinaryOperation(),
+ vector_length1,
+ idx2->AsBinaryOperation(),
+ vector_length2);
}
// By default, MAY alias.
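Note: the rewrite above folds all of the index cases into one inclusive interval-overlap test: an access at index i+CONST with vector length n covers elements [CONST, CONST+n-1] relative to i, and two accesses may alias exactly when those intervals intersect. A minimal standalone sketch of the predicate, with worked values mirroring the vector cases exercised in load_store_analysis_test.cc:

```cpp
#include <algorithm>
#include <cassert>
#include <cstdint>

// Inclusive ranges [l1, h1] and [l2, h2] overlap iff max(l1, l2) <= min(h1, h2).
static bool CanIntegerRangesOverlap(int64_t l1, int64_t h1, int64_t l2, int64_t h2) {
  return std::max(l1, l2) <= std::min(h1, h2);
}

int main() {
  // array[i+6 .. i+7] (vector length 2) vs. array[i .. i+3] (vector length 4):
  // [6, 7] and [0, 3] are disjoint, so the two accesses cannot alias.
  assert(!CanIntegerRangesOverlap(6, 7, 0, 3));

  // array[i+6 .. i+9] vs. array[i+8 .. i+11]: [6, 9] and [8, 11] share 8 and 9,
  // so the accesses may alias (a partial overlap).
  assert(CanIntegerRangesOverlap(6, 9, 8, 11));
  return 0;
}
```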
diff --git a/compiler/optimizing/load_store_analysis.h b/compiler/optimizing/load_store_analysis.h
index 5a1df45914..999026cb6a 100644
--- a/compiler/optimizing/load_store_analysis.h
+++ b/compiler/optimizing/load_store_analysis.h
@@ -102,23 +102,26 @@ class ReferenceInfo : public ArenaObject<kArenaAllocLSA> {
class HeapLocation : public ArenaObject<kArenaAllocLSA> {
public:
static constexpr size_t kInvalidFieldOffset = -1;
-
+ // Default value for heap locations which are not vector data.
+ static constexpr size_t kScalar = 1;
// TODO: more fine-grained array types.
static constexpr int16_t kDeclaringClassDefIndexForArrays = -1;
HeapLocation(ReferenceInfo* ref_info,
size_t offset,
HInstruction* index,
+ size_t vector_length,
int16_t declaring_class_def_index)
: ref_info_(ref_info),
offset_(offset),
index_(index),
+ vector_length_(vector_length),
declaring_class_def_index_(declaring_class_def_index),
value_killed_by_loop_side_effects_(true) {
DCHECK(ref_info != nullptr);
DCHECK((offset == kInvalidFieldOffset && index != nullptr) ||
(offset != kInvalidFieldOffset && index == nullptr));
- if (ref_info->IsSingleton() && !IsArrayElement()) {
+ if (ref_info->IsSingleton() && !IsArray()) {
// Assume this location's value cannot be killed by loop side effects
// until proven otherwise.
value_killed_by_loop_side_effects_ = false;
@@ -128,6 +131,7 @@ class HeapLocation : public ArenaObject<kArenaAllocLSA> {
ReferenceInfo* GetReferenceInfo() const { return ref_info_; }
size_t GetOffset() const { return offset_; }
HInstruction* GetIndex() const { return index_; }
+ size_t GetVectorLength() const { return vector_length_; }
// Returns the definition of declaring class' dex index.
// It's kDeclaringClassDefIndexForArrays for an array element.
@@ -135,7 +139,7 @@ class HeapLocation : public ArenaObject<kArenaAllocLSA> {
return declaring_class_def_index_;
}
- bool IsArrayElement() const {
+ bool IsArray() const {
return index_ != nullptr;
}
@@ -148,15 +152,26 @@ class HeapLocation : public ArenaObject<kArenaAllocLSA> {
}
private:
- ReferenceInfo* const ref_info_; // reference for instance/static field or array access.
- const size_t offset_; // offset of static/instance field.
- HInstruction* const index_; // index of an array element.
- const int16_t declaring_class_def_index_; // declaring class's def's dex index.
- bool value_killed_by_loop_side_effects_; // value of this location may be killed by loop
- // side effects because this location is stored
- // into inside a loop. This gives
- // better info on whether a singleton's location
- // value may be killed by loop side effects.
+ // Reference for instance/static field, array element or vector data.
+ ReferenceInfo* const ref_info_;
+ // Offset of static/instance field.
+ // Invalid when this HeapLocation is not field.
+ const size_t offset_;
+ // Index of an array element or starting index of vector data.
+ // Invalid when this HeapLocation is not array.
+ HInstruction* const index_;
+ // Vector length of vector data.
+  // When this HeapLocation is not vector data, its value is kScalar.
+ const size_t vector_length_;
+ // Declaring class's def's dex index.
+ // Invalid when this HeapLocation is not field access.
+ const int16_t declaring_class_def_index_;
+
+ // Value of this location may be killed by loop side effects
+ // because this location is stored into inside a loop.
+ // This gives better info on whether a singleton's location
+ // value may be killed by loop side effects.
+ bool value_killed_by_loop_side_effects_;
DISALLOW_COPY_AND_ASSIGN(HeapLocation);
};
@@ -218,14 +233,26 @@ class HeapLocationCollector : public HGraphVisitor {
return nullptr;
}
- size_t GetArrayAccessHeapLocation(HInstruction* array, HInstruction* index) const {
+ size_t GetFieldHeapLocation(HInstruction* object, const FieldInfo* field) const {
+ DCHECK(object != nullptr);
+ DCHECK(field != nullptr);
+ return FindHeapLocationIndex(FindReferenceInfoOf(HuntForOriginalReference(object)),
+ field->GetFieldOffset().SizeValue(),
+ nullptr,
+ HeapLocation::kScalar,
+ field->GetDeclaringClassDefIndex());
+ }
+
+ size_t GetArrayHeapLocation(HInstruction* array,
+ HInstruction* index,
+ size_t vector_length = HeapLocation::kScalar) const {
DCHECK(array != nullptr);
DCHECK(index != nullptr);
- HInstruction* original_ref = HuntForOriginalReference(array);
- ReferenceInfo* ref_info = FindReferenceInfoOf(original_ref);
- return FindHeapLocationIndex(ref_info,
+ DCHECK_GE(vector_length, HeapLocation::kScalar);
+ return FindHeapLocationIndex(FindReferenceInfoOf(HuntForOriginalReference(array)),
HeapLocation::kInvalidFieldOffset,
index,
+ vector_length,
HeapLocation::kDeclaringClassDefIndexForArrays);
}
@@ -242,15 +269,26 @@ class HeapLocationCollector : public HGraphVisitor {
}
// Find and return the heap location index in heap_locations_.
+ // NOTE: When heap locations are created, potentially aliasing/overlapping
+ // accesses are given different indexes. This find function also
+ // doesn't take aliasing/overlapping into account. For example,
+ // this function returns three different indexes for:
+ // - ref_info=array, index=i, vector_length=kScalar;
+ // - ref_info=array, index=i, vector_length=2;
+ // - ref_info=array, index=i, vector_length=4;
+ // In later analysis, ComputeMayAlias() and MayAlias() compute and tell whether
+ // these indexes alias.
size_t FindHeapLocationIndex(ReferenceInfo* ref_info,
size_t offset,
HInstruction* index,
+ size_t vector_length,
int16_t declaring_class_def_index) const {
for (size_t i = 0; i < heap_locations_.size(); i++) {
HeapLocation* loc = heap_locations_[i];
if (loc->GetReferenceInfo() == ref_info &&
loc->GetOffset() == offset &&
loc->GetIndex() == index &&
+ loc->GetVectorLength() == vector_length &&
loc->GetDeclaringClassDefIndex() == declaring_class_def_index) {
return i;
}
@@ -315,7 +353,10 @@ class HeapLocationCollector : public HGraphVisitor {
return true;
}
- bool CanArrayIndicesAlias(const HInstruction* i1, const HInstruction* i2) const;
+ bool CanArrayElementsAlias(const HInstruction* idx1,
+ const size_t vector_length1,
+ const HInstruction* idx2,
+ const size_t vector_length2) const;
// `index1` and `index2` are indices in the array of collected heap locations.
// Returns the position in the bit vector that tracks whether the two heap
@@ -340,7 +381,7 @@ class HeapLocationCollector : public HGraphVisitor {
HeapLocation* loc2 = heap_locations_[index2];
if (loc1->GetOffset() != loc2->GetOffset()) {
// Either two different instance fields, or one is an instance
- // field and the other is an array element.
+ // field and the other is an array data.
return false;
}
if (loc1->GetDeclaringClassDefIndex() != loc2->GetDeclaringClassDefIndex()) {
@@ -350,10 +391,12 @@ class HeapLocationCollector : public HGraphVisitor {
if (!CanReferencesAlias(loc1->GetReferenceInfo(), loc2->GetReferenceInfo())) {
return false;
}
- if (loc1->IsArrayElement() && loc2->IsArrayElement()) {
- HInstruction* array_index1 = loc1->GetIndex();
- HInstruction* array_index2 = loc2->GetIndex();
- if (!CanArrayIndicesAlias(array_index1, array_index2)) {
+ if (loc1->IsArray() && loc2->IsArray()) {
+ HInstruction* idx1 = loc1->GetIndex();
+ HInstruction* idx2 = loc2->GetIndex();
+ size_t vector_length1 = loc1->GetVectorLength();
+ size_t vector_length2 = loc2->GetVectorLength();
+ if (!CanArrayElementsAlias(idx1, vector_length1, idx2, vector_length2)) {
return false;
}
ReferenceInfo* ref_info = loc1->GetReferenceInfo();
@@ -383,14 +426,15 @@ class HeapLocationCollector : public HGraphVisitor {
HeapLocation* GetOrCreateHeapLocation(HInstruction* ref,
size_t offset,
HInstruction* index,
+ size_t vector_length,
int16_t declaring_class_def_index) {
HInstruction* original_ref = HuntForOriginalReference(ref);
ReferenceInfo* ref_info = GetOrCreateReferenceInfo(original_ref);
size_t heap_location_idx = FindHeapLocationIndex(
- ref_info, offset, index, declaring_class_def_index);
+ ref_info, offset, index, vector_length, declaring_class_def_index);
if (heap_location_idx == kHeapLocationNotFound) {
HeapLocation* heap_loc = new (GetGraph()->GetAllocator())
- HeapLocation(ref_info, offset, index, declaring_class_def_index);
+ HeapLocation(ref_info, offset, index, vector_length, declaring_class_def_index);
heap_locations_.push_back(heap_loc);
return heap_loc;
}
@@ -403,12 +447,19 @@ class HeapLocationCollector : public HGraphVisitor {
}
const uint16_t declaring_class_def_index = field_info.GetDeclaringClassDefIndex();
const size_t offset = field_info.GetFieldOffset().SizeValue();
- return GetOrCreateHeapLocation(ref, offset, nullptr, declaring_class_def_index);
+ return GetOrCreateHeapLocation(ref,
+ offset,
+ nullptr,
+ HeapLocation::kScalar,
+ declaring_class_def_index);
}
- void VisitArrayAccess(HInstruction* array, HInstruction* index) {
- GetOrCreateHeapLocation(array, HeapLocation::kInvalidFieldOffset,
- index, HeapLocation::kDeclaringClassDefIndexForArrays);
+ void VisitArrayAccess(HInstruction* array, HInstruction* index, size_t vector_length) {
+ GetOrCreateHeapLocation(array,
+ HeapLocation::kInvalidFieldOffset,
+ index,
+ vector_length,
+ HeapLocation::kDeclaringClassDefIndexForArrays);
}
void VisitInstanceFieldGet(HInstanceFieldGet* instruction) OVERRIDE {
@@ -456,12 +507,30 @@ class HeapLocationCollector : public HGraphVisitor {
// since we cannot accurately track the fields.
void VisitArrayGet(HArrayGet* instruction) OVERRIDE {
- VisitArrayAccess(instruction->InputAt(0), instruction->InputAt(1));
+ HInstruction* array = instruction->InputAt(0);
+ HInstruction* index = instruction->InputAt(1);
+ VisitArrayAccess(array, index, HeapLocation::kScalar);
CreateReferenceInfoForReferenceType(instruction);
}
void VisitArraySet(HArraySet* instruction) OVERRIDE {
- VisitArrayAccess(instruction->InputAt(0), instruction->InputAt(1));
+ HInstruction* array = instruction->InputAt(0);
+ HInstruction* index = instruction->InputAt(1);
+ VisitArrayAccess(array, index, HeapLocation::kScalar);
+ has_heap_stores_ = true;
+ }
+
+ void VisitVecLoad(HVecLoad* instruction) OVERRIDE {
+ HInstruction* array = instruction->InputAt(0);
+ HInstruction* index = instruction->InputAt(1);
+ VisitArrayAccess(array, index, instruction->GetVectorLength());
+ CreateReferenceInfoForReferenceType(instruction);
+ }
+
+ void VisitVecStore(HVecStore* instruction) OVERRIDE {
+ HInstruction* array = instruction->InputAt(0);
+ HInstruction* index = instruction->InputAt(1);
+ VisitArrayAccess(array, index, instruction->GetVectorLength());
has_heap_stores_ = true;
}
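Note: as the new comment on FindHeapLocationIndex() explains, a scalar access and a vector access at the same index are recorded as distinct heap locations; whether they alias is decided later by ComputeMayAlias()/MayAlias(). The toy model below (invented types and helpers, not ART code) illustrates that separation of location identity from aliasing:

```cpp
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

// Toy stand-in for a heap location keyed by (offset relative to a common index i,
// vector length). A length of 1 plays the role of HeapLocation::kScalar.
struct ToyLocation { int base; size_t vector_length; };

static std::vector<ToyLocation> gLocations;

// Identity: identical (base, vector_length) pairs share one index; any other pair gets a new one.
static size_t GetOrCreate(int base, size_t vector_length) {
  for (size_t i = 0; i < gLocations.size(); ++i) {
    if (gLocations[i].base == base && gLocations[i].vector_length == vector_length) return i;
  }
  gLocations.push_back({base, vector_length});
  return gLocations.size() - 1u;
}

// Aliasing: decided separately, using the same inclusive-interval overlap rule as the analysis.
static bool MayAlias(size_t a, size_t b) {
  int l1 = gLocations[a].base, h1 = l1 + static_cast<int>(gLocations[a].vector_length) - 1;
  int l2 = gLocations[b].base, h2 = l2 + static_cast<int>(gLocations[b].vector_length) - 1;
  return std::max(l1, l2) <= std::min(h1, h2);
}

int main() {
  size_t scalar = GetOrCreate(/* i+ */ 0, /* kScalar */ 1u);
  size_t vec4   = GetOrCreate(/* i+ */ 0, 4u);
  assert(scalar != vec4);          // distinct heap-location indexes, ...
  assert(MayAlias(scalar, vec4));  // ... which may nevertheless alias.
  return 0;
}
```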
diff --git a/compiler/optimizing/load_store_analysis_test.cc b/compiler/optimizing/load_store_analysis_test.cc
index b41e1e4d00..56361a8c90 100644
--- a/compiler/optimizing/load_store_analysis_test.cc
+++ b/compiler/optimizing/load_store_analysis_test.cc
@@ -78,11 +78,12 @@ TEST_F(LoadStoreAnalysisTest, ArrayHeapLocations) {
// Test queries on HeapLocationCollector's ref info and index records.
ReferenceInfo* ref = heap_location_collector.FindReferenceInfoOf(array);
- size_t field_off = HeapLocation::kInvalidFieldOffset;
+ size_t field = HeapLocation::kInvalidFieldOffset;
+ size_t vec = HeapLocation::kScalar;
size_t class_def = HeapLocation::kDeclaringClassDefIndexForArrays;
- size_t loc1 = heap_location_collector.FindHeapLocationIndex(ref, field_off, c1, class_def);
- size_t loc2 = heap_location_collector.FindHeapLocationIndex(ref, field_off, c2, class_def);
- size_t loc3 = heap_location_collector.FindHeapLocationIndex(ref, field_off, index, class_def);
+ size_t loc1 = heap_location_collector.FindHeapLocationIndex(ref, field, c1, vec, class_def);
+ size_t loc2 = heap_location_collector.FindHeapLocationIndex(ref, field, c2, vec, class_def);
+ size_t loc3 = heap_location_collector.FindHeapLocationIndex(ref, field, index, vec, class_def);
// must find this reference info for array in HeapLocationCollector.
ASSERT_TRUE(ref != nullptr);
// must find these heap locations;
@@ -167,10 +168,8 @@ TEST_F(LoadStoreAnalysisTest, FieldHeapLocations) {
// Test queries on HeapLocationCollector's ref info and index records.
ReferenceInfo* ref = heap_location_collector.FindReferenceInfoOf(object);
- size_t loc1 = heap_location_collector.FindHeapLocationIndex(
- ref, 10, nullptr, kUnknownClassDefIndex);
- size_t loc2 = heap_location_collector.FindHeapLocationIndex(
- ref, 20, nullptr, kUnknownClassDefIndex);
+ size_t loc1 = heap_location_collector.GetFieldHeapLocation(object, &get_field10->GetFieldInfo());
+ size_t loc2 = heap_location_collector.GetFieldHeapLocation(object, &get_field20->GetFieldInfo());
// must find references info for object and in HeapLocationCollector.
ASSERT_TRUE(ref != nullptr);
// must find these heap locations.
@@ -247,31 +246,236 @@ TEST_F(LoadStoreAnalysisTest, ArrayIndexAliasingTest) {
size_t loc2 = HeapLocationCollector::kHeapLocationNotFound;
// Test alias: array[0] and array[1]
- loc1 = heap_location_collector.GetArrayAccessHeapLocation(array, c0);
- loc2 = heap_location_collector.GetArrayAccessHeapLocation(array, c1);
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, c0);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, c1);
ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
// Test alias: array[i+0] and array[i-0]
- loc1 = heap_location_collector.GetArrayAccessHeapLocation(array, add0);
- loc2 = heap_location_collector.GetArrayAccessHeapLocation(array, sub0);
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, add0);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, sub0);
ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
// Test alias: array[i+1] and array[i-1]
- loc1 = heap_location_collector.GetArrayAccessHeapLocation(array, add1);
- loc2 = heap_location_collector.GetArrayAccessHeapLocation(array, sub1);
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, add1);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, sub1);
ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
// Test alias: array[i+1] and array[1-i]
- loc1 = heap_location_collector.GetArrayAccessHeapLocation(array, add1);
- loc2 = heap_location_collector.GetArrayAccessHeapLocation(array, rev_sub1);
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, add1);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, rev_sub1);
ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
// Test alias: array[i+1] and array[i-(-1)]
- loc1 = heap_location_collector.GetArrayAccessHeapLocation(array, add1);
- loc2 = heap_location_collector.GetArrayAccessHeapLocation(array, sub_neg1);
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, add1);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, sub_neg1);
ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
}
+TEST_F(LoadStoreAnalysisTest, ArrayAliasingTest) {
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
+ graph_->AddBlock(entry);
+ graph_->SetEntryBlock(entry);
+ graph_->BuildDominatorTree();
+
+ HInstruction* array = new (GetAllocator()) HParameterValue(
+ graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
+ HInstruction* index = new (GetAllocator()) HParameterValue(
+ graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kInt32);
+ HInstruction* c0 = graph_->GetIntConstant(0);
+ HInstruction* c1 = graph_->GetIntConstant(1);
+ HInstruction* c6 = graph_->GetIntConstant(6);
+ HInstruction* c8 = graph_->GetIntConstant(8);
+
+ HInstruction* arr_set_0 = new (GetAllocator()) HArraySet(array,
+ c0,
+ c0,
+ DataType::Type::kInt32,
+ 0);
+ HInstruction* arr_set_1 = new (GetAllocator()) HArraySet(array,
+ c1,
+ c0,
+ DataType::Type::kInt32,
+ 0);
+ HInstruction* arr_set_i = new (GetAllocator()) HArraySet(array,
+ index,
+ c0,
+ DataType::Type::kInt32,
+ 0);
+
+ HVecOperation* v1 = new (GetAllocator()) HVecReplicateScalar(GetAllocator(),
+ c1,
+ DataType::Type::kInt32,
+ 4,
+ kNoDexPc);
+ HVecOperation* v2 = new (GetAllocator()) HVecReplicateScalar(GetAllocator(),
+ c1,
+ DataType::Type::kInt32,
+ 2,
+ kNoDexPc);
+ HInstruction* i_add6 = new (GetAllocator()) HAdd(DataType::Type::kInt32, index, c6);
+ HInstruction* i_add8 = new (GetAllocator()) HAdd(DataType::Type::kInt32, index, c8);
+
+ HInstruction* vstore_0 = new (GetAllocator()) HVecStore(
+ GetAllocator(),
+ array,
+ c0,
+ v1,
+ DataType::Type::kInt32,
+ SideEffects::ArrayWriteOfType(DataType::Type::kInt32),
+ 4,
+ kNoDexPc);
+ HInstruction* vstore_1 = new (GetAllocator()) HVecStore(
+ GetAllocator(),
+ array,
+ c1,
+ v1,
+ DataType::Type::kInt32,
+ SideEffects::ArrayWriteOfType(DataType::Type::kInt32),
+ 4,
+ kNoDexPc);
+ HInstruction* vstore_8 = new (GetAllocator()) HVecStore(
+ GetAllocator(),
+ array,
+ c8,
+ v1,
+ DataType::Type::kInt32,
+ SideEffects::ArrayWriteOfType(DataType::Type::kInt32),
+ 4,
+ kNoDexPc);
+ HInstruction* vstore_i = new (GetAllocator()) HVecStore(
+ GetAllocator(),
+ array,
+ index,
+ v1,
+ DataType::Type::kInt32,
+ SideEffects::ArrayWriteOfType(DataType::Type::kInt32),
+ 4,
+ kNoDexPc);
+ HInstruction* vstore_i_add6 = new (GetAllocator()) HVecStore(
+ GetAllocator(),
+ array,
+ i_add6,
+ v1,
+ DataType::Type::kInt32,
+ SideEffects::ArrayWriteOfType(DataType::Type::kInt32),
+ 4,
+ kNoDexPc);
+ HInstruction* vstore_i_add8 = new (GetAllocator()) HVecStore(
+ GetAllocator(),
+ array,
+ i_add8,
+ v1,
+ DataType::Type::kInt32,
+ SideEffects::ArrayWriteOfType(DataType::Type::kInt32),
+ 4,
+ kNoDexPc);
+ HInstruction* vstore_i_add6_vlen2 = new (GetAllocator()) HVecStore(
+ GetAllocator(),
+ array,
+ i_add6,
+ v2,
+ DataType::Type::kInt32,
+ SideEffects::ArrayWriteOfType(DataType::Type::kInt32),
+ 2,
+ kNoDexPc);
+
+ entry->AddInstruction(array);
+ entry->AddInstruction(index);
+
+ entry->AddInstruction(arr_set_0);
+ entry->AddInstruction(arr_set_1);
+ entry->AddInstruction(arr_set_i);
+ entry->AddInstruction(v1);
+ entry->AddInstruction(v2);
+ entry->AddInstruction(i_add6);
+ entry->AddInstruction(i_add8);
+ entry->AddInstruction(vstore_0);
+ entry->AddInstruction(vstore_1);
+ entry->AddInstruction(vstore_8);
+ entry->AddInstruction(vstore_i);
+ entry->AddInstruction(vstore_i_add6);
+ entry->AddInstruction(vstore_i_add8);
+ entry->AddInstruction(vstore_i_add6_vlen2);
+
+ LoadStoreAnalysis lsa(graph_);
+ lsa.Run();
+ const HeapLocationCollector& heap_location_collector = lsa.GetHeapLocationCollector();
+
+ // LSA/HeapLocationCollector should see those instructions.
+ ASSERT_EQ(heap_location_collector.GetNumberOfHeapLocations(), 10U);
+ ASSERT_TRUE(heap_location_collector.HasHeapStores());
+
+ // Test queries on HeapLocationCollector's aliasing matrix after load store analysis.
+ size_t loc1, loc2;
+
+ // Test alias: array[0] and array[0,1,2,3]
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, c0);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, c0, 4);
+ ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
+
+ // Test alias: array[0] and array[8,9,10,11]
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, c0);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, c8, 4);
+ ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
+
+ // Test alias: array[1] and array[8,9,10,11]
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, c1);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, c8, 4);
+ ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
+
+ // Test alias: array[1] and array[0,1,2,3]
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, c1);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, c0, 4);
+ ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
+
+ // Test alias: array[0,1,2,3] and array[8,9,10,11]
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, c0, 4);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, c8, 4);
+ ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
+
+ // Test alias: array[0,1,2,3] and array[1,2,3,4]
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, c1, 4);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, c0, 4);
+ ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
+
+ // Test alias: array[0] and array[i,i+1,i+2,i+3]
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, c0);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, index, 4);
+ ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
+
+ // Test alias: array[i] and array[0,1,2,3]
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, index);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, c0, 4);
+ ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
+
+ // Test alias: array[i] and array[i,i+1,i+2,i+3]
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, index);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, index, 4);
+ ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
+
+ // Test alias: array[i] and array[i+8,i+9,i+10,i+11]
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, index);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, i_add8, 4);
+ ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
+
+ // Test alias: array[i+6,i+7,i+8,i+9] and array[i+8,i+9,i+10,i+11]
+ // Test partial overlap.
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, i_add6, 4);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, i_add8, 4);
+ ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
+
+ // Test alias: array[i+6,i+7] and array[i,i+1,i+2,i+3]
+ // Test different vector lengths.
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, i_add6, 2);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, index, 4);
+ ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
+
+ // Test alias: array[i+6,i+7] and array[i+8,i+9,i+10,i+11]
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, i_add6, 2);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, i_add8, 4);
+ ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
+}
+
TEST_F(LoadStoreAnalysisTest, ArrayIndexCalculationOverflowTest) {
HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry);
@@ -359,33 +563,33 @@ TEST_F(LoadStoreAnalysisTest, ArrayIndexCalculationOverflowTest) {
size_t loc2 = HeapLocationCollector::kHeapLocationNotFound;
// Test alias: array[i+0x80000000] and array[i-0x80000000]
- loc1 = heap_location_collector.GetArrayAccessHeapLocation(array, add_0x80000000);
- loc2 = heap_location_collector.GetArrayAccessHeapLocation(array, sub_0x80000000);
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, add_0x80000000);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, sub_0x80000000);
ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
// Test alias: array[i+0x10] and array[i-0xFFFFFFF0]
- loc1 = heap_location_collector.GetArrayAccessHeapLocation(array, add_0x10);
- loc2 = heap_location_collector.GetArrayAccessHeapLocation(array, sub_0xFFFFFFF0);
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, add_0x10);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, sub_0xFFFFFFF0);
ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
// Test alias: array[i+0x7FFFFFFF] and array[i-0x80000001]
- loc1 = heap_location_collector.GetArrayAccessHeapLocation(array, add_0x7FFFFFFF);
- loc2 = heap_location_collector.GetArrayAccessHeapLocation(array, sub_0x80000001);
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, add_0x7FFFFFFF);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, sub_0x80000001);
ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
// Test alias: array[i+0] and array[i-0]
- loc1 = heap_location_collector.GetArrayAccessHeapLocation(array, add_0);
- loc2 = heap_location_collector.GetArrayAccessHeapLocation(array, sub_0);
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, add_0);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, sub_0);
ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
// Should not alias:
- loc1 = heap_location_collector.GetArrayAccessHeapLocation(array, sub_0x80000000);
- loc2 = heap_location_collector.GetArrayAccessHeapLocation(array, sub_0x80000001);
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, sub_0x80000000);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, sub_0x80000001);
ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
// Should not alias:
- loc1 = heap_location_collector.GetArrayAccessHeapLocation(array, add_0);
- loc2 = heap_location_collector.GetArrayAccessHeapLocation(array, sub_0x80000000);
+ loc1 = heap_location_collector.GetArrayHeapLocation(array, add_0);
+ loc2 = heap_location_collector.GetArrayHeapLocation(array, sub_0x80000000);
ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
}
@@ -443,10 +647,10 @@ TEST_F(LoadStoreAnalysisTest, TestHuntOriginalRef) {
// times the original reference has been transformed by BoundType,
// NullCheck, IntermediateAddress, etc.
ASSERT_EQ(heap_location_collector.GetNumberOfHeapLocations(), 1U);
- size_t loc1 = heap_location_collector.GetArrayAccessHeapLocation(array, c1);
- size_t loc2 = heap_location_collector.GetArrayAccessHeapLocation(bound_type, c1);
- size_t loc3 = heap_location_collector.GetArrayAccessHeapLocation(null_check, c1);
- size_t loc4 = heap_location_collector.GetArrayAccessHeapLocation(inter_addr, c1);
+ size_t loc1 = heap_location_collector.GetArrayHeapLocation(array, c1);
+ size_t loc2 = heap_location_collector.GetArrayHeapLocation(bound_type, c1);
+ size_t loc3 = heap_location_collector.GetArrayHeapLocation(null_check, c1);
+ size_t loc4 = heap_location_collector.GetArrayHeapLocation(inter_addr, c1);
ASSERT_TRUE(loc1 != HeapLocationCollector::kHeapLocationNotFound);
ASSERT_EQ(loc1, loc2);
ASSERT_EQ(loc1, loc3);
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index c89961353b..8678fab655 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -302,11 +302,12 @@ class LSEVisitor : public HGraphDelegateVisitor {
HInstruction* ref,
size_t offset,
HInstruction* index,
+ size_t vector_length,
int16_t declaring_class_def_index) {
HInstruction* original_ref = heap_location_collector_.HuntForOriginalReference(ref);
ReferenceInfo* ref_info = heap_location_collector_.FindReferenceInfoOf(original_ref);
size_t idx = heap_location_collector_.FindHeapLocationIndex(
- ref_info, offset, index, declaring_class_def_index);
+ ref_info, offset, index, vector_length, declaring_class_def_index);
DCHECK_NE(idx, HeapLocationCollector::kHeapLocationNotFound);
ScopedArenaVector<HInstruction*>& heap_values =
heap_values_for_[instruction->GetBlock()->GetBlockId()];
@@ -367,12 +368,13 @@ class LSEVisitor : public HGraphDelegateVisitor {
HInstruction* ref,
size_t offset,
HInstruction* index,
+ size_t vector_length,
int16_t declaring_class_def_index,
HInstruction* value) {
HInstruction* original_ref = heap_location_collector_.HuntForOriginalReference(ref);
ReferenceInfo* ref_info = heap_location_collector_.FindReferenceInfoOf(original_ref);
size_t idx = heap_location_collector_.FindHeapLocationIndex(
- ref_info, offset, index, declaring_class_def_index);
+ ref_info, offset, index, vector_length, declaring_class_def_index);
DCHECK_NE(idx, HeapLocationCollector::kHeapLocationNotFound);
ScopedArenaVector<HInstruction*>& heap_values =
heap_values_for_[instruction->GetBlock()->GetBlockId()];
@@ -446,7 +448,12 @@ class LSEVisitor : public HGraphDelegateVisitor {
HInstruction* obj = instruction->InputAt(0);
size_t offset = instruction->GetFieldInfo().GetFieldOffset().SizeValue();
int16_t declaring_class_def_index = instruction->GetFieldInfo().GetDeclaringClassDefIndex();
- VisitGetLocation(instruction, obj, offset, nullptr, declaring_class_def_index);
+ VisitGetLocation(instruction,
+ obj,
+ offset,
+ nullptr,
+ HeapLocation::kScalar,
+ declaring_class_def_index);
}
void VisitInstanceFieldSet(HInstanceFieldSet* instruction) OVERRIDE {
@@ -454,14 +461,25 @@ class LSEVisitor : public HGraphDelegateVisitor {
size_t offset = instruction->GetFieldInfo().GetFieldOffset().SizeValue();
int16_t declaring_class_def_index = instruction->GetFieldInfo().GetDeclaringClassDefIndex();
HInstruction* value = instruction->InputAt(1);
- VisitSetLocation(instruction, obj, offset, nullptr, declaring_class_def_index, value);
+ VisitSetLocation(instruction,
+ obj,
+ offset,
+ nullptr,
+ HeapLocation::kScalar,
+ declaring_class_def_index,
+ value);
}
void VisitStaticFieldGet(HStaticFieldGet* instruction) OVERRIDE {
HInstruction* cls = instruction->InputAt(0);
size_t offset = instruction->GetFieldInfo().GetFieldOffset().SizeValue();
int16_t declaring_class_def_index = instruction->GetFieldInfo().GetDeclaringClassDefIndex();
- VisitGetLocation(instruction, cls, offset, nullptr, declaring_class_def_index);
+ VisitGetLocation(instruction,
+ cls,
+ offset,
+ nullptr,
+ HeapLocation::kScalar,
+ declaring_class_def_index);
}
void VisitStaticFieldSet(HStaticFieldSet* instruction) OVERRIDE {
@@ -469,7 +487,13 @@ class LSEVisitor : public HGraphDelegateVisitor {
size_t offset = instruction->GetFieldInfo().GetFieldOffset().SizeValue();
int16_t declaring_class_def_index = instruction->GetFieldInfo().GetDeclaringClassDefIndex();
HInstruction* value = instruction->InputAt(1);
- VisitSetLocation(instruction, cls, offset, nullptr, declaring_class_def_index, value);
+ VisitSetLocation(instruction,
+ cls,
+ offset,
+ nullptr,
+ HeapLocation::kScalar,
+ declaring_class_def_index,
+ value);
}
void VisitArrayGet(HArrayGet* instruction) OVERRIDE {
@@ -479,6 +503,7 @@ class LSEVisitor : public HGraphDelegateVisitor {
array,
HeapLocation::kInvalidFieldOffset,
index,
+ HeapLocation::kScalar,
HeapLocation::kDeclaringClassDefIndexForArrays);
}
@@ -490,6 +515,7 @@ class LSEVisitor : public HGraphDelegateVisitor {
array,
HeapLocation::kInvalidFieldOffset,
index,
+ HeapLocation::kScalar,
HeapLocation::kDeclaringClassDefIndexForArrays,
value);
}
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 9233eb5baf..252d53823a 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -311,13 +311,11 @@ class OptimizingCompiler FINAL : public Compiler {
CompiledMethod* JniCompile(uint32_t access_flags,
uint32_t method_idx,
- const DexFile& dex_file,
- JniOptimizationFlags optimization_flags) const OVERRIDE {
+ const DexFile& dex_file) const OVERRIDE {
return ArtQuickJniCompileMethod(GetCompilerDriver(),
access_flags,
method_idx,
- dex_file,
- optimization_flags);
+ dex_file);
}
uintptr_t GetEntryPointOf(ArtMethod* method) const OVERRIDE
diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc
index 8cc376c3a6..bb28d50b56 100644
--- a/compiler/optimizing/scheduler.cc
+++ b/compiler/optimizing/scheduler.cc
@@ -72,7 +72,7 @@ static bool MayHaveReorderingDependency(SideEffects node, SideEffects other) {
size_t SchedulingGraph::ArrayAccessHeapLocation(HInstruction* array, HInstruction* index) const {
DCHECK(heap_location_collector_ != nullptr);
- size_t heap_loc = heap_location_collector_->GetArrayAccessHeapLocation(array, index);
+ size_t heap_loc = heap_location_collector_->GetArrayHeapLocation(array, index);
// This array access should be analyzed and added to HeapLocationCollector before.
DCHECK(heap_loc != HeapLocationCollector::kHeapLocationNotFound);
return heap_loc;
@@ -153,12 +153,7 @@ size_t SchedulingGraph::FieldAccessHeapLocation(HInstruction* obj, const FieldIn
DCHECK(field != nullptr);
DCHECK(heap_location_collector_ != nullptr);
- size_t heap_loc = heap_location_collector_->FindHeapLocationIndex(
- heap_location_collector_->FindReferenceInfoOf(
- heap_location_collector_->HuntForOriginalReference(obj)),
- field->GetFieldOffset().SizeValue(),
- nullptr,
- field->GetDeclaringClassDefIndex());
+ size_t heap_loc = heap_location_collector_->GetFieldHeapLocation(obj, field);
// This field access should be analyzed and added to HeapLocationCollector before.
DCHECK(heap_loc != HeapLocationCollector::kHeapLocationNotFound);
diff --git a/compiler/optimizing/scheduler_test.cc b/compiler/optimizing/scheduler_test.cc
index 75dce81550..104ebc79c2 100644
--- a/compiler/optimizing/scheduler_test.cc
+++ b/compiler/optimizing/scheduler_test.cc
@@ -294,38 +294,38 @@ class SchedulerTest : public OptimizingUnitTest {
size_t loc2 = HeapLocationCollector::kHeapLocationNotFound;
// Test side effect dependency: array[0] and array[1]
- loc1 = heap_location_collector.GetArrayAccessHeapLocation(arr, c0);
- loc2 = heap_location_collector.GetArrayAccessHeapLocation(arr, c1);
+ loc1 = heap_location_collector.GetArrayHeapLocation(arr, c0);
+ loc2 = heap_location_collector.GetArrayHeapLocation(arr, c1);
ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
ASSERT_FALSE(scheduling_graph.HasImmediateOtherDependency(arr_set_1, arr_set_0));
// Test side effect dependency based on LSA analysis: array[i] and array[j]
- loc1 = heap_location_collector.GetArrayAccessHeapLocation(arr, i);
- loc2 = heap_location_collector.GetArrayAccessHeapLocation(arr, j);
+ loc1 = heap_location_collector.GetArrayHeapLocation(arr, i);
+ loc2 = heap_location_collector.GetArrayHeapLocation(arr, j);
ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(arr_set_j, arr_set_i));
// Test side effect dependency based on LSA analysis: array[i] and array[i+0]
- loc1 = heap_location_collector.GetArrayAccessHeapLocation(arr, i);
- loc2 = heap_location_collector.GetArrayAccessHeapLocation(arr, add0);
+ loc1 = heap_location_collector.GetArrayHeapLocation(arr, i);
+ loc2 = heap_location_collector.GetArrayHeapLocation(arr, add0);
ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(arr_set_add0, arr_set_i));
// Test side effect dependency based on LSA analysis: array[i] and array[i-0]
- loc1 = heap_location_collector.GetArrayAccessHeapLocation(arr, i);
- loc2 = heap_location_collector.GetArrayAccessHeapLocation(arr, sub0);
+ loc1 = heap_location_collector.GetArrayHeapLocation(arr, i);
+ loc2 = heap_location_collector.GetArrayHeapLocation(arr, sub0);
ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(arr_set_sub0, arr_set_i));
// Test side effect dependency based on LSA analysis: array[i] and array[i+1]
- loc1 = heap_location_collector.GetArrayAccessHeapLocation(arr, i);
- loc2 = heap_location_collector.GetArrayAccessHeapLocation(arr, add1);
+ loc1 = heap_location_collector.GetArrayHeapLocation(arr, i);
+ loc2 = heap_location_collector.GetArrayHeapLocation(arr, add1);
ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
ASSERT_FALSE(scheduling_graph.HasImmediateOtherDependency(arr_set_add1, arr_set_i));
// Test side effect dependency based on LSA analysis: array[i+1] and array[i-1]
- loc1 = heap_location_collector.GetArrayAccessHeapLocation(arr, add1);
- loc2 = heap_location_collector.GetArrayAccessHeapLocation(arr, sub1);
+ loc1 = heap_location_collector.GetArrayHeapLocation(arr, add1);
+ loc2 = heap_location_collector.GetArrayHeapLocation(arr, sub1);
ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
ASSERT_FALSE(scheduling_graph.HasImmediateOtherDependency(arr_set_sub1, arr_set_add1));
diff --git a/openjdkjvmti/ti_redefine.cc b/openjdkjvmti/ti_redefine.cc
index c4f16f5e2d..dcc237d6d6 100644
--- a/openjdkjvmti/ti_redefine.cc
+++ b/openjdkjvmti/ti_redefine.cc
@@ -629,8 +629,8 @@ bool Redefiner::ClassRedefinition::CheckSameMethods() {
// Since direct methods have different flags than virtual ones (specifically direct methods must
// have kAccPrivate or kAccStatic or kAccConstructor flags) we can tell if a method changes from
// virtual to direct.
- uint32_t new_flags = new_iter.GetMethodAccessFlags() & ~art::kAccPreviouslyWarm;
- if (new_flags != (old_method->GetAccessFlags() & (art::kAccValidMethodFlags ^ art::kAccPreviouslyWarm))) {
+ uint32_t new_flags = new_iter.GetMethodAccessFlags();
+ if (new_flags != (old_method->GetAccessFlags() & art::kAccValidMethodFlags)) {
RecordFailure(ERR(UNSUPPORTED_REDEFINITION_METHOD_MODIFIERS_CHANGED),
StringPrintf("method '%s' (sig: %s) had different access flags",
new_method_name,
diff --git a/profman/boot_image_profile.cc b/profman/boot_image_profile.cc
index 4092f6ed98..e5645d370c 100644
--- a/profman/boot_image_profile.cc
+++ b/profman/boot_image_profile.cc
@@ -92,7 +92,7 @@ void GenerateBootImageProfile(
it.SkipInstanceFields();
while (it.HasNextDirectMethod() || it.HasNextVirtualMethod()) {
const uint32_t flags = it.GetMethodAccessFlags();
- if ((flags & kAccNative) != 0 || (flags & kAccFastNative) != 0) {
+ if ((flags & kAccNative) != 0) {
// Native method will get dirtied.
is_clean = false;
break;
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index eb16e6eaa2..50913def93 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -393,6 +393,7 @@ inline void ArtMethod::SetIntrinsic(uint32_t intrinsic) {
bool is_synchronized = IsSynchronized();
bool skip_access_checks = SkipAccessChecks();
bool is_fast_native = IsFastNative();
+ bool is_critical_native = IsCriticalNative();
bool is_copied = IsCopied();
bool is_miranda = IsMiranda();
bool is_default = IsDefault();
@@ -405,6 +406,7 @@ inline void ArtMethod::SetIntrinsic(uint32_t intrinsic) {
DCHECK_EQ(is_synchronized, IsSynchronized());
DCHECK_EQ(skip_access_checks, SkipAccessChecks());
DCHECK_EQ(is_fast_native, IsFastNative());
+ DCHECK_EQ(is_critical_native, IsCriticalNative());
DCHECK_EQ(is_copied, IsCopied());
DCHECK_EQ(is_miranda, IsMiranda());
DCHECK_EQ(is_default, IsDefault());
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 8709643c3e..0a108f93c5 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -26,7 +26,6 @@
#include "class_linker-inl.h"
#include "debugger.h"
#include "dex_file-inl.h"
-#include "dex_file_annotations.h"
#include "dex_instruction.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
@@ -392,13 +391,9 @@ void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue*
self->PopManagedStackFragment(fragment);
}
-const void* ArtMethod::RegisterNative(const void* native_method, bool is_fast) {
+const void* ArtMethod::RegisterNative(const void* native_method) {
CHECK(IsNative()) << PrettyMethod();
- CHECK(!IsFastNative()) << PrettyMethod();
CHECK(native_method != nullptr) << PrettyMethod();
- if (is_fast) {
- AddAccessFlags(kAccFastNative);
- }
void* new_native_method = nullptr;
Runtime::Current()->GetRuntimeCallbacks()->RegisterNativeMethod(this,
native_method,
@@ -408,7 +403,7 @@ const void* ArtMethod::RegisterNative(const void* native_method, bool is_fast) {
}
void ArtMethod::UnregisterNative() {
- CHECK(IsNative() && !IsFastNative()) << PrettyMethod();
+ CHECK(IsNative()) << PrettyMethod();
// restore stub to lookup native pointer via dlsym
SetEntryPointFromJni(GetJniDlsymLookupStub());
}
@@ -428,18 +423,6 @@ bool ArtMethod::IsPolymorphicSignature() {
cls == WellKnownClasses::ToClass(WellKnownClasses::java_lang_invoke_VarHandle));
}
-bool ArtMethod::IsAnnotatedWithFastNative() {
- ScopedObjectAccess soa(Thread::Current());
- return annotations::HasFastNativeMethodBuildAnnotation(
- *GetDexFile(), GetClassDef(), GetDexMethodIndex());
-}
-
-bool ArtMethod::IsAnnotatedWithCriticalNative() {
- ScopedObjectAccess soa(Thread::Current());
- return annotations::HasCriticalNativeMethodBuildAnnotation(
- *GetDexFile(), GetClassDef(), GetDexMethodIndex());
-}
-
static uint32_t GetOatMethodIndexFromMethodIndex(const DexFile& dex_file,
uint16_t class_def_idx,
uint32_t method_idx) {
diff --git a/runtime/art_method.h b/runtime/art_method.h
index df9b3aa852..c17eef1834 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -202,9 +202,9 @@ class ArtMethod FINAL {
}
bool IsMiranda() {
- static_assert((kAccMiranda & (kAccIntrinsic | kAccIntrinsicBits)) == 0,
- "kAccMiranda conflicts with intrinsic modifier");
- return (GetAccessFlags() & kAccMiranda) != 0;
+ // The kAccMiranda flag value is used with a different meaning for native methods,
+ // so we need to check the kAccNative flag as well.
+ return (GetAccessFlags() & (kAccNative | kAccMiranda)) == kAccMiranda;
}
// Returns true if invoking this method will not throw an AbstractMethodError or
@@ -215,6 +215,7 @@ class ArtMethod FINAL {
bool IsCompilable() {
if (IsIntrinsic()) {
+ // kAccCompileDontBother overlaps with kAccIntrinsicBits.
return true;
}
return (GetAccessFlags() & kAccCompileDontBother) == 0;
@@ -254,11 +255,24 @@ class ArtMethod FINAL {
return (GetAccessFlags<kReadBarrierOption>() & kAccNative) != 0;
}
+ // Checks to see if the method was annotated with @dalvik.annotation.optimization.FastNative.
bool IsFastNative() {
+ // The presence of the annotation is checked by ClassLinker and recorded in access flags.
+ // The kAccFastNative flag value is used with a different meaning for non-native methods,
+ // so we need to check the kAccNative flag as well.
constexpr uint32_t mask = kAccFastNative | kAccNative;
return (GetAccessFlags() & mask) == mask;
}
+ // Checks to see if the method was annotated with @dalvik.annotation.optimization.CriticalNative.
+ bool IsCriticalNative() {
+ // The presence of the annotation is checked by ClassLinker and recorded in access flags.
+ // The kAccCriticalNative flag value is used with a different meaning for non-native methods,
+ // so we need to check the kAccNative flag as well.
+ constexpr uint32_t mask = kAccCriticalNative | kAccNative;
+ return (GetAccessFlags() & mask) == mask;
+ }
+
bool IsAbstract() {
return (GetAccessFlags() & kAccAbstract) != 0;
}
@@ -276,10 +290,14 @@ class ArtMethod FINAL {
bool IsPolymorphicSignature() REQUIRES_SHARED(Locks::mutator_lock_);
bool SkipAccessChecks() {
- return (GetAccessFlags() & kAccSkipAccessChecks) != 0;
+ // The kAccSkipAccessChecks flag value is used with a different meaning for native methods,
+ // so we need to check the kAccNative flag as well.
+ return (GetAccessFlags() & (kAccSkipAccessChecks | kAccNative)) == kAccSkipAccessChecks;
}
void SetSkipAccessChecks() {
+ // SkipAccessChecks() is applicable only to non-native methods.
+ DCHECK(!IsNative<kWithoutReadBarrier>());
AddAccessFlags(kAccSkipAccessChecks);
}
@@ -312,14 +330,6 @@ class ArtMethod FINAL {
AddAccessFlags(kAccMustCountLocks);
}
- // Checks to see if the method was annotated with @dalvik.annotation.optimization.FastNative
- // -- Independent of kAccFastNative access flags.
- bool IsAnnotatedWithFastNative();
-
- // Checks to see if the method was annotated with @dalvik.annotation.optimization.CriticalNative
- // -- Unrelated to the GC notion of "critical".
- bool IsAnnotatedWithCriticalNative();
-
// Returns true if this method could be overridden by a default method.
bool IsOverridableByDefaultMethod() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -419,7 +429,7 @@ class ArtMethod FINAL {
// Registers the native method and returns the new entry point. NB The returned entry point might
// be different from the native_method argument if some MethodCallback modifies it.
- const void* RegisterNative(const void* native_method, bool is_fast)
+ const void* RegisterNative(const void* native_method)
REQUIRES_SHARED(Locks::mutator_lock_) WARN_UNUSED;
void UnregisterNative() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -454,7 +464,7 @@ class ArtMethod FINAL {
// where the declaring class is treated as a weak reference (accessing it with
// a read barrier would either prevent unloading the class, or crash the runtime if
// the GC wants to unload it).
- DCHECK(!IsNative<kWithoutReadBarrier>());
+ DCHECK(!IsNative());
if (UNLIKELY(IsProxyMethod())) {
return nullptr;
}
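The repeated "check the kAccNative flag as well" comments come from the fact that a single bit value now carries two meanings. A self-contained sketch of the disambiguation, with constant values assumed from runtime/modifiers.h (illustration only, not ART code):

#include <cassert>
#include <cstdint>

constexpr uint32_t kAccNative           = 0x0100;      // dex access flag
constexpr uint32_t kAccSkipAccessChecks = 0x00080000;  // runtime flag, non-native methods only
constexpr uint32_t kAccFastNative       = 0x00080000;  // same bit value, native methods only

constexpr bool IsFastNative(uint32_t flags) {
  return (flags & (kAccFastNative | kAccNative)) == (kAccFastNative | kAccNative);
}
constexpr bool SkipAccessChecks(uint32_t flags) {
  return (flags & (kAccSkipAccessChecks | kAccNative)) == kAccSkipAccessChecks;
}

int main() {
  uint32_t verified_managed = kAccSkipAccessChecks;     // verified non-native method
  uint32_t fast_native = kAccNative | kAccFastNative;   // @FastNative method
  assert(SkipAccessChecks(verified_managed) && !IsFastNative(verified_managed));
  assert(IsFastNative(fast_native) && !SkipAccessChecks(fast_native));
  return 0;
}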
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 28caf81e5b..38dd7612f2 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -3340,6 +3340,11 @@ void ClassLinker::LoadMethod(const DexFile& dex_file,
}
}
}
+ if (UNLIKELY((access_flags & kAccNative) != 0u)) {
+ // Check if the native method is annotated with @FastNative or @CriticalNative.
+ access_flags |= annotations::GetNativeMethodAnnotationAccessFlags(
+ dex_file, dst->GetClassDef(), dex_method_idx);
+ }
dst->SetAccessFlags(access_flags);
}
@@ -7048,6 +7053,7 @@ void ClassLinker::LinkInterfaceMethodsHelper::ReallocMethods() {
// verified yet it shouldn't have methods that are skipping access checks.
// TODO This is rather arbitrary. We should maybe support classes where only some of its
// methods are skip_access_checks.
+ DCHECK_EQ(new_method.GetAccessFlags() & kAccNative, 0u);
constexpr uint32_t kSetFlags = kAccDefault | kAccCopied;
constexpr uint32_t kMaskFlags = ~kAccSkipAccessChecks;
new_method.SetAccessFlags((new_method.GetAccessFlags() | kSetFlags) & kMaskFlags);
@@ -7070,6 +7076,7 @@ void ClassLinker::LinkInterfaceMethodsHelper::ReallocMethods() {
     // mark this as a default, non-abstract method, since that's what it is. Also clear the
// kAccSkipAccessChecks bit since this class hasn't been verified yet it shouldn't have
// methods that are skipping access checks.
+ DCHECK_EQ(new_method.GetAccessFlags() & kAccNative, 0u);
constexpr uint32_t kSetFlags = kAccDefault | kAccDefaultConflict | kAccCopied;
constexpr uint32_t kMaskFlags = ~(kAccAbstract | kAccSkipAccessChecks);
new_method.SetAccessFlags((new_method.GetAccessFlags() | kSetFlags) & kMaskFlags);
diff --git a/runtime/dex_file_annotations.cc b/runtime/dex_file_annotations.cc
index 5496efd108..27060aeff6 100644
--- a/runtime/dex_file_annotations.cc
+++ b/runtime/dex_file_annotations.cc
@@ -1239,8 +1239,11 @@ static void DCheckNativeAnnotation(const char* descriptor, jclass cls) {
ScopedObjectAccess soa(Thread::Current());
ObjPtr<mirror::Class> klass = soa.Decode<mirror::Class>(cls);
ClassLinker* linker = Runtime::Current()->GetClassLinker();
- // Lookup using the boot class path loader should yield the annotation class.
- CHECK_EQ(klass, linker->LookupClass(soa.Self(), descriptor, /* class_loader */ nullptr));
+ // WellKnownClasses may not be initialized yet, so `klass` may be null.
+ if (klass != nullptr) {
+ // Lookup using the boot class path loader should yield the annotation class.
+ CHECK_EQ(klass, linker->LookupClass(soa.Self(), descriptor, /* class_loader */ nullptr));
+ }
}
}
@@ -1266,30 +1269,31 @@ static bool IsMethodBuildAnnotationPresent(const DexFile& dex_file,
return false;
}
-uint32_t HasFastNativeMethodBuildAnnotation(const DexFile& dex_file,
- const DexFile::ClassDef& class_def,
- uint32_t method_index) {
- const DexFile::AnnotationSetItem* annotation_set =
- FindAnnotationSetForMethod(dex_file, class_def, method_index);
- return annotation_set != nullptr &&
- IsMethodBuildAnnotationPresent(
- dex_file,
- *annotation_set,
- "Ldalvik/annotation/optimization/FastNative;",
- WellKnownClasses::dalvik_annotation_optimization_FastNative);
-}
-
-uint32_t HasCriticalNativeMethodBuildAnnotation(const DexFile& dex_file,
- const DexFile::ClassDef& class_def,
- uint32_t method_index) {
+uint32_t GetNativeMethodAnnotationAccessFlags(const DexFile& dex_file,
+ const DexFile::ClassDef& class_def,
+ uint32_t method_index) {
const DexFile::AnnotationSetItem* annotation_set =
FindAnnotationSetForMethod(dex_file, class_def, method_index);
- return annotation_set != nullptr &&
- IsMethodBuildAnnotationPresent(
- dex_file,
- *annotation_set,
- "Ldalvik/annotation/optimization/CriticalNative;",
- WellKnownClasses::dalvik_annotation_optimization_CriticalNative);
+ if (annotation_set == nullptr) {
+ return 0u;
+ }
+ uint32_t access_flags = 0u;
+ if (IsMethodBuildAnnotationPresent(
+ dex_file,
+ *annotation_set,
+ "Ldalvik/annotation/optimization/FastNative;",
+ WellKnownClasses::dalvik_annotation_optimization_FastNative)) {
+ access_flags |= kAccFastNative;
+ }
+ if (IsMethodBuildAnnotationPresent(
+ dex_file,
+ *annotation_set,
+ "Ldalvik/annotation/optimization/CriticalNative;",
+ WellKnownClasses::dalvik_annotation_optimization_CriticalNative)) {
+ access_flags |= kAccCriticalNative;
+ }
+ CHECK_NE(access_flags, kAccFastNative | kAccCriticalNative);
+ return access_flags;
}
mirror::Object* GetAnnotationForClass(Handle<mirror::Class> klass,
diff --git a/runtime/dex_file_annotations.h b/runtime/dex_file_annotations.h
index b1c6b7742c..a934a4f99c 100644
--- a/runtime/dex_file_annotations.h
+++ b/runtime/dex_file_annotations.h
@@ -75,15 +75,12 @@ bool IsMethodAnnotationPresent(ArtMethod* method,
uint32_t visibility = DexFile::kDexVisibilityRuntime)
REQUIRES_SHARED(Locks::mutator_lock_);
// Check whether a method from the `dex_file` with the given `method_index`
-// is annotated with @dalvik.annotation.optimization.FastNative with build visibility.
-uint32_t HasFastNativeMethodBuildAnnotation(const DexFile& dex_file,
- const DexFile::ClassDef& class_def,
- uint32_t method_index);
-// Check whether a method from the `dex_file` with the given `method_index`
-// is annotated with @dalvik.annotation.optimization.CriticalNative with build visibility.
-uint32_t HasCriticalNativeMethodBuildAnnotation(const DexFile& dex_file,
- const DexFile::ClassDef& class_def,
- uint32_t method_index);
+// is annotated with @dalvik.annotation.optimization.FastNative or
+// @dalvik.annotation.optimization.CriticalNative with build visibility.
+// If yes, return the associated access flags, i.e. kAccFastNative or kAccCriticalNative.
+uint32_t GetNativeMethodAnnotationAccessFlags(const DexFile& dex_file,
+ const DexFile::ClassDef& class_def,
+ uint32_t method_index);
// Class annotations.
mirror::Object* GetAnnotationForClass(Handle<mirror::Class> klass,
diff --git a/runtime/entrypoints/jni/jni_entrypoints.cc b/runtime/entrypoints/jni/jni_entrypoints.cc
index dd0819ed8f..7ec360a93c 100644
--- a/runtime/entrypoints/jni/jni_entrypoints.cc
+++ b/runtime/entrypoints/jni/jni_entrypoints.cc
@@ -46,7 +46,7 @@ extern "C" const void* artFindNativeMethod(Thread* self) {
return nullptr;
}
// Register so that future calls don't come here
- return method->RegisterNative(native_code, false);
+ return method->RegisterNative(native_code);
}
} // namespace art
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index a8d2a34853..29a62c86ee 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -28,10 +28,7 @@ namespace art {
static_assert(sizeof(IRTSegmentState) == sizeof(uint32_t), "IRTSegmentState size unexpected");
static_assert(std::is_trivial<IRTSegmentState>::value, "IRTSegmentState not trivial");
-static bool kEnableAnnotationChecks = RegisterRuntimeDebugFlag(&kEnableAnnotationChecks);
-
-template <bool kDynamicFast>
-static inline void GoToRunnableFast(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
+static inline void GoToRunnableFast(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
extern void ReadBarrierJni(mirror::CompressedReference<mirror::Object>* handle_on_stack,
Thread* self ATTRIBUTE_UNUSED) {
@@ -56,9 +53,9 @@ extern uint32_t JniMethodFastStart(Thread* self) {
uint32_t saved_local_ref_cookie = bit_cast<uint32_t>(env->local_ref_cookie);
env->local_ref_cookie = env->locals.GetSegmentState();
- if (kIsDebugBuild && kEnableAnnotationChecks) {
+ if (kIsDebugBuild) {
ArtMethod* native_method = *self->GetManagedStack()->GetTopQuickFrame();
- CHECK(native_method->IsAnnotatedWithFastNative()) << native_method->PrettyMethod();
+ CHECK(native_method->IsFastNative()) << native_method->PrettyMethod();
}
return saved_local_ref_cookie;
@@ -71,6 +68,9 @@ extern uint32_t JniMethodStart(Thread* self) {
uint32_t saved_local_ref_cookie = bit_cast<uint32_t>(env->local_ref_cookie);
env->local_ref_cookie = env->locals.GetSegmentState();
ArtMethod* native_method = *self->GetManagedStack()->GetTopQuickFrame();
+ // TODO: Introduce special entrypoint for synchronized @FastNative methods?
+ // Or ban synchronized @FastNative outright to avoid the extra check here?
+ DCHECK(!native_method->IsFastNative() || native_method->IsSynchronized());
if (!native_method->IsFastNative()) {
// When not fast JNI we transition out of runnable.
self->TransitionFromRunnableToSuspended(kNative);
@@ -90,25 +90,18 @@ static void GoToRunnable(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
if (!is_fast) {
self->TransitionFromSuspendedToRunnable();
} else {
- GoToRunnableFast</*kDynamicFast*/true>(self);
+ GoToRunnableFast(self);
}
}
-// TODO: NO_THREAD_SAFETY_ANALYSIS due to different control paths depending on fast JNI.
-template <bool kDynamicFast>
-ALWAYS_INLINE static inline void GoToRunnableFast(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
- if (kIsDebugBuild && kEnableAnnotationChecks) {
- // Should only enter here if the method is !Fast JNI or @FastNative.
+ALWAYS_INLINE static inline void GoToRunnableFast(Thread* self) {
+ if (kIsDebugBuild) {
+ // Should only enter here if the method is @FastNative.
ArtMethod* native_method = *self->GetManagedStack()->GetTopQuickFrame();
-
- if (kDynamicFast) {
- CHECK(native_method->IsFastNative()) << native_method->PrettyMethod();
- } else {
- CHECK(native_method->IsAnnotatedWithFastNative()) << native_method->PrettyMethod();
- }
+ CHECK(native_method->IsFastNative()) << native_method->PrettyMethod();
}
- // When we are in "fast" JNI or @FastNative, we are already Runnable.
+ // When we are in @FastNative, we are already Runnable.
// Only do a suspend check on the way out of JNI.
if (UNLIKELY(self->TestAllFlags())) {
// In fast JNI mode we never transitioned out of runnable. Perform a suspend check if there
@@ -138,7 +131,7 @@ extern void JniMethodEnd(uint32_t saved_local_ref_cookie, Thread* self) {
}
extern void JniMethodFastEnd(uint32_t saved_local_ref_cookie, Thread* self) {
- GoToRunnableFast</*kDynamicFast*/false>(self);
+ GoToRunnableFast(self);
PopLocalReferences(saved_local_ref_cookie, self);
}
@@ -175,7 +168,7 @@ static mirror::Object* JniMethodEndWithReferenceHandleResult(jobject result,
extern mirror::Object* JniMethodFastEndWithReference(jobject result,
uint32_t saved_local_ref_cookie,
Thread* self) {
- GoToRunnableFast</*kDynamicFast*/false>(self);
+ GoToRunnableFast(self);
return JniMethodEndWithReferenceHandleResult(result, saved_local_ref_cookie, self);
}
@@ -203,8 +196,8 @@ extern uint64_t GenericJniMethodEnd(Thread* self,
HandleScope* handle_scope)
// TODO: NO_THREAD_SAFETY_ANALYSIS as GoToRunnable() is NO_THREAD_SAFETY_ANALYSIS
NO_THREAD_SAFETY_ANALYSIS {
- bool critical_native = called->IsAnnotatedWithCriticalNative();
- bool fast_native = called->IsAnnotatedWithFastNative();
+ bool critical_native = called->IsCriticalNative();
+ bool fast_native = called->IsFastNative();
bool normal_native = !critical_native && !fast_native;
// @Fast and @CriticalNative do not do a state transition.
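The practical difference between the two entry/exit paths above: a normal native call transitions the thread out of the Runnable state for the duration of the native code, while a @FastNative call stays Runnable and only performs a suspend check on the way out. A simplified model of that behavior (illustrative only; names and structure are not ART's):

#include <cassert>

enum class ThreadState { kRunnable, kNative };

struct Thread {
  ThreadState state = ThreadState::kRunnable;
  bool suspend_requested = false;
  int suspend_checks = 0;
  void FullSuspendCheck() { ++suspend_checks; }
};

void JniMethodStartModel(Thread& self, bool is_fast_native) {
  if (!is_fast_native) {
    self.state = ThreadState::kNative;  // normal JNI: transition out of Runnable
  }                                     // @FastNative: no transition, stay Runnable
}

void JniMethodEndModel(Thread& self, bool is_fast_native) {
  if (!is_fast_native) {
    self.state = ThreadState::kRunnable;  // normal JNI: transition back
  } else if (self.suspend_requested) {
    self.FullSuspendCheck();              // @FastNative: only a suspend check on exit
  }
}

int main() {
  Thread t;
  t.suspend_requested = true;
  JniMethodStartModel(t, /*is_fast_native=*/true);
  assert(t.state == ThreadState::kRunnable);  // never left Runnable
  JniMethodEndModel(t, /*is_fast_native=*/true);
  assert(t.suspend_checks == 1);
  return 0;
}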
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index a4a8c349a3..127b5d7028 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -2171,32 +2171,14 @@ static void artQuickGenericJniEndJNINonRef(Thread* self,
*/
extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, ArtMethod** sp)
REQUIRES_SHARED(Locks::mutator_lock_) {
+ // Note: We cannot walk the stack properly until fixed up below.
ArtMethod* called = *sp;
DCHECK(called->IsNative()) << called->PrettyMethod(true);
- // Fix up a callee-save frame at the bottom of the stack (at `*sp`,
- // above the alloca region) while we check for optimization
- // annotations, thus allowing stack walking until the completion of
- // the JNI frame creation.
- //
- // Note however that the Generic JNI trampoline does not expect
- // exception being thrown at that stage.
- *sp = Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs);
- self->SetTopOfStack(sp);
uint32_t shorty_len = 0;
const char* shorty = called->GetShorty(&shorty_len);
- // Optimization annotations lookup does not try to resolve classes,
- // as this may throw an exception, which is not supported by the
- // Generic JNI trampoline at this stage; instead, method's
- // annotations' classes are looked up in the bootstrap class
- // loader's resolved types (which won't trigger an exception).
- CHECK(!self->IsExceptionPending());
- bool critical_native = called->IsAnnotatedWithCriticalNative();
- CHECK(!self->IsExceptionPending());
- bool fast_native = called->IsAnnotatedWithFastNative();
- CHECK(!self->IsExceptionPending());
+ bool critical_native = called->IsCriticalNative();
+ bool fast_native = called->IsFastNative();
bool normal_native = !critical_native && !fast_native;
- // Restore the initial ArtMethod pointer at `*sp`.
- *sp = called;
// Run the visitor and update sp.
BuildGenericJniFrameVisitor visitor(self,
@@ -2212,7 +2194,7 @@ extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, ArtMethod**
visitor.FinalizeHandleScope(self);
}
- // Fix up managed-stack things in Thread.
+ // Fix up managed-stack things in Thread. After this we can walk the stack.
self->SetTopOfStack(sp);
self->VerifyStack();
diff --git a/runtime/image.cc b/runtime/image.cc
index cf5feaca56..8f35d8474c 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -26,7 +26,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '5', '0', '\0' }; // strcmp() @FastNative.
+const uint8_t ImageHeader::kImageVersion[] = { '0', '5', '1', '\0' }; // @FastNative access flags.
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 5164c85b60..1e55158a34 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -2364,7 +2364,7 @@ class JNI {
// TODO: make this a hard register error in the future.
}
- const void* final_function_ptr = m->RegisterNative(fnPtr, is_fast);
+ const void* final_function_ptr = m->RegisterNative(fnPtr);
UNUSED(final_function_ptr);
}
return JNI_OK;
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 4d810dbce0..892c03912a 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -1244,7 +1244,6 @@ ObjPtr<Method> Class::GetDeclaredMethodInternal(
// still return a synthetic method to handle situations like
// escalated visibility. We never return miranda methods that
// were synthesized by the runtime.
- constexpr uint32_t kSkipModifiers = kAccMiranda | kAccSynthetic;
StackHandleScope<3> hs(self);
auto h_method_name = hs.NewHandle(name);
if (UNLIKELY(h_method_name == nullptr)) {
@@ -1264,11 +1263,10 @@ ObjPtr<Method> Class::GetDeclaredMethodInternal(
}
continue;
}
- auto modifiers = m.GetAccessFlags();
- if ((modifiers & kSkipModifiers) == 0) {
- return Method::CreateFromArtMethod<kPointerSize, kTransactionActive>(self, &m);
- }
- if ((modifiers & kAccMiranda) == 0) {
+ if (!m.IsMiranda()) {
+ if (!m.IsSynthetic()) {
+ return Method::CreateFromArtMethod<kPointerSize, kTransactionActive>(self, &m);
+ }
result = &m; // Remember as potential result if it's not a miranda method.
}
}
@@ -1291,11 +1289,11 @@ ObjPtr<Method> Class::GetDeclaredMethodInternal(
}
continue;
}
- if ((modifiers & kSkipModifiers) == 0) {
+ DCHECK(!m.IsMiranda()); // Direct methods cannot be miranda methods.
+ if ((modifiers & kAccSynthetic) == 0) {
return Method::CreateFromArtMethod<kPointerSize, kTransactionActive>(self, &m);
}
- // Direct methods cannot be miranda methods, so this potential result must be synthetic.
- result = &m;
+ result = &m; // Remember as potential result.
}
}
return result != nullptr
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index bf49f51339..c545a9b7d5 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -286,7 +286,7 @@ class MANAGED Class FINAL : public Object {
// This does not necessarily mean that access checks are avoidable,
// since the class methods might still need to be run with access checks.
bool WasVerificationAttempted() REQUIRES_SHARED(Locks::mutator_lock_) {
- return (GetAccessFlags() & kAccSkipAccessChecks) != 0;
+ return (GetAccessFlags() & kAccVerificationAttempted) != 0;
}
// Mark the class as having gone through a verification attempt.
diff --git a/runtime/modifiers.h b/runtime/modifiers.h
index 4b790a0f03..d7d647b8fd 100644
--- a/runtime/modifiers.h
+++ b/runtime/modifiers.h
@@ -49,17 +49,21 @@ static constexpr uint32_t kAccClassIsProxy = 0x00040000; // class (de
// declaring class. This flag may only be applied to methods.
static constexpr uint32_t kAccObsoleteMethod = 0x00040000; // method (runtime)
// Used by a method to denote that its execution does not need to go through slow path interpreter.
-static constexpr uint32_t kAccSkipAccessChecks = 0x00080000; // method (dex only)
+static constexpr uint32_t kAccSkipAccessChecks = 0x00080000; // method (runtime, not native)
// Used by a class to denote that the verifier has attempted to check it at least once.
static constexpr uint32_t kAccVerificationAttempted = 0x00080000; // class (runtime)
-static constexpr uint32_t kAccFastNative = 0x00080000; // method (dex only)
// This is set by the class linker during LinkInterfaceMethods. It is used by a method to represent
// that it was copied from its declaring class into another class. All methods marked kAccMiranda
// and kAccDefaultConflict will have this bit set. Any kAccDefault method contained in the methods_
// array of a concrete class will also have this bit set.
static constexpr uint32_t kAccCopied = 0x00100000; // method (runtime)
-static constexpr uint32_t kAccMiranda = 0x00200000; // method (dex only)
+static constexpr uint32_t kAccMiranda = 0x00200000; // method (runtime, not native)
static constexpr uint32_t kAccDefault = 0x00400000; // method (runtime)
+// Native method flags are set when linking the methods based on the presence of the
+// @dalvik.annotation.optimization.{Fast,Critical}Native annotations with build visibility.
+// Reuse the values of kAccSkipAccessChecks and kAccMiranda which are not used for native methods.
+static constexpr uint32_t kAccFastNative = 0x00080000; // method (runtime; native only)
+static constexpr uint32_t kAccCriticalNative = 0x00200000; // method (runtime; native only)
// Set by the JIT when clearing profiling infos to denote that a method was previously warm.
static constexpr uint32_t kAccPreviouslyWarm = 0x00800000; // method (runtime)
@@ -106,8 +110,9 @@ static constexpr uint32_t kAccValidFieldFlags = kAccPublic | kAccPrivate | kAccP
// Valid (meaningful) bits for a method.
static constexpr uint32_t kAccValidMethodFlags = kAccPublic | kAccPrivate | kAccProtected |
kAccStatic | kAccFinal | kAccSynchronized | kAccBridge | kAccVarargs | kAccNative |
- kAccAbstract | kAccStrict | kAccSynthetic | kAccMiranda | kAccConstructor |
- kAccDeclaredSynchronized | kAccPreviouslyWarm;
+ kAccAbstract | kAccStrict | kAccSynthetic | kAccConstructor | kAccDeclaredSynchronized;
+static_assert(((kAccIntrinsic | kAccIntrinsicBits) & kAccValidMethodFlags) == 0,
+ "Intrinsic bits and valid dex file method access flags must not overlap.");
// Valid (meaningful) bits for a class (not interface).
// Note 1. These are positive bits. Other bits may have to be zero.
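A compile-time restatement of the bit reuse documented in the comment above, kept separate from the commit; the values are copied from this hunk, and the aliasing is only safe because kAccSkipAccessChecks and kAccMiranda are never set on native methods:

#include <cstdint>

constexpr uint32_t kAccSkipAccessChecks = 0x00080000;  // method (runtime, not native)
constexpr uint32_t kAccMiranda          = 0x00200000;  // method (runtime, not native)
constexpr uint32_t kAccFastNative       = 0x00080000;  // method (runtime; native only)
constexpr uint32_t kAccCriticalNative   = 0x00200000;  // method (runtime; native only)

static_assert(kAccFastNative == kAccSkipAccessChecks, "kAccFastNative reuses the kAccSkipAccessChecks bit");
static_assert(kAccCriticalNative == kAccMiranda, "kAccCriticalNative reuses the kAccMiranda bit");
static_assert((kAccFastNative & kAccCriticalNative) == 0u, "The two native-only flags use distinct bits");

int main() { return 0; }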
diff --git a/runtime/native/scoped_fast_native_object_access-inl.h b/runtime/native/scoped_fast_native_object_access-inl.h
index b2abc4691a..20ff76ea27 100644
--- a/runtime/native/scoped_fast_native_object_access-inl.h
+++ b/runtime/native/scoped_fast_native_object_access-inl.h
@@ -27,7 +27,7 @@ namespace art {
inline ScopedFastNativeObjectAccess::ScopedFastNativeObjectAccess(JNIEnv* env)
: ScopedObjectAccessAlreadyRunnable(env) {
Locks::mutator_lock_->AssertSharedHeld(Self());
- DCHECK((*Self()->GetManagedStack()->GetTopQuickFrame())->IsAnnotatedWithFastNative());
+ DCHECK((*Self()->GetManagedStack()->GetTopQuickFrame())->IsFastNative());
// Don't work with raw objects in non-runnable states.
DCHECK_EQ(Self()->GetState(), kRunnable);
}
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 88f1fc6991..9f553147c4 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -288,12 +288,17 @@ void ThreadList::AssertThreadsAreSuspended(Thread* self, Thread* ignore1, Thread
#if HAVE_TIMED_RWLOCK
// Attempt to rectify locks so that we dump thread list with required locks before exiting.
NO_RETURN static void UnsafeLogFatalForThreadSuspendAllTimeout() {
+ // Increment gAborting before doing the thread list dump since we don't want any failures from
+ // AssertThreadSuspensionIsAllowable in cases where thread suspension is not allowed.
+ // See b/69044468.
+ ++gAborting;
Runtime* runtime = Runtime::Current();
std::ostringstream ss;
ss << "Thread suspend timeout\n";
Locks::mutator_lock_->Dump(ss);
ss << "\n";
runtime->GetThreadList()->Dump(ss);
+ --gAborting;
LOG(FATAL) << ss.str();
exit(0);
}
@@ -302,6 +307,8 @@ NO_RETURN static void UnsafeLogFatalForThreadSuspendAllTimeout() {
// Unlike suspending all threads where we can wait to acquire the mutator_lock_, suspending an
// individual thread requires polling. delay_us is the requested sleep wait. If delay_us is 0 then
// we use sched_yield instead of calling usleep.
+// Although there is the possibility, here and elsewhere, that usleep could return -1 and
+// errno = EINTR, there should be no problem if interrupted, so we do not check.
static void ThreadSuspendSleep(useconds_t delay_us) {
if (delay_us == 0) {
sched_yield();
diff --git a/test/669-moveable-string-class-equals/run b/test/669-moveable-string-class-equals/run
index d0ab6f8d55..7c74d8ca0e 100755
--- a/test/669-moveable-string-class-equals/run
+++ b/test/669-moveable-string-class-equals/run
@@ -16,4 +16,4 @@
# Run without image, so that String.class is moveable.
# Reduce heap size to force more frequent GCs.
-${RUN} --no-image --no-dex2oat --runtime-option -Xmx16m "$@"
+${RUN} --no-image --runtime-option -Xmx16m "$@"
diff --git a/test/669-moveable-string-class-equals/src/Main.java b/test/669-moveable-string-class-equals/src/Main.java
index 4badadeae4..d182d51a25 100644
--- a/test/669-moveable-string-class-equals/src/Main.java
+++ b/test/669-moveable-string-class-equals/src/Main.java
@@ -43,6 +43,10 @@ public class Main {
array[i] = "V" + i;
}
+ // Continually check string equality between a newly allocated String and an
+ // already allocated String with the same contents while allocating over 128MiB
+ // of memory (with heap size limited to 16MiB), ensuring we run GC and stress the
+ // instanceof check in the String.equals() implementation.
for (int count = 0; count != 128 * 1024; ++count) {
for (int i = 0; i != length; ++i) {
allocateAtLeast1KiB();
diff --git a/tools/buildbot-build.sh b/tools/buildbot-build.sh
index 53b509336e..fd9ad0bb0f 100755
--- a/tools/buildbot-build.sh
+++ b/tools/buildbot-build.sh
@@ -35,7 +35,7 @@ fi
using_jack=$(get_build_var ANDROID_COMPILE_WITH_JACK)
java_libraries_dir=${out_dir}/target/common/obj/JAVA_LIBRARIES
-common_targets="vogar core-tests apache-harmony-jdwp-tests-hostdex jsr166-tests mockito-target libjdwp"
+common_targets="vogar core-tests apache-harmony-jdwp-tests-hostdex jsr166-tests mockito-target"
mode="target"
j_arg="-j$(nproc)"
showcommands=
@@ -70,16 +70,23 @@ fi
extra_args=SOONG_ALLOW_MISSING_DEPENDENCIES=true
if [[ $mode == "host" ]]; then
- make_command="make $j_arg $extra_args $showcommands build-art-host-tests $common_targets dx-tests"
- make_command+=" ${out_dir}/host/linux-x86/lib/libjavacoretests.so "
- make_command+=" ${out_dir}/host/linux-x86/lib64/libjavacoretests.so"
- make_command+=" libwrapagentpropertiesd libwrapagentproperties"
+ make_command="make $j_arg $extra_args $showcommands build-art-host-tests $common_targets"
+ make_command+=" dx-tests"
+ mode_suffix="-host"
elif [[ $mode == "target" ]]; then
make_command="make $j_arg $extra_args $showcommands build-art-target-tests $common_targets"
- make_command+=" libjavacrypto libjavacoretests libnetd_client linker toybox toolbox sh"
+ make_command+=" libjavacrypto-target libnetd_client-target linker toybox toolbox sh"
make_command+=" ${out_dir}/host/linux-x86/bin/adb libstdc++ "
make_command+=" ${out_dir}/target/product/${TARGET_PRODUCT}/system/etc/public.libraries.txt"
+ mode_suffix="-target"
fi
+mode_specific_libraries="libjavacoretests libjdwp libwrapagentproperties libwrapagentpropertiesd"
+for LIB in ${mode_specific_libraries} ; do
+ make_command+=" $LIB${mode_suffix}"
+done
+
+
+
echo "Executing $make_command"
$make_command
diff --git a/tools/libjdwp_art_failures.txt b/tools/libjdwp_art_failures.txt
index 646a96adbb..354bee81af 100644
--- a/tools/libjdwp_art_failures.txt
+++ b/tools/libjdwp_art_failures.txt
@@ -91,5 +91,11 @@
"org.apache.harmony.jpda.tests.jdwp.ThreadGroupReference.NameTest#testName001_NullObject",
"org.apache.harmony.jpda.tests.jdwp.ThreadGroupReference.ParentTest#testParent_NullObject",
"org.apache.harmony.jpda.tests.jdwp.VirtualMachine.CapabilitiesNewTest#testCapabilitiesNew001" ]
+},
+{
+ description: "Test is flaky",
+ result: EXEC_FAILED,
+ bug: 69121056,
+ name: "org.apache.harmony.jpda.tests.jdwp.ObjectReference.IsCollectedTest#testIsCollected001"
}
]
diff --git a/tools/libjdwp_oj_art_failures.txt b/tools/libjdwp_oj_art_failures.txt
index e0f243ccfb..787c4d28fb 100644
--- a/tools/libjdwp_oj_art_failures.txt
+++ b/tools/libjdwp_oj_art_failures.txt
@@ -61,5 +61,11 @@
"org.apache.harmony.jpda.tests.jdwp.ThreadGroupReference.NameTest#testName001_NullObject",
"org.apache.harmony.jpda.tests.jdwp.ThreadGroupReference.ParentTest#testParent_NullObject",
"org.apache.harmony.jpda.tests.jdwp.VirtualMachine.CapabilitiesNewTest#testCapabilitiesNew001" ]
+},
+{
+ description: "Test is flaky",
+ result: EXEC_FAILED,
+ bug: 69121056,
+ name: "org.apache.harmony.jpda.tests.jdwp.ObjectReference.IsCollectedTest#testIsCollected001"
}
]