-rw-r--r--  compiler/dex/quick/quick_cfi_test.cc | 2
-rw-r--r--  compiler/dex/quick/x86/quick_assemble_x86_test.cc | 2
-rw-r--r--  compiler/driver/compiler_driver.cc | 169
-rw-r--r--  compiler/driver/compiler_options.cc | 6
-rw-r--r--  compiler/driver/compiler_options.h | 14
-rw-r--r--  compiler/image_writer.cc | 54
-rw-r--r--  compiler/image_writer.h | 5
-rw-r--r--  compiler/jit/jit_compiler.cc | 2
-rw-r--r--  compiler/oat_test.cc | 2
-rw-r--r--  compiler/optimizing/graph_visualizer.cc | 40
-rw-r--r--  compiler/optimizing/inliner.cc | 50
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc | 13
-rw-r--r--  compiler/optimizing/nodes.cc | 41
-rw-r--r--  compiler/optimizing/nodes.h | 128
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc | 55
-rw-r--r--  compiler/optimizing/reference_type_propagation.cc | 450
-rw-r--r--  compiler/optimizing/reference_type_propagation.h | 11
-rw-r--r--  compiler/utils/x86/assembler_x86.cc | 8
-rw-r--r--  compiler/utils/x86/assembler_x86.h | 1
-rw-r--r--  compiler/utils/x86/assembler_x86_test.cc | 6
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.cc | 8
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.h | 1
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64_test.cc | 6
-rw-r--r--  dex2oat/dex2oat.cc | 32
-rw-r--r--  runtime/Android.mk | 1
-rw-r--r--  runtime/arch/arm/entrypoints_init_arm.cc | 1
-rw-r--r--  runtime/arch/arm/quick_entrypoints_arm.S | 75
-rw-r--r--  runtime/arch/arm64/entrypoints_init_arm64.cc | 1
-rw-r--r--  runtime/arch/arm64/quick_entrypoints_arm64.S | 73
-rw-r--r--  runtime/arch/mips/entrypoints_direct_mips.h | 3
-rw-r--r--  runtime/arch/mips/entrypoints_init_mips.cc | 2
-rw-r--r--  runtime/arch/mips/quick_entrypoints_mips.S | 85
-rw-r--r--  runtime/arch/mips64/entrypoints_init_mips64.cc | 1
-rw-r--r--  runtime/arch/mips64/quick_entrypoints_mips64.S | 83
-rw-r--r--  runtime/arch/stub_test.cc | 58
-rw-r--r--  runtime/arch/x86/entrypoints_init_x86.cc | 4
-rw-r--r--  runtime/arch/x86/quick_entrypoints_x86.S | 79
-rw-r--r--  runtime/arch/x86_64/asm_support_x86_64.S | 1
-rw-r--r--  runtime/arch/x86_64/entrypoints_init_x86_64.cc | 4
-rw-r--r--  runtime/arch/x86_64/quick_entrypoints_x86_64.S | 99
-rw-r--r--  runtime/asm_support.h | 2
-rw-r--r--  runtime/check_jni.cc | 44
-rw-r--r--  runtime/class_linker.cc | 428
-rw-r--r--  runtime/class_linker.h | 94
-rw-r--r--  runtime/class_table.cc | 148
-rw-r--r--  runtime/class_table.h | 122
-rw-r--r--  runtime/debugger.cc | 38
-rw-r--r--  runtime/entrypoints/quick/quick_entrypoints.h | 11
-rw-r--r--  runtime/entrypoints/quick/quick_entrypoints_list.h | 3
-rw-r--r--  runtime/entrypoints/quick/quick_field_entrypoints.cc | 12
-rw-r--r--  runtime/entrypoints_order_test.cc | 3
-rw-r--r--  runtime/instrumentation.cc | 26
-rw-r--r--  runtime/mirror/class-inl.h | 27
-rw-r--r--  runtime/mirror/class.h | 26
-rw-r--r--  runtime/oat.h | 2
-rw-r--r--  runtime/read_barrier_c.h | 5
-rw-r--r--  runtime/thread.cc | 1
-rw-r--r--  test/441-checker-inliner/src/Main.java | 33
-rw-r--r--  test/450-checker-types/src/Main.java | 108
-rw-r--r--  tools/libcore_failures.txt | 7
60 files changed, 2047 insertions, 769 deletions
diff --git a/compiler/dex/quick/quick_cfi_test.cc b/compiler/dex/quick/quick_cfi_test.cc
index dd68dd40c6..8318b526b6 100644
--- a/compiler/dex/quick/quick_cfi_test.cc
+++ b/compiler/dex/quick/quick_cfi_test.cc
@@ -56,6 +56,8 @@ class QuickCFITest : public CFITest {
CompilerOptions::kDefaultSmallMethodThreshold,
CompilerOptions::kDefaultTinyMethodThreshold,
CompilerOptions::kDefaultNumDexMethodsThreshold,
+ CompilerOptions::kDefaultInlineDepthLimit,
+ CompilerOptions::kDefaultInlineMaxCodeUnits,
false,
CompilerOptions::kDefaultTopKProfileThreshold,
false,
diff --git a/compiler/dex/quick/x86/quick_assemble_x86_test.cc b/compiler/dex/quick/x86/quick_assemble_x86_test.cc
index 798e23fbac..98e9f38d52 100644
--- a/compiler/dex/quick/x86/quick_assemble_x86_test.cc
+++ b/compiler/dex/quick/x86/quick_assemble_x86_test.cc
@@ -39,6 +39,8 @@ class QuickAssembleX86TestBase : public testing::Test {
CompilerOptions::kDefaultSmallMethodThreshold,
CompilerOptions::kDefaultTinyMethodThreshold,
CompilerOptions::kDefaultNumDexMethodsThreshold,
+ CompilerOptions::kDefaultInlineDepthLimit,
+ CompilerOptions::kDefaultInlineMaxCodeUnits,
false,
CompilerOptions::kDefaultTopKProfileThreshold,
false,
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index a35f306612..affa52a37a 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -690,66 +690,76 @@ bool CompilerDriver::IsMethodToCompile(const MethodReference& method_ref) const
return methods_to_compile_->find(tmp.c_str()) != methods_to_compile_->end();
}
-static void ResolveExceptionsForMethod(
- ArtMethod* method_handle, std::set<std::pair<uint16_t, const DexFile*>>& exceptions_to_resolve)
- SHARED_REQUIRES(Locks::mutator_lock_) {
- const DexFile::CodeItem* code_item = method_handle->GetCodeItem();
- if (code_item == nullptr) {
- return; // native or abstract method
- }
- if (code_item->tries_size_ == 0) {
- return; // nothing to process
- }
- const uint8_t* encoded_catch_handler_list = DexFile::GetCatchHandlerData(*code_item, 0);
- size_t num_encoded_catch_handlers = DecodeUnsignedLeb128(&encoded_catch_handler_list);
- for (size_t i = 0; i < num_encoded_catch_handlers; i++) {
- int32_t encoded_catch_handler_size = DecodeSignedLeb128(&encoded_catch_handler_list);
- bool has_catch_all = false;
- if (encoded_catch_handler_size <= 0) {
- encoded_catch_handler_size = -encoded_catch_handler_size;
- has_catch_all = true;
- }
- for (int32_t j = 0; j < encoded_catch_handler_size; j++) {
- uint16_t encoded_catch_handler_handlers_type_idx =
- DecodeUnsignedLeb128(&encoded_catch_handler_list);
- // Add to set of types to resolve if not already in the dex cache resolved types
- if (!method_handle->IsResolvedTypeIdx(encoded_catch_handler_handlers_type_idx)) {
- exceptions_to_resolve.insert(
- std::pair<uint16_t, const DexFile*>(encoded_catch_handler_handlers_type_idx,
- method_handle->GetDexFile()));
+class ResolveCatchBlockExceptionsClassVisitor : public ClassVisitor {
+ public:
+ ResolveCatchBlockExceptionsClassVisitor(
+ std::set<std::pair<uint16_t, const DexFile*>>& exceptions_to_resolve)
+ : exceptions_to_resolve_(exceptions_to_resolve) {}
+
+ void ResolveExceptionsForMethod(ArtMethod* method_handle) SHARED_REQUIRES(Locks::mutator_lock_) {
+ const DexFile::CodeItem* code_item = method_handle->GetCodeItem();
+ if (code_item == nullptr) {
+ return; // native or abstract method
+ }
+ if (code_item->tries_size_ == 0) {
+ return; // nothing to process
+ }
+ const uint8_t* encoded_catch_handler_list = DexFile::GetCatchHandlerData(*code_item, 0);
+ size_t num_encoded_catch_handlers = DecodeUnsignedLeb128(&encoded_catch_handler_list);
+ for (size_t i = 0; i < num_encoded_catch_handlers; i++) {
+ int32_t encoded_catch_handler_size = DecodeSignedLeb128(&encoded_catch_handler_list);
+ bool has_catch_all = false;
+ if (encoded_catch_handler_size <= 0) {
+ encoded_catch_handler_size = -encoded_catch_handler_size;
+ has_catch_all = true;
+ }
+ for (int32_t j = 0; j < encoded_catch_handler_size; j++) {
+ uint16_t encoded_catch_handler_handlers_type_idx =
+ DecodeUnsignedLeb128(&encoded_catch_handler_list);
+ // Add to set of types to resolve if not already in the dex cache resolved types
+ if (!method_handle->IsResolvedTypeIdx(encoded_catch_handler_handlers_type_idx)) {
+ exceptions_to_resolve_.emplace(encoded_catch_handler_handlers_type_idx,
+ method_handle->GetDexFile());
+ }
+ // ignore address associated with catch handler
+ DecodeUnsignedLeb128(&encoded_catch_handler_list);
+ }
+ if (has_catch_all) {
+ // ignore catch all address
+ DecodeUnsignedLeb128(&encoded_catch_handler_list);
}
- // ignore address associated with catch handler
- DecodeUnsignedLeb128(&encoded_catch_handler_list);
- }
- if (has_catch_all) {
- // ignore catch all address
- DecodeUnsignedLeb128(&encoded_catch_handler_list);
}
}
-}
-static bool ResolveCatchBlockExceptionsClassVisitor(mirror::Class* c, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
- auto* exceptions_to_resolve =
- reinterpret_cast<std::set<std::pair<uint16_t, const DexFile*>>*>(arg);
- const auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
- for (auto& m : c->GetVirtualMethods(pointer_size)) {
- ResolveExceptionsForMethod(&m, *exceptions_to_resolve);
+ virtual bool Visit(mirror::Class* c) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ const auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ for (auto& m : c->GetVirtualMethods(pointer_size)) {
+ ResolveExceptionsForMethod(&m);
+ }
+ for (auto& m : c->GetDirectMethods(pointer_size)) {
+ ResolveExceptionsForMethod(&m);
+ }
+ return true;
}
- for (auto& m : c->GetDirectMethods(pointer_size)) {
- ResolveExceptionsForMethod(&m, *exceptions_to_resolve);
+
+ private:
+ std::set<std::pair<uint16_t, const DexFile*>>& exceptions_to_resolve_;
+};
+
+class RecordImageClassesVisitor : public ClassVisitor {
+ public:
+ explicit RecordImageClassesVisitor(std::unordered_set<std::string>* image_classes)
+ : image_classes_(image_classes) {}
+
+ bool Visit(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ std::string temp;
+ image_classes_->insert(klass->GetDescriptor(&temp));
+ return true;
}
- return true;
-}
-static bool RecordImageClassesVisitor(mirror::Class* klass, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
- std::unordered_set<std::string>* image_classes =
- reinterpret_cast<std::unordered_set<std::string>*>(arg);
- std::string temp;
- image_classes->insert(klass->GetDescriptor(&temp));
- return true;
-}
+ private:
+ std::unordered_set<std::string>* const image_classes_;
+};
// Make a list of descriptors for classes to include in the image
void CompilerDriver::LoadImageClasses(TimingLogger* timings) {
@@ -787,8 +797,8 @@ void CompilerDriver::LoadImageClasses(TimingLogger* timings) {
hs.NewHandle(class_linker->FindSystemClass(self, "Ljava/lang/Throwable;")));
do {
unresolved_exception_types.clear();
- class_linker->VisitClasses(ResolveCatchBlockExceptionsClassVisitor,
- &unresolved_exception_types);
+ ResolveCatchBlockExceptionsClassVisitor visitor(unresolved_exception_types);
+ class_linker->VisitClasses(&visitor);
for (const std::pair<uint16_t, const DexFile*>& exception_type : unresolved_exception_types) {
uint16_t exception_type_idx = exception_type.first;
const DexFile* dex_file = exception_type.second;
@@ -811,7 +821,8 @@ void CompilerDriver::LoadImageClasses(TimingLogger* timings) {
// We walk the roots looking for classes so that we'll pick up the
// above classes plus any classes them depend on such super
// classes, interfaces, and the required ClassLinker roots.
- class_linker->VisitClasses(RecordImageClassesVisitor, image_classes_.get());
+ RecordImageClassesVisitor visitor(image_classes_.get());
+ class_linker->VisitClasses(&visitor);
CHECK_NE(image_classes_->size(), 0U);
}
@@ -899,6 +910,29 @@ class ClinitImageUpdate {
}
private:
+ class FindImageClassesVisitor : public ClassVisitor {
+ public:
+ explicit FindImageClassesVisitor(ClinitImageUpdate* data) : data_(data) {}
+
+ bool Visit(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ std::string temp;
+ const char* name = klass->GetDescriptor(&temp);
+ if (data_->image_class_descriptors_->find(name) != data_->image_class_descriptors_->end()) {
+ data_->image_classes_.push_back(klass);
+ } else {
+ // Check whether it is initialized and has a clinit. They must be kept, too.
+ if (klass->IsInitialized() && klass->FindClassInitializer(
+ Runtime::Current()->GetClassLinker()->GetImagePointerSize()) != nullptr) {
+ data_->image_classes_.push_back(klass);
+ }
+ }
+ return true;
+ }
+
+ private:
+ ClinitImageUpdate* const data_;
+ };
+
ClinitImageUpdate(std::unordered_set<std::string>* image_class_descriptors, Thread* self,
ClassLinker* linker)
SHARED_REQUIRES(Locks::mutator_lock_) :
@@ -915,25 +949,8 @@ class ClinitImageUpdate {
// Find all the already-marked classes.
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- linker->VisitClasses(FindImageClasses, this);
- }
-
- static bool FindImageClasses(mirror::Class* klass, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
- ClinitImageUpdate* data = reinterpret_cast<ClinitImageUpdate*>(arg);
- std::string temp;
- const char* name = klass->GetDescriptor(&temp);
- if (data->image_class_descriptors_->find(name) != data->image_class_descriptors_->end()) {
- data->image_classes_.push_back(klass);
- } else {
- // Check whether it is initialized and has a clinit. They must be kept, too.
- if (klass->IsInitialized() && klass->FindClassInitializer(
- Runtime::Current()->GetClassLinker()->GetImagePointerSize()) != nullptr) {
- data->image_classes_.push_back(klass);
- }
- }
-
- return true;
+ FindImageClassesVisitor visitor(this);
+ linker->VisitClasses(&visitor);
}
void VisitClinitClassesObject(mirror::Object* object) const
@@ -1731,7 +1748,7 @@ class ResolveClassFieldsAndMethodsVisitor : public CompilationVisitor {
explicit ResolveClassFieldsAndMethodsVisitor(const ParallelCompilationManager* manager)
: manager_(manager) {}
- virtual void Visit(size_t class_def_index) OVERRIDE REQUIRES(!Locks::mutator_lock_) {
+ void Visit(size_t class_def_index) OVERRIDE REQUIRES(!Locks::mutator_lock_) {
ATRACE_CALL();
Thread* const self = Thread::Current();
jobject jclass_loader = manager_->GetClassLoader();
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index 226e6b7952..3f5a1eabb6 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -27,6 +27,8 @@ CompilerOptions::CompilerOptions()
small_method_threshold_(kDefaultSmallMethodThreshold),
tiny_method_threshold_(kDefaultTinyMethodThreshold),
num_dex_methods_threshold_(kDefaultNumDexMethodsThreshold),
+ inline_depth_limit_(kDefaultInlineDepthLimit),
+ inline_max_code_units_(kDefaultInlineMaxCodeUnits),
include_patch_information_(kDefaultIncludePatchInformation),
top_k_profile_threshold_(kDefaultTopKProfileThreshold),
debuggable_(false),
@@ -52,6 +54,8 @@ CompilerOptions::CompilerOptions(CompilerFilter compiler_filter,
size_t small_method_threshold,
size_t tiny_method_threshold,
size_t num_dex_methods_threshold,
+ size_t inline_depth_limit,
+ size_t inline_max_code_units,
bool include_patch_information,
double top_k_profile_threshold,
bool debuggable,
@@ -71,6 +75,8 @@ CompilerOptions::CompilerOptions(CompilerFilter compiler_filter,
small_method_threshold_(small_method_threshold),
tiny_method_threshold_(tiny_method_threshold),
num_dex_methods_threshold_(num_dex_methods_threshold),
+ inline_depth_limit_(inline_depth_limit),
+ inline_max_code_units_(inline_max_code_units),
include_patch_information_(include_patch_information),
top_k_profile_threshold_(top_k_profile_threshold),
debuggable_(debuggable),
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index fe681e2a53..17b19dd51e 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -51,6 +51,8 @@ class CompilerOptions FINAL {
static constexpr double kDefaultTopKProfileThreshold = 90.0;
static const bool kDefaultGenerateDebugInfo = kIsDebugBuild;
static const bool kDefaultIncludePatchInformation = false;
+ static const size_t kDefaultInlineDepthLimit = 3;
+ static const size_t kDefaultInlineMaxCodeUnits = 18;
CompilerOptions();
~CompilerOptions();
@@ -61,6 +63,8 @@ class CompilerOptions FINAL {
size_t small_method_threshold,
size_t tiny_method_threshold,
size_t num_dex_methods_threshold,
+ size_t inline_depth_limit,
+ size_t inline_max_code_units,
bool include_patch_information,
double top_k_profile_threshold,
bool debuggable,
@@ -137,6 +141,14 @@ class CompilerOptions FINAL {
return num_dex_methods_threshold_;
}
+ size_t GetInlineDepthLimit() const {
+ return inline_depth_limit_;
+ }
+
+ size_t GetInlineMaxCodeUnits() const {
+ return inline_max_code_units_;
+ }
+
double GetTopKProfileThreshold() const {
return top_k_profile_threshold_;
}
@@ -202,6 +214,8 @@ class CompilerOptions FINAL {
const size_t small_method_threshold_;
const size_t tiny_method_threshold_;
const size_t num_dex_methods_threshold_;
+ const size_t inline_depth_limit_;
+ const size_t inline_max_code_units_;
const bool include_patch_information_;
// When using a profile file only the top K% of the profiled samples will be compiled.
const double top_k_profile_threshold_;
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 293a488ccf..dda36fa2ef 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -539,16 +539,19 @@ bool ImageWriter::AllocMemory() {
return true;
}
+class ComputeLazyFieldsForClassesVisitor : public ClassVisitor {
+ public:
+ bool Visit(Class* c) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ StackHandleScope<1> hs(Thread::Current());
+ mirror::Class::ComputeName(hs.NewHandle(c));
+ return true;
+ }
+};
+
void ImageWriter::ComputeLazyFieldsForImageClasses() {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- class_linker->VisitClassesWithoutClassesLock(ComputeLazyFieldsForClassesVisitor, nullptr);
-}
-
-bool ImageWriter::ComputeLazyFieldsForClassesVisitor(Class* c, void* /*arg*/) {
- Thread* self = Thread::Current();
- StackHandleScope<1> hs(self);
- mirror::Class::ComputeName(hs.NewHandle(c));
- return true;
+ ComputeLazyFieldsForClassesVisitor visitor;
+ class_linker->VisitClassesWithoutClassesLock(&visitor);
}
void ImageWriter::ComputeEagerResolvedStringsCallback(Object* obj, void* arg ATTRIBUTE_UNUSED) {
@@ -592,9 +595,20 @@ bool ImageWriter::IsImageClass(Class* klass) {
return compiler_driver_.IsImageClass(klass->GetDescriptor(&temp));
}
-struct NonImageClasses {
- ImageWriter* image_writer;
- std::set<std::string>* non_image_classes;
+class NonImageClassesVisitor : public ClassVisitor {
+ public:
+ explicit NonImageClassesVisitor(ImageWriter* image_writer) : image_writer_(image_writer) {}
+
+ bool Visit(Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (!image_writer_->IsImageClass(klass)) {
+ std::string temp;
+ non_image_classes_.insert(klass->GetDescriptor(&temp));
+ }
+ return true;
+ }
+
+ std::set<std::string> non_image_classes_;
+ ImageWriter* const image_writer_;
};
void ImageWriter::PruneNonImageClasses() {
@@ -606,14 +620,11 @@ void ImageWriter::PruneNonImageClasses() {
Thread* self = Thread::Current();
// Make a list of classes we would like to prune.
- std::set<std::string> non_image_classes;
- NonImageClasses context;
- context.image_writer = this;
- context.non_image_classes = &non_image_classes;
- class_linker->VisitClasses(NonImageClassesVisitor, &context);
+ NonImageClassesVisitor visitor(this);
+ class_linker->VisitClasses(&visitor);
// Remove the undesired classes from the class roots.
- for (const std::string& it : non_image_classes) {
+ for (const std::string& it : visitor.non_image_classes_) {
bool result = class_linker->RemoveClass(it.c_str(), nullptr);
DCHECK(result);
}
@@ -669,15 +680,6 @@ void ImageWriter::PruneNonImageClasses() {
class_linker->DropFindArrayClassCache();
}
-bool ImageWriter::NonImageClassesVisitor(Class* klass, void* arg) {
- NonImageClasses* context = reinterpret_cast<NonImageClasses*>(arg);
- if (!context->image_writer->IsImageClass(klass)) {
- std::string temp;
- context->non_image_classes->insert(klass->GetDescriptor(&temp));
- }
- return true;
-}
-
void ImageWriter::CheckNonImageClassesRemoved() {
if (compiler_driver_.GetImageClasses() != nullptr) {
gc::Heap* heap = Runtime::Current()->GetHeap();
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 42b1cbf58a..cabd918354 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -217,8 +217,6 @@ class ImageWriter FINAL {
// Preinitializes some otherwise lazy fields (such as Class name) to avoid runtime image dirtying.
void ComputeLazyFieldsForImageClasses()
SHARED_REQUIRES(Locks::mutator_lock_);
- static bool ComputeLazyFieldsForClassesVisitor(mirror::Class* klass, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_);
// Wire dex cache resolved strings to strings in the image to avoid runtime resolution.
void ComputeEagerResolvedStrings() SHARED_REQUIRES(Locks::mutator_lock_);
@@ -227,8 +225,6 @@ class ImageWriter FINAL {
// Remove unwanted classes from various roots.
void PruneNonImageClasses() SHARED_REQUIRES(Locks::mutator_lock_);
- static bool NonImageClassesVisitor(mirror::Class* c, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_);
// Verify unwanted classes removed.
void CheckNonImageClassesRemoved() SHARED_REQUIRES(Locks::mutator_lock_);
@@ -376,6 +372,7 @@ class ImageWriter FINAL {
friend class FixupClassVisitor;
friend class FixupRootVisitor;
friend class FixupVisitor;
+ friend class NonImageClassesVisitor;
DISALLOW_COPY_AND_ASSIGN(ImageWriter);
};
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index d70211f9a9..c95bac24fd 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -71,6 +71,8 @@ JitCompiler::JitCompiler() : total_time_(0) {
CompilerOptions::kDefaultSmallMethodThreshold,
CompilerOptions::kDefaultTinyMethodThreshold,
CompilerOptions::kDefaultNumDexMethodsThreshold,
+ CompilerOptions::kDefaultInlineDepthLimit,
+ CompilerOptions::kDefaultInlineMaxCodeUnits,
/* include_patch_information */ false,
CompilerOptions::kDefaultTopKProfileThreshold,
Runtime::Current()->IsDebuggable(),
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index c98a5f8ba8..88dc29e6ab 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -183,7 +183,7 @@ TEST_F(OatTest, OatHeaderSizeCheck) {
EXPECT_EQ(72U, sizeof(OatHeader));
EXPECT_EQ(4U, sizeof(OatMethodOffsets));
EXPECT_EQ(28U, sizeof(OatQuickMethodHeader));
- EXPECT_EQ(112 * GetInstructionSetPointerSize(kRuntimeISA), sizeof(QuickEntryPoints));
+ EXPECT_EQ(113 * GetInstructionSetPointerSize(kRuntimeISA), sizeof(QuickEntryPoints));
}
TEST_F(OatTest, OatHeaderIsValid) {
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index afea40316c..069a7a460b 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -386,6 +386,7 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
StartAttributeStream("recursive") << std::boolalpha
<< invoke->IsRecursive()
<< std::noboolalpha;
+ StartAttributeStream("intrinsic") << invoke->GetIntrinsic();
}
void VisitTryBoundary(HTryBoundary* try_boundary) OVERRIDE {
@@ -396,6 +397,11 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
return strcmp(pass_name_, name) == 0;
}
+ bool IsReferenceTypePropagationPass() {
+ return strstr(pass_name_, ReferenceTypePropagation::kReferenceTypePropagationPassName)
+ != nullptr;
+ }
+
void PrintInstruction(HInstruction* instruction) {
output_ << instruction->DebugName();
if (instruction->InputCount() > 0) {
@@ -459,27 +465,19 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
} else {
StartAttributeStream("loop") << "B" << info->GetHeader()->GetBlockId();
}
- } else if (IsPass(ReferenceTypePropagation::kReferenceTypePropagationPassName)
- && is_after_pass_) {
- if (instruction->GetType() == Primitive::kPrimNot) {
- if (instruction->IsLoadClass()) {
- ReferenceTypeInfo info = instruction->AsLoadClass()->GetLoadedClassRTI();
- ScopedObjectAccess soa(Thread::Current());
- if (info.GetTypeHandle().GetReference() != nullptr) {
- StartAttributeStream("klass") << PrettyClass(info.GetTypeHandle().Get());
- } else {
- StartAttributeStream("klass") << "unresolved";
- }
- } else {
- ReferenceTypeInfo info = instruction->GetReferenceTypeInfo();
- if (info.IsTop()) {
- StartAttributeStream("klass") << "java.lang.Object";
- } else {
- ScopedObjectAccess soa(Thread::Current());
- StartAttributeStream("klass") << PrettyClass(info.GetTypeHandle().Get());
- }
- StartAttributeStream("exact") << std::boolalpha << info.IsExact() << std::noboolalpha;
- }
+ } else if (IsReferenceTypePropagationPass()
+ && (instruction->GetType() == Primitive::kPrimNot)) {
+ ReferenceTypeInfo info = instruction->IsLoadClass()
+ ? instruction->AsLoadClass()->GetLoadedClassRTI()
+ : instruction->GetReferenceTypeInfo();
+ ScopedObjectAccess soa(Thread::Current());
+ if (info.IsValid()) {
+ StartAttributeStream("klass") << PrettyDescriptor(info.GetTypeHandle().Get());
+ StartAttributeStream("can_be_null")
+ << std::boolalpha << instruction->CanBeNull() << std::noboolalpha;
+ StartAttributeStream("exact") << std::boolalpha << info.IsExact() << std::noboolalpha;
+ } else {
+ DCHECK(!is_after_pass_) << "Type info should be valid after reference type propagation";
}
}
if (disasm_info_ != nullptr) {
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index c185b5887b..01065959d8 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -22,8 +22,10 @@
#include "constant_folding.h"
#include "dead_code_elimination.h"
#include "driver/compiler_driver-inl.h"
+#include "driver/compiler_options.h"
#include "driver/dex_compilation_unit.h"
#include "instruction_simplifier.h"
+#include "intrinsics.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "nodes.h"
@@ -38,9 +40,6 @@
namespace art {
-static constexpr int kMaxInlineCodeUnits = 18;
-static constexpr int kDepthLimit = 3;
-
void HInliner::Run() {
if (graph_->IsDebuggable()) {
// For simplicity, we currently never inline when the graph is debuggable. This avoids
@@ -109,10 +108,8 @@ static ArtMethod* FindVirtualOrInterfaceTarget(HInvoke* invoke, ArtMethod* resol
receiver = receiver->InputAt(0);
}
ReferenceTypeInfo info = receiver->GetReferenceTypeInfo();
- if (info.IsTop()) {
- // We have no information on the receiver.
- return nullptr;
- } else if (!info.IsExact()) {
+ DCHECK(info.IsValid()) << "Invalid RTI for " << receiver->DebugName();
+ if (!info.IsExact()) {
// We currently only support inlining with known receivers.
// TODO: Remove this check, we should be able to inline final methods
// on unknown receivers.
@@ -221,7 +218,8 @@ bool HInliner::TryInline(HInvoke* invoke_instruction, uint32_t method_index) con
return false;
}
- if (code_item->insns_size_in_code_units_ > kMaxInlineCodeUnits) {
+ size_t inline_max_code_units = compiler_driver_->GetCompilerOptions().GetInlineMaxCodeUnits();
+ if (code_item->insns_size_in_code_units_ > inline_max_code_units) {
VLOG(compiler) << "Method " << PrettyMethod(method_index, caller_dex_file)
<< " is too big to inline";
return false;
@@ -273,11 +271,11 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method,
const DexFile::CodeItem* code_item = resolved_method->GetCodeItem();
const DexFile& callee_dex_file = *resolved_method->GetDexFile();
uint32_t method_index = resolved_method->GetDexMethodIndex();
-
+ ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
DexCompilationUnit dex_compilation_unit(
nullptr,
caller_compilation_unit_.GetClassLoader(),
- caller_compilation_unit_.GetClassLinker(),
+ class_linker,
*resolved_method->GetDexFile(),
code_item,
resolved_method->GetDeclaringClass()->GetDexClassDefIndex(),
@@ -358,8 +356,10 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method,
HConstantFolding fold(callee_graph);
ReferenceTypePropagation type_propagation(callee_graph, handles_);
InstructionSimplifier simplify(callee_graph, stats_);
+ IntrinsicsRecognizer intrinsics(callee_graph, compiler_driver_);
HOptimization* optimizations[] = {
+ &intrinsics,
&dce,
&fold,
&type_propagation,
@@ -371,7 +371,7 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method,
optimization->Run();
}
- if (depth_ + 1 < kDepthLimit) {
+ if (depth_ + 1 < compiler_driver_->GetCompilerOptions().GetInlineDepthLimit()) {
HInliner inliner(callee_graph,
outer_compilation_unit_,
dex_compilation_unit,
@@ -450,7 +450,33 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method,
}
}
- callee_graph->InlineInto(graph_, invoke_instruction);
+ HInstruction* return_replacement = callee_graph->InlineInto(graph_, invoke_instruction);
+
+ // When merging the graph we might create a new NullConstant in the caller graph which does
+ // not have the chance to be typed. We assign the correct type here so that we can keep the
+ // assertion that every reference has a valid type. This also simplifies checks along the way.
+ HNullConstant* null_constant = graph_->GetNullConstant();
+ if (!null_constant->GetReferenceTypeInfo().IsValid()) {
+ ReferenceTypeInfo::TypeHandle obj_handle =
+ handles_->NewHandle(class_linker->GetClassRoot(ClassLinker::kJavaLangObject));
+ null_constant->SetReferenceTypeInfo(
+ ReferenceTypeInfo::Create(obj_handle, false /* is_exact */));
+ }
+
+ if ((return_replacement != nullptr)
+ && (return_replacement->GetType() == Primitive::kPrimNot)) {
+ if (!return_replacement->GetReferenceTypeInfo().IsValid()) {
+ // Make sure that we have a valid type for the return. We may get an invalid one when
+ // we inline invokes with multiple branches and create a Phi for the result.
+ // TODO: we could be more precise by merging the phi inputs but that requires
+ // some functionality from the reference type propagation.
+ DCHECK(return_replacement->IsPhi());
+ ReferenceTypeInfo::TypeHandle return_handle =
+ handles_->NewHandle(resolved_method->GetReturnType());
+ return_replacement->SetReferenceTypeInfo(ReferenceTypeInfo::Create(
+ return_handle, return_handle->IsFinal() /* is_exact */));
+ }
+ }
return true;
}
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index b30b6c7bae..d3911456fb 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -195,16 +195,17 @@ bool InstructionSimplifierVisitor::IsDominatedByInputNullCheck(HInstruction* ins
// Returns whether doing a type test between the class of `object` against `klass` has
// a statically known outcome. The result of the test is stored in `outcome`.
static bool TypeCheckHasKnownOutcome(HLoadClass* klass, HInstruction* object, bool* outcome) {
- if (!klass->IsResolved()) {
- // If the class couldn't be resolve it's not safe to compare against it. It's
- // default type would be Top which might be wider that the actual class type
- // and thus producing wrong results.
+ DCHECK(!object->IsNullConstant()) << "Null constants should be special cased";
+ ReferenceTypeInfo obj_rti = object->GetReferenceTypeInfo();
+ ScopedObjectAccess soa(Thread::Current());
+ if (!obj_rti.IsValid()) {
+ // We run the simplifier before the reference type propagation so type info might not be
+ // available.
return false;
}
- ReferenceTypeInfo obj_rti = object->GetReferenceTypeInfo();
ReferenceTypeInfo class_rti = klass->GetLoadedClassRTI();
- ScopedObjectAccess soa(Thread::Current());
+ DCHECK(class_rti.IsValid() && class_rti.IsExact());
if (class_rti.IsSupertypeOf(obj_rti)) {
*outcome = true;
return true;
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 519fa005a6..188cb49f6a 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -1485,7 +1485,7 @@ void HGraph::DeleteDeadBlock(HBasicBlock* block) {
blocks_.Put(block->GetBlockId(), nullptr);
}
-void HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
+HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
DCHECK(HasExitBlock()) << "Unimplemented scenario";
// Update the environments in this graph to have the invoke's environment
// as parent.
@@ -1510,6 +1510,7 @@ void HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
outer_graph->SetHasBoundsChecks(true);
}
+ HInstruction* return_value = nullptr;
if (GetBlocks().Size() == 3) {
// Simple case of an entry block, a body block, and an exit block.
// Put the body block's instruction into `invoke`'s block.
@@ -1524,7 +1525,8 @@ void HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
// Replace the invoke with the return value of the inlined graph.
if (last->IsReturn()) {
- invoke->ReplaceWith(last->InputAt(0));
+ return_value = last->InputAt(0);
+ invoke->ReplaceWith(return_value);
} else {
DCHECK(last->IsReturnVoid());
}
@@ -1546,7 +1548,6 @@ void HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
// Update all predecessors of the exit block (now the `to` block)
// to not `HReturn` but `HGoto` instead.
- HInstruction* return_value = nullptr;
bool returns_void = to->GetPredecessors().Get(0)->GetLastInstruction()->IsReturnVoid();
if (to->GetPredecessors().Size() == 1) {
HBasicBlock* predecessor = to->GetPredecessors().Get(0);
@@ -1680,6 +1681,8 @@ void HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
// Finally remove the invoke from the caller.
invoke->GetBlock()->RemoveInstruction(invoke);
+
+ return return_value;
}
/*
@@ -1757,11 +1760,39 @@ void HGraph::TransformLoopHeaderForBCE(HBasicBlock* header) {
}
}
+void HInstruction::SetReferenceTypeInfo(ReferenceTypeInfo rti) {
+ if (kIsDebugBuild) {
+ DCHECK_EQ(GetType(), Primitive::kPrimNot);
+ ScopedObjectAccess soa(Thread::Current());
+ DCHECK(rti.IsValid()) << "Invalid RTI for " << DebugName();
+ if (IsBoundType()) {
+ // Having the test here spares us from making the method virtual just for
+ // the sake of a DCHECK.
+ ReferenceTypeInfo upper_bound_rti = AsBoundType()->GetUpperBound();
+ DCHECK(upper_bound_rti.IsSupertypeOf(rti))
+ << " upper_bound_rti: " << upper_bound_rti
+ << " rti: " << rti;
+ DCHECK(!upper_bound_rti.GetTypeHandle()->IsFinal() || rti.IsExact());
+ }
+ }
+ reference_type_info_ = rti;
+}
+
+ReferenceTypeInfo::ReferenceTypeInfo() : type_handle_(TypeHandle()), is_exact_(false) {}
+
+ReferenceTypeInfo::ReferenceTypeInfo(TypeHandle type_handle, bool is_exact)
+ : type_handle_(type_handle), is_exact_(is_exact) {
+ if (kIsDebugBuild) {
+ ScopedObjectAccess soa(Thread::Current());
+ DCHECK(IsValidHandle(type_handle));
+ }
+}
+
std::ostream& operator<<(std::ostream& os, const ReferenceTypeInfo& rhs) {
ScopedObjectAccess soa(Thread::Current());
os << "["
- << " is_top=" << rhs.IsTop()
- << " type=" << (rhs.IsTop() ? "?" : PrettyClass(rhs.GetTypeHandle().Get()))
+ << " is_valid=" << rhs.IsValid()
+ << " type=" << (!rhs.IsValid() ? "?" : PrettyClass(rhs.GetTypeHandle().Get()))
<< " is_exact=" << rhs.IsExact()
<< " ]";
return os;
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 7f446d4cf6..003900c8a6 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -210,7 +210,9 @@ class HGraph : public ArenaObject<kArenaAllocMisc> {
void ComputeTryBlockInformation();
// Inline this graph in `outer_graph`, replacing the given `invoke` instruction.
- void InlineInto(HGraph* outer_graph, HInvoke* invoke);
+ // Returns the instruction used to replace the invoke expression or null if the
+ // invoke is for a void method.
+ HInstruction* InlineInto(HGraph* outer_graph, HInvoke* invoke);
// Need to add a couple of blocks to test if the loop body is entered and
// put deoptimization instructions, etc.
@@ -306,7 +308,12 @@ class HGraph : public ArenaObject<kArenaAllocMisc> {
// already, it is created and inserted into the graph. This method is only for
// integral types.
HConstant* GetConstant(Primitive::Type type, int64_t value);
+
+ // TODO: This is problematic for the consistency of reference type propagation
+ // because it can be created anytime after the pass and thus it will be left
+ // with an invalid type.
HNullConstant* GetNullConstant();
+
HIntConstant* GetIntConstant(int32_t value) {
return CreateConstant(value, &cached_int_constants_);
}
@@ -1460,79 +1467,64 @@ class ReferenceTypeInfo : ValueObject {
public:
typedef Handle<mirror::Class> TypeHandle;
- static ReferenceTypeInfo Create(TypeHandle type_handle, bool is_exact)
- SHARED_REQUIRES(Locks::mutator_lock_) {
- if (type_handle->IsObjectClass()) {
- // Override the type handle to be consistent with the case when we get to
- // Top but don't have the Object class available. It avoids having to guess
- // what value the type_handle has when it's Top.
- return ReferenceTypeInfo(TypeHandle(), is_exact, true);
- } else {
- return ReferenceTypeInfo(type_handle, is_exact, false);
- }
+ static ReferenceTypeInfo Create(TypeHandle type_handle, bool is_exact) {
+ // The constructor will check that the type_handle is valid.
+ return ReferenceTypeInfo(type_handle, is_exact);
}
- static ReferenceTypeInfo CreateTop(bool is_exact) {
- return ReferenceTypeInfo(TypeHandle(), is_exact, true);
+ static ReferenceTypeInfo CreateInvalid() { return ReferenceTypeInfo(); }
+
+ static bool IsValidHandle(TypeHandle handle) SHARED_REQUIRES(Locks::mutator_lock_) {
+ return handle.GetReference() != nullptr;
}
+ bool IsValid() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ return IsValidHandle(type_handle_);
+ }
bool IsExact() const { return is_exact_; }
- bool IsTop() const { return is_top_; }
+
+ bool IsObjectClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(IsValid());
+ return GetTypeHandle()->IsObjectClass();
+ }
bool IsInterface() const SHARED_REQUIRES(Locks::mutator_lock_) {
- return !IsTop() && GetTypeHandle()->IsInterface();
+ DCHECK(IsValid());
+ return GetTypeHandle()->IsInterface();
}
Handle<mirror::Class> GetTypeHandle() const { return type_handle_; }
bool IsSupertypeOf(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
- if (IsTop()) {
- // Top (equivalent for java.lang.Object) is supertype of anything.
- return true;
- }
- if (rti.IsTop()) {
- // If we get here `this` is not Top() so it can't be a supertype.
- return false;
- }
+ DCHECK(IsValid());
+ DCHECK(rti.IsValid());
return GetTypeHandle()->IsAssignableFrom(rti.GetTypeHandle().Get());
}
// Returns true if the type information provide the same amount of details.
// Note that it does not mean that the instructions have the same actual type
- // (e.g. tops are equal but they can be the result of a merge).
+ // (because the type can be the result of a merge).
bool IsEqual(ReferenceTypeInfo rti) SHARED_REQUIRES(Locks::mutator_lock_) {
- if (IsExact() != rti.IsExact()) {
- return false;
- }
- if (IsTop() && rti.IsTop()) {
- // `Top` means java.lang.Object, so the types are equivalent.
+ if (!IsValid() && !rti.IsValid()) {
+ // Invalid types are equal.
return true;
}
- if (IsTop() || rti.IsTop()) {
- // If only one is top or object than they are not equivalent.
- // NB: We need this extra check because the type_handle of `Top` is invalid
- // and we cannot inspect its reference.
+ if (!IsValid() || !rti.IsValid()) {
+ // One is valid, the other not.
return false;
}
-
- // Finally check the types.
- return GetTypeHandle().Get() == rti.GetTypeHandle().Get();
+ return IsExact() == rti.IsExact()
+ && GetTypeHandle().Get() == rti.GetTypeHandle().Get();
}
private:
- ReferenceTypeInfo() : ReferenceTypeInfo(TypeHandle(), false, true) {}
- ReferenceTypeInfo(TypeHandle type_handle, bool is_exact, bool is_top)
- : type_handle_(type_handle), is_exact_(is_exact), is_top_(is_top) {}
+ ReferenceTypeInfo();
+ ReferenceTypeInfo(TypeHandle type_handle, bool is_exact);
// The class of the object.
TypeHandle type_handle_;
// Whether or not the type is exact or a superclass of the actual type.
// Whether or not we have any information about this type.
bool is_exact_;
- // A true value here means that the object type should be java.lang.Object.
- // We don't have access to the corresponding mirror object every time so this
- // flag acts as a substitute. When true, the TypeHandle refers to a null
- // pointer and should not be used.
- bool is_top_;
};
std::ostream& operator<<(std::ostream& os, const ReferenceTypeInfo& rhs);
@@ -1550,7 +1542,7 @@ class HInstruction : public ArenaObject<kArenaAllocMisc> {
live_interval_(nullptr),
lifetime_position_(kNoLifetime),
side_effects_(side_effects),
- reference_type_info_(ReferenceTypeInfo::CreateTop(/* is_exact */ false)) {}
+ reference_type_info_(ReferenceTypeInfo::CreateInvalid()) {}
virtual ~HInstruction() {}
@@ -1596,6 +1588,7 @@ class HInstruction : public ArenaObject<kArenaAllocMisc> {
// Does not apply for all instructions, but having this at top level greatly
// simplifies the null check elimination.
+ // TODO: Consider merging can_be_null into ReferenceTypeInfo.
virtual bool CanBeNull() const {
DCHECK_EQ(GetType(), Primitive::kPrimNot) << "CanBeNull only applies to reference types";
return true;
@@ -1606,10 +1599,7 @@ class HInstruction : public ArenaObject<kArenaAllocMisc> {
return false;
}
- void SetReferenceTypeInfo(ReferenceTypeInfo reference_type_info) {
- DCHECK_EQ(GetType(), Primitive::kPrimNot);
- reference_type_info_ = reference_type_info;
- }
+ void SetReferenceTypeInfo(ReferenceTypeInfo rti);
ReferenceTypeInfo GetReferenceTypeInfo() const {
DCHECK_EQ(GetType(), Primitive::kPrimNot);
@@ -3904,7 +3894,7 @@ class HLoadClass : public HExpression<1> {
is_referrers_class_(is_referrers_class),
dex_pc_(dex_pc),
generate_clinit_check_(false),
- loaded_class_rti_(ReferenceTypeInfo::CreateTop(/* is_exact */ false)) {
+ loaded_class_rti_(ReferenceTypeInfo::CreateInvalid()) {
SetRawInputAt(0, current_method);
}
@@ -3955,10 +3945,6 @@ class HLoadClass : public HExpression<1> {
loaded_class_rti_ = rti;
}
- bool IsResolved() {
- return loaded_class_rti_.IsExact();
- }
-
const DexFile& GetDexFile() { return dex_file_; }
bool NeedsDexCache() const OVERRIDE { return !is_referrers_class_; }
@@ -4201,27 +4187,43 @@ class HInstanceOf : public HExpression<2> {
class HBoundType : public HExpression<1> {
public:
- HBoundType(HInstruction* input, ReferenceTypeInfo bound_type)
+ // Constructs an HBoundType with the given upper_bound.
+ // Ensures that the upper_bound is valid.
+ HBoundType(HInstruction* input, ReferenceTypeInfo upper_bound, bool upper_can_be_null)
: HExpression(Primitive::kPrimNot, SideEffects::None()),
- bound_type_(bound_type) {
+ upper_bound_(upper_bound),
+ upper_can_be_null_(upper_can_be_null),
+ can_be_null_(upper_can_be_null) {
DCHECK_EQ(input->GetType(), Primitive::kPrimNot);
SetRawInputAt(0, input);
+ SetReferenceTypeInfo(upper_bound_);
}
- const ReferenceTypeInfo& GetBoundType() const { return bound_type_; }
+ // GetUpper* should only be used in reference type propagation.
+ const ReferenceTypeInfo& GetUpperBound() const { return upper_bound_; }
+ bool GetUpperCanBeNull() const { return upper_can_be_null_; }
- bool CanBeNull() const OVERRIDE {
- // `null instanceof ClassX` always return false so we can't be null.
- return false;
+ void SetCanBeNull(bool can_be_null) {
+ DCHECK(upper_can_be_null_ || !can_be_null);
+ can_be_null_ = can_be_null;
}
+ bool CanBeNull() const OVERRIDE { return can_be_null_; }
+
DECLARE_INSTRUCTION(BoundType);
private:
// Encodes the most upper class that this instruction can have. In other words
- // it is always the case that GetBoundType().IsSupertypeOf(GetReferenceType()).
- // It is used to bound the type in cases like `if (x instanceof ClassX) {}`
- const ReferenceTypeInfo bound_type_;
+ // it is always the case that GetUpperBound().IsSupertypeOf(GetReferenceType()).
+ // It is used to bound the type in cases like:
+ // if (x instanceof ClassX) {
+ // upper_bound_ will be ClassX
+ // }
+ const ReferenceTypeInfo upper_bound_;
+ // Represents the top constraint that can_be_null_ cannot exceed (i.e. if this
+ // is false then can_be_null_ cannot be true).
+ const bool upper_can_be_null_;
+ bool can_be_null_;
DISALLOW_COPY_AND_ASSIGN(HBoundType);
};
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 1c0123e188..6a50b7d4a4 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -369,6 +369,36 @@ static void RunOptimizations(HOptimization* optimizations[],
}
}
+static void MaybeRunInliner(HGraph* graph,
+ CompilerDriver* driver,
+ OptimizingCompilerStats* stats,
+ const DexCompilationUnit& dex_compilation_unit,
+ PassObserver* pass_observer,
+ StackHandleScopeCollection* handles) {
+ const CompilerOptions& compiler_options = driver->GetCompilerOptions();
+ bool should_inline = (compiler_options.GetInlineDepthLimit() > 0)
+ && (compiler_options.GetInlineMaxCodeUnits() > 0);
+ if (!should_inline) {
+ return;
+ }
+
+ ArenaAllocator* arena = graph->GetArena();
+ HInliner* inliner = new (arena) HInliner(
+ graph, dex_compilation_unit, dex_compilation_unit, driver, handles, stats);
+ ReferenceTypePropagation* type_propagation =
+ new (arena) ReferenceTypePropagation(graph, handles,
+ "reference_type_propagation_after_inlining");
+
+ HOptimization* optimizations[] = {
+ inliner,
+ // Run another type propagation phase: inlining will open up more opportunities
+ // to remove checkcast/instanceof and null checks.
+ type_propagation,
+ };
+
+ RunOptimizations(optimizations, arraysize(optimizations), pass_observer);
+}
+
static void RunOptimizations(HGraph* graph,
CompilerDriver* driver,
OptimizingCompilerStats* stats,
@@ -383,10 +413,6 @@ static void RunOptimizations(HGraph* graph,
HConstantFolding* fold1 = new (arena) HConstantFolding(graph);
InstructionSimplifier* simplify1 = new (arena) InstructionSimplifier(graph, stats);
HBooleanSimplifier* boolean_simplify = new (arena) HBooleanSimplifier(graph);
-
- HInliner* inliner = new (arena) HInliner(
- graph, dex_compilation_unit, dex_compilation_unit, driver, handles, stats);
-
HConstantFolding* fold2 = new (arena) HConstantFolding(graph, "constant_folding_after_inlining");
SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects);
@@ -398,28 +424,29 @@ static void RunOptimizations(HGraph* graph,
graph, stats, "instruction_simplifier_after_types");
InstructionSimplifier* simplify3 = new (arena) InstructionSimplifier(
graph, stats, "instruction_simplifier_after_bce");
- ReferenceTypePropagation* type_propagation2 =
- new (arena) ReferenceTypePropagation(graph, handles);
InstructionSimplifier* simplify4 = new (arena) InstructionSimplifier(
graph, stats, "instruction_simplifier_before_codegen");
IntrinsicsRecognizer* intrinsics = new (arena) IntrinsicsRecognizer(graph, driver);
- HOptimization* optimizations[] = {
+ HOptimization* optimizations1[] = {
intrinsics,
fold1,
simplify1,
type_propagation,
dce1,
- simplify2,
- inliner,
- // Run another type propagation phase: inlining will open up more opprotunities
- // to remove checkast/instanceof and null checks.
- type_propagation2,
+ simplify2
+ };
+
+ RunOptimizations(optimizations1, arraysize(optimizations1), pass_observer);
+
+ MaybeRunInliner(graph, driver, stats, dex_compilation_unit, pass_observer, handles);
+
+ HOptimization* optimizations2[] = {
// BooleanSimplifier depends on the InstructionSimplifier removing redundant
// suspend checks to recognize empty blocks.
boolean_simplify,
- fold2,
+ fold2, // TODO: if we don't inline we can also skip fold2.
side_effects,
gvn,
licm,
@@ -432,7 +459,7 @@ static void RunOptimizations(HGraph* graph,
simplify4,
};
- RunOptimizations(optimizations, arraysize(optimizations), pass_observer);
+ RunOptimizations(optimizations2, arraysize(optimizations2), pass_observer);
}
// The stack map we generate must be 4-byte aligned on ARM. Since existing
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 68316c2618..d1c1134565 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -25,19 +25,35 @@ namespace art {
class RTPVisitor : public HGraphDelegateVisitor {
public:
- RTPVisitor(HGraph* graph, StackHandleScopeCollection* handles)
+ RTPVisitor(HGraph* graph,
+ StackHandleScopeCollection* handles,
+ GrowableArray<HInstruction*>* worklist,
+ ReferenceTypeInfo::TypeHandle object_class_handle,
+ ReferenceTypeInfo::TypeHandle class_class_handle,
+ ReferenceTypeInfo::TypeHandle string_class_handle)
: HGraphDelegateVisitor(graph),
- handles_(handles) {}
+ handles_(handles),
+ object_class_handle_(object_class_handle),
+ class_class_handle_(class_class_handle),
+ string_class_handle_(string_class_handle),
+ worklist_(worklist) {}
+ void VisitNullConstant(HNullConstant* null_constant) OVERRIDE;
void VisitNewInstance(HNewInstance* new_instance) OVERRIDE;
void VisitLoadClass(HLoadClass* load_class) OVERRIDE;
+ void VisitClinitCheck(HClinitCheck* clinit_check) OVERRIDE;
+ void VisitLoadString(HLoadString* instr) OVERRIDE;
void VisitNewArray(HNewArray* instr) OVERRIDE;
+ void VisitParameterValue(HParameterValue* instr) OVERRIDE;
void UpdateFieldAccessTypeInfo(HInstruction* instr, const FieldInfo& info);
void SetClassAsTypeInfo(HInstruction* instr, mirror::Class* klass, bool is_exact);
void VisitInstanceFieldGet(HInstanceFieldGet* instr) OVERRIDE;
void VisitStaticFieldGet(HStaticFieldGet* instr) OVERRIDE;
void VisitInvoke(HInvoke* instr) OVERRIDE;
void VisitArrayGet(HArrayGet* instr) OVERRIDE;
+ void VisitCheckCast(HCheckCast* instr) OVERRIDE;
+ void VisitNullCheck(HNullCheck* instr) OVERRIDE;
+ void VisitFakeString(HFakeString* instr) OVERRIDE;
void UpdateReferenceTypeInfo(HInstruction* instr,
uint16_t type_idx,
const DexFile& dex_file,
@@ -45,8 +61,33 @@ class RTPVisitor : public HGraphDelegateVisitor {
private:
StackHandleScopeCollection* handles_;
+ ReferenceTypeInfo::TypeHandle object_class_handle_;
+ ReferenceTypeInfo::TypeHandle class_class_handle_;
+ ReferenceTypeInfo::TypeHandle string_class_handle_;
+ GrowableArray<HInstruction*>* worklist_;
+
+ static constexpr size_t kDefaultWorklistSize = 8;
};
+ReferenceTypePropagation::ReferenceTypePropagation(HGraph* graph,
+ StackHandleScopeCollection* handles,
+ const char* name)
+ : HOptimization(graph, name),
+ handles_(handles),
+ worklist_(graph->GetArena(), kDefaultWorklistSize) {
+ ClassLinker* linker = Runtime::Current()->GetClassLinker();
+ object_class_handle_ = handles_->NewHandle(linker->GetClassRoot(ClassLinker::kJavaLangObject));
+ string_class_handle_ = handles_->NewHandle(linker->GetClassRoot(ClassLinker::kJavaLangString));
+ class_class_handle_ = handles_->NewHandle(linker->GetClassRoot(ClassLinker::kJavaLangClass));
+
+ if (kIsDebugBuild) {
+ ScopedObjectAccess soa(Thread::Current());
+ DCHECK(ReferenceTypeInfo::IsValidHandle(object_class_handle_));
+ DCHECK(ReferenceTypeInfo::IsValidHandle(class_class_handle_));
+ DCHECK(ReferenceTypeInfo::IsValidHandle(string_class_handle_));
+ }
+}
+
void ReferenceTypePropagation::Run() {
// To properly propagate type info we need to visit in the dominator-based order.
// Reverse post order guarantees a node's dominators are visited first.
@@ -55,29 +96,122 @@ void ReferenceTypePropagation::Run() {
VisitBasicBlock(it.Current());
}
ProcessWorklist();
+
+ if (kIsDebugBuild) {
+ // TODO: move this to the graph checker.
+ ScopedObjectAccess soa(Thread::Current());
+ for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
+ HBasicBlock* block = it.Current();
+ for (HInstructionIterator iti(block->GetInstructions()); !iti.Done(); iti.Advance()) {
+ HInstruction* instr = iti.Current();
+ if (instr->GetType() == Primitive::kPrimNot) {
+ DCHECK(instr->GetReferenceTypeInfo().IsValid())
+ << "Invalid RTI for instruction: " << instr->DebugName();
+ if (instr->IsBoundType()) {
+ DCHECK(instr->AsBoundType()->GetUpperBound().IsValid());
+ } else if (instr->IsLoadClass()) {
+ DCHECK(instr->AsLoadClass()->GetReferenceTypeInfo().IsExact());
+ DCHECK(instr->AsLoadClass()->GetLoadedClassRTI().IsValid());
+ } else if (instr->IsNullCheck()) {
+ DCHECK(instr->GetReferenceTypeInfo().IsEqual(instr->InputAt(0)->GetReferenceTypeInfo()))
+ << "NullCheck " << instr->GetReferenceTypeInfo()
+ << "Input(0) " << instr->InputAt(0)->GetReferenceTypeInfo();
+ }
+ }
+ }
+ }
+ }
}
void ReferenceTypePropagation::VisitBasicBlock(HBasicBlock* block) {
- // TODO: handle other instructions that give type info
- // (array accesses)
+ RTPVisitor visitor(graph_,
+ handles_,
+ &worklist_,
+ object_class_handle_,
+ class_class_handle_,
+ string_class_handle_);
+ // Handle Phis first as there might be instructions in the same block who depend on them.
+ for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
+ VisitPhi(it.Current()->AsPhi());
+ }
- RTPVisitor visitor(graph_, handles_);
- // Initialize exact types first for faster convergence.
+ // Handle instructions.
for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* instr = it.Current();
instr->Accept(&visitor);
}
- // Handle Phis.
- for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
- VisitPhi(it.Current()->AsPhi());
- }
-
// Add extra nodes to bound types.
BoundTypeForIfNotNull(block);
BoundTypeForIfInstanceOf(block);
}
+// Create a bound type for the given object narrowing the type as much as possible.
+// The BoundType upper values for the super type and can_be_null will be taken from
+// load_class.GetLoadedClassRTI() and upper_can_be_null.
+static HBoundType* CreateBoundType(ArenaAllocator* arena,
+ HInstruction* obj,
+ HLoadClass* load_class,
+ bool upper_can_be_null)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ ReferenceTypeInfo obj_rti = obj->GetReferenceTypeInfo();
+ ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI();
+ HBoundType* bound_type = new (arena) HBoundType(obj, class_rti, upper_can_be_null);
+ // Narrow the type as much as possible.
+ if (class_rti.GetTypeHandle()->IsFinal()) {
+ bound_type->SetReferenceTypeInfo(
+ ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ true));
+ } else if (obj_rti.IsValid() && class_rti.IsSupertypeOf(obj_rti)) {
+ bound_type->SetReferenceTypeInfo(obj_rti);
+ } else {
+ bound_type->SetReferenceTypeInfo(
+ ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ false));
+ }
+ return bound_type;
+}
+
+// Check if we should create a bound type for the given object at the specified
+// position. Because of inlining and the fact we run RTP more than once and we
+// might have a HBoundType already. If we do, we should not create a new one.
+// In this case we also assert that there are no other uses of the object (except
+// the bound type) dominated by the specified dominator_instr or dominator_block.
+static bool ShouldCreateBoundType(HInstruction* position,
+ HInstruction* obj,
+ ReferenceTypeInfo upper_bound,
+ HInstruction* dominator_instr,
+ HBasicBlock* dominator_block)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ // If the position where we should insert the bound type is not already a
+ // a bound type then we need to create one.
+ if (position == nullptr || !position->IsBoundType()) {
+ return true;
+ }
+
+ HBoundType* existing_bound_type = position->AsBoundType();
+ if (existing_bound_type->GetUpperBound().IsSupertypeOf(upper_bound)) {
+ if (kIsDebugBuild) {
+ // Check that the existing HBoundType dominates all the uses.
+ for (HUseIterator<HInstruction*> it(obj->GetUses()); !it.Done(); it.Advance()) {
+ HInstruction* user = it.Current()->GetUser();
+ if (dominator_instr != nullptr) {
+ DCHECK(!dominator_instr->StrictlyDominates(user)
+ || user == existing_bound_type
+ || existing_bound_type->StrictlyDominates(user));
+ } else if (dominator_block != nullptr) {
+ DCHECK(!dominator_block->Dominates(user->GetBlock())
+ || user == existing_bound_type
+ || existing_bound_type->StrictlyDominates(user));
+ }
+ }
+ }
+ } else {
+ // TODO: if the current bound type is a refinement we could update the
+ // existing_bound_type with the a new upper limit. However, we also need to
+ // update its users and have access to the work list.
+ }
+ return false;
+}
+
void ReferenceTypePropagation::BoundTypeForIfNotNull(HBasicBlock* block) {
HIf* ifInstruction = block->GetLastInstruction()->AsIf();
if (ifInstruction == nullptr) {
@@ -116,8 +250,23 @@ void ReferenceTypePropagation::BoundTypeForIfNotNull(HBasicBlock* block) {
HInstruction* user = it.Current()->GetUser();
if (notNullBlock->Dominates(user->GetBlock())) {
if (bound_type == nullptr) {
- bound_type = new (graph_->GetArena()) HBoundType(obj, ReferenceTypeInfo::CreateTop(false));
- notNullBlock->InsertInstructionBefore(bound_type, notNullBlock->GetFirstInstruction());
+ ScopedObjectAccess soa(Thread::Current());
+ HInstruction* insert_point = notNullBlock->GetFirstInstruction();
+ ReferenceTypeInfo object_rti = ReferenceTypeInfo::Create(
+ object_class_handle_, /* is_exact */ true);
+ if (ShouldCreateBoundType(insert_point, obj, object_rti, nullptr, notNullBlock)) {
+ bound_type = new (graph_->GetArena()) HBoundType(
+ obj, object_rti, /* bound_can_be_null */ false);
+ if (obj->GetReferenceTypeInfo().IsValid()) {
+ bound_type->SetReferenceTypeInfo(obj->GetReferenceTypeInfo());
+ }
+ notNullBlock->InsertInstructionBefore(bound_type, insert_point);
+ } else {
+ // We already have a bound type on the position we would need to insert
+ // the new one. The existing bound type should dominate all the users
+ // (dchecked) so there's no need to continue.
+ break;
+ }
}
user->ReplaceInput(bound_type, it.Current()->GetIndex());
}
@@ -171,25 +320,23 @@ void ReferenceTypePropagation::BoundTypeForIfInstanceOf(HBasicBlock* block) {
HInstruction* user = it.Current()->GetUser();
if (instanceOfTrueBlock->Dominates(user->GetBlock())) {
if (bound_type == nullptr) {
+ ScopedObjectAccess soa(Thread::Current());
HLoadClass* load_class = instanceOf->InputAt(1)->AsLoadClass();
-
- ReferenceTypeInfo obj_rti = obj->GetReferenceTypeInfo();
ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI();
- bound_type = new (graph_->GetArena()) HBoundType(obj, class_rti);
-
- // Narrow the type as much as possible.
- {
- ScopedObjectAccess soa(Thread::Current());
- if (!load_class->IsResolved() || class_rti.IsSupertypeOf(obj_rti)) {
- bound_type->SetReferenceTypeInfo(obj_rti);
- } else {
- bound_type->SetReferenceTypeInfo(
- ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ false));
- }
+ HInstruction* insert_point = instanceOfTrueBlock->GetFirstInstruction();
+ if (ShouldCreateBoundType(insert_point, obj, class_rti, nullptr, instanceOfTrueBlock)) {
+ bound_type = CreateBoundType(
+ graph_->GetArena(),
+ obj,
+ load_class,
+ false /* InstanceOf ensures the object is not null. */);
+ instanceOfTrueBlock->InsertInstructionBefore(bound_type, insert_point);
+ } else {
+ // We already have a bound type at the position where we would need to
+ // insert the new one. The existing bound type should dominate all the
+ // users (dchecked), so there's no need to continue.
+ break;
}
-
- instanceOfTrueBlock->InsertInstructionBefore(
- bound_type, instanceOfTrueBlock->GetFirstInstruction());
}
user->ReplaceInput(bound_type, it.Current()->GetIndex());
}
@@ -199,11 +346,32 @@ void ReferenceTypePropagation::BoundTypeForIfInstanceOf(HBasicBlock* block) {
void RTPVisitor::SetClassAsTypeInfo(HInstruction* instr,
mirror::Class* klass,
bool is_exact) {
- if (klass != nullptr) {
+ if (instr->IsInvokeStaticOrDirect() && instr->AsInvokeStaticOrDirect()->IsStringInit()) {
+ // Calls to String.<init> are replaced with a StringFactory.
+ if (kIsDebugBuild) {
+ ScopedObjectAccess soa(Thread::Current());
+ ClassLinker* cl = Runtime::Current()->GetClassLinker();
+ mirror::DexCache* dex_cache = cl->FindDexCache(instr->AsInvoke()->GetDexFile());
+ ArtMethod* method = dex_cache->GetResolvedMethod(
+ instr->AsInvoke()->GetDexMethodIndex(), cl->GetImagePointerSize());
+ DCHECK(method != nullptr);
+ mirror::Class* declaring_class = method->GetDeclaringClass();
+ DCHECK(declaring_class != nullptr);
+ DCHECK(declaring_class->IsStringClass())
+ << "Expected String class: " << PrettyDescriptor(declaring_class);
+ DCHECK(method->IsConstructor())
+ << "Expected String.<init>: " << PrettyMethod(method);
+ }
+ instr->SetReferenceTypeInfo(
+ ReferenceTypeInfo::Create(string_class_handle_, /* is_exact */ true));
+ } else if (klass != nullptr) {
ScopedObjectAccess soa(Thread::Current());
- MutableHandle<mirror::Class> handle = handles_->NewHandle(klass);
+ ReferenceTypeInfo::TypeHandle handle = handles_->NewHandle(klass);
is_exact = is_exact || klass->IsFinal();
instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(handle, is_exact));
+ } else {
+ instr->SetReferenceTypeInfo(
+ ReferenceTypeInfo::Create(object_class_handle_, /* is_exact */ false));
}
}
@@ -219,6 +387,13 @@ void RTPVisitor::UpdateReferenceTypeInfo(HInstruction* instr,
SetClassAsTypeInfo(instr, dex_cache->GetResolvedType(type_idx), is_exact);
}
+void RTPVisitor::VisitNullConstant(HNullConstant* instr) {
+ // TODO: The null constant could be bound contextually (e.g. based on return statements)
+ // to a more precise type.
+ instr->SetReferenceTypeInfo(
+ ReferenceTypeInfo::Create(object_class_handle_, /* is_exact */ false));
+}
+
void RTPVisitor::VisitNewInstance(HNewInstance* instr) {
UpdateReferenceTypeInfo(instr, instr->GetTypeIndex(), instr->GetDexFile(), /* is_exact */ true);
}
@@ -227,6 +402,13 @@ void RTPVisitor::VisitNewArray(HNewArray* instr) {
UpdateReferenceTypeInfo(instr, instr->GetTypeIndex(), instr->GetDexFile(), /* is_exact */ true);
}
+void RTPVisitor::VisitParameterValue(HParameterValue* instr) {
+ if (instr->GetType() == Primitive::kPrimNot) {
+ // TODO: parse the signature and add precise types for the parameters.
+ SetClassAsTypeInfo(instr, nullptr, /* is_exact */ false);
+ }
+}
+
void RTPVisitor::UpdateFieldAccessTypeInfo(HInstruction* instr,
const FieldInfo& info) {
// The field index is unknown only during tests.
@@ -238,10 +420,10 @@ void RTPVisitor::UpdateFieldAccessTypeInfo(HInstruction* instr,
ClassLinker* cl = Runtime::Current()->GetClassLinker();
mirror::DexCache* dex_cache = cl->FindDexCache(info.GetDexFile());
ArtField* field = cl->GetResolvedField(info.GetFieldIndex(), dex_cache);
- if (field != nullptr) {
- mirror::Class* klass = field->GetType<false>();
- SetClassAsTypeInfo(instr, klass, /* is_exact */ false);
- }
+ // TODO: There are certain cases where we can't resolve the field.
+ // b/21914925 is open to keep track of a repro case for this issue.
+ mirror::Class* klass = (field == nullptr) ? nullptr : field->GetType<false>();
+ SetClassAsTypeInfo(instr, klass, /* is_exact */ false);
}
void RTPVisitor::VisitInstanceFieldGet(HInstanceFieldGet* instr) {
@@ -258,12 +440,60 @@ void RTPVisitor::VisitLoadClass(HLoadClass* instr) {
Runtime::Current()->GetClassLinker()->FindDexCache(instr->GetDexFile());
// Get type from dex cache assuming it was populated by the verifier.
mirror::Class* resolved_class = dex_cache->GetResolvedType(instr->GetTypeIndex());
- if (resolved_class != nullptr) {
- Handle<mirror::Class> handle = handles_->NewHandle(resolved_class);
- instr->SetLoadedClassRTI(ReferenceTypeInfo::Create(handle, /* is_exact */ true));
+ // TODO: investigate why we are still getting unresolved classes: b/22821472.
+ ReferenceTypeInfo::TypeHandle handle = (resolved_class != nullptr)
+ ? handles_->NewHandle(resolved_class)
+ : object_class_handle_;
+ instr->SetLoadedClassRTI(ReferenceTypeInfo::Create(handle, /* is_exact */ true));
+ instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(class_class_handle_, /* is_exact */ true));
+}
+
+void RTPVisitor::VisitClinitCheck(HClinitCheck* instr) {
+ instr->SetReferenceTypeInfo(instr->InputAt(0)->GetReferenceTypeInfo());
+}
+
+void RTPVisitor::VisitLoadString(HLoadString* instr) {
+ instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(string_class_handle_, /* is_exact */ true));
+}
+
+void RTPVisitor::VisitNullCheck(HNullCheck* instr) {
+ ScopedObjectAccess soa(Thread::Current());
+ ReferenceTypeInfo parent_rti = instr->InputAt(0)->GetReferenceTypeInfo();
+ DCHECK(parent_rti.IsValid());
+ instr->SetReferenceTypeInfo(parent_rti);
+}
+
+void RTPVisitor::VisitFakeString(HFakeString* instr) {
+ instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(string_class_handle_, /* is_exact */ true));
+}
+
+void RTPVisitor::VisitCheckCast(HCheckCast* check_cast) {
+ HInstruction* obj = check_cast->InputAt(0);
+ HBoundType* bound_type = nullptr;
+ for (HUseIterator<HInstruction*> it(obj->GetUses()); !it.Done(); it.Advance()) {
+ HInstruction* user = it.Current()->GetUser();
+ if (check_cast->StrictlyDominates(user)) {
+ if (bound_type == nullptr) {
+ ScopedObjectAccess soa(Thread::Current());
+ HLoadClass* load_class = check_cast->InputAt(1)->AsLoadClass();
+ ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI();
+ if (ShouldCreateBoundType(check_cast->GetNext(), obj, class_rti, check_cast, nullptr)) {
+ bound_type = CreateBoundType(
+ GetGraph()->GetArena(),
+ obj,
+ load_class,
+ true /* CheckCast succeeds for nulls. */);
+ check_cast->GetBlock()->InsertInstructionAfter(bound_type, check_cast);
+ } else {
+ // We already have a bound type at the position where we would need to
+ // insert the new one. The existing bound type should dominate all the
+ // users (dchecked), so there's no need to continue.
+ break;
+ }
+ }
+ user->ReplaceInput(bound_type, it.Current()->GetIndex());
+ }
}
- Handle<mirror::Class> class_handle = handles_->NewHandle(mirror::Class::GetJavaLangClass());
- instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(class_handle, /* is_exact */ true));
}
void ReferenceTypePropagation::VisitPhi(HPhi* phi) {
@@ -290,29 +520,54 @@ void ReferenceTypePropagation::VisitPhi(HPhi* phi) {
ReferenceTypeInfo ReferenceTypePropagation::MergeTypes(const ReferenceTypeInfo& a,
const ReferenceTypeInfo& b) {
+ if (!b.IsValid()) {
+ return a;
+ }
+ if (!a.IsValid()) {
+ return b;
+ }
+
bool is_exact = a.IsExact() && b.IsExact();
- bool is_top = a.IsTop() || b.IsTop();
Handle<mirror::Class> type_handle;
- if (!is_top) {
- if (a.GetTypeHandle().Get() == b.GetTypeHandle().Get()) {
- type_handle = a.GetTypeHandle();
- } else if (a.IsSupertypeOf(b)) {
- type_handle = a.GetTypeHandle();
- is_exact = false;
- } else if (b.IsSupertypeOf(a)) {
- type_handle = b.GetTypeHandle();
- is_exact = false;
- } else {
- // TODO: Find a common super class.
- is_top = true;
- is_exact = false;
- }
+ if (a.GetTypeHandle().Get() == b.GetTypeHandle().Get()) {
+ type_handle = a.GetTypeHandle();
+ } else if (a.IsSupertypeOf(b)) {
+ type_handle = a.GetTypeHandle();
+ is_exact = false;
+ } else if (b.IsSupertypeOf(a)) {
+ type_handle = b.GetTypeHandle();
+ is_exact = false;
+ } else {
+ // TODO: Find the first common super class.
+ type_handle = object_class_handle_;
+ is_exact = false;
}
- return is_top
- ? ReferenceTypeInfo::CreateTop(is_exact)
- : ReferenceTypeInfo::Create(type_handle, is_exact);
+ return ReferenceTypeInfo::Create(type_handle, is_exact);
+}
+
+static void UpdateArrayGet(HArrayGet* instr,
+ StackHandleScopeCollection* handles,
+ ReferenceTypeInfo::TypeHandle object_class_handle)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK_EQ(Primitive::kPrimNot, instr->GetType());
+
+ ReferenceTypeInfo parent_rti = instr->InputAt(0)->GetReferenceTypeInfo();
+ DCHECK(parent_rti.IsValid());
+
+ Handle<mirror::Class> handle = parent_rti.GetTypeHandle();
+ if (handle->IsObjectArrayClass()) {
+ ReferenceTypeInfo::TypeHandle component_handle = handles->NewHandle(handle->GetComponentType());
+ instr->SetReferenceTypeInfo(
+ ReferenceTypeInfo::Create(component_handle, /* is_exact */ false));
+ } else {
+ // We don't know what the parent actually is, so we fall back to Object.
+ instr->SetReferenceTypeInfo(
+ ReferenceTypeInfo::Create(object_class_handle, /* is_exact */ false));
+ }
+
+ return;
}
bool ReferenceTypePropagation::UpdateReferenceTypeInfo(HInstruction* instr) {
@@ -323,6 +578,15 @@ bool ReferenceTypePropagation::UpdateReferenceTypeInfo(HInstruction* instr) {
UpdateBoundType(instr->AsBoundType());
} else if (instr->IsPhi()) {
UpdatePhi(instr->AsPhi());
+ } else if (instr->IsNullCheck()) {
+ ReferenceTypeInfo parent_rti = instr->InputAt(0)->GetReferenceTypeInfo();
+ if (parent_rti.IsValid()) {
+ instr->SetReferenceTypeInfo(parent_rti);
+ }
+ } else if (instr->IsArrayGet()) {
+ // TODO: consider if it's worth "looking back" and bounding the input object
+ // to an array type.
+ UpdateArrayGet(instr->AsArrayGet(), handles_, object_class_handle_);
} else {
LOG(FATAL) << "Invalid instruction (should not get here)";
}
@@ -340,45 +604,45 @@ void RTPVisitor::VisitInvoke(HInvoke* instr) {
mirror::DexCache* dex_cache = cl->FindDexCache(instr->GetDexFile());
ArtMethod* method = dex_cache->GetResolvedMethod(
instr->GetDexMethodIndex(), cl->GetImagePointerSize());
- if (method != nullptr) {
- mirror::Class* klass = method->GetReturnType(false);
- SetClassAsTypeInfo(instr, klass, /* is_exact */ false);
- }
+ mirror::Class* klass = (method == nullptr) ? nullptr : method->GetReturnType(false);
+ SetClassAsTypeInfo(instr, klass, /* is_exact */ false);
}
void RTPVisitor::VisitArrayGet(HArrayGet* instr) {
if (instr->GetType() != Primitive::kPrimNot) {
return;
}
-
- HInstruction* parent = instr->InputAt(0);
ScopedObjectAccess soa(Thread::Current());
- Handle<mirror::Class> handle = parent->GetReferenceTypeInfo().GetTypeHandle();
- if (handle.GetReference() != nullptr && handle->IsObjectArrayClass()) {
- SetClassAsTypeInfo(instr, handle->GetComponentType(), /* is_exact */ false);
+ UpdateArrayGet(instr, handles_, object_class_handle_);
+ if (!instr->GetReferenceTypeInfo().IsValid()) {
+ worklist_->Add(instr);
}
}
void ReferenceTypePropagation::UpdateBoundType(HBoundType* instr) {
ReferenceTypeInfo new_rti = instr->InputAt(0)->GetReferenceTypeInfo();
- // Be sure that we don't go over the bounded type.
- ReferenceTypeInfo bound_rti = instr->GetBoundType();
- if (!bound_rti.IsSupertypeOf(new_rti)) {
- new_rti = bound_rti;
+ if (!new_rti.IsValid()) {
+ return; // No new info yet.
+ }
+
+ // Make sure that we don't go over the bounded type.
+ ReferenceTypeInfo upper_bound_rti = instr->GetUpperBound();
+ if (!upper_bound_rti.IsSupertypeOf(new_rti)) {
+ new_rti = upper_bound_rti;
}
instr->SetReferenceTypeInfo(new_rti);
}
void ReferenceTypePropagation::UpdatePhi(HPhi* instr) {
ReferenceTypeInfo new_rti = instr->InputAt(0)->GetReferenceTypeInfo();
- if (new_rti.IsTop() && !new_rti.IsExact()) {
- // Early return if we are Top and inexact.
+ if (new_rti.IsValid() && new_rti.IsObjectClass() && !new_rti.IsExact()) {
+ // Early return if we are Object and inexact.
instr->SetReferenceTypeInfo(new_rti);
return;
}
for (size_t i = 1; i < instr->InputCount(); i++) {
new_rti = MergeTypes(new_rti, instr->InputAt(i)->GetReferenceTypeInfo());
- if (new_rti.IsTop()) {
+ if (new_rti.IsValid() && new_rti.IsObjectClass()) {
if (!new_rti.IsExact()) {
break;
} else {
@@ -392,21 +656,31 @@ void ReferenceTypePropagation::UpdatePhi(HPhi* instr) {
// Re-computes and updates the nullability of the instruction. Returns whether or
// not the nullability was changed.
bool ReferenceTypePropagation::UpdateNullability(HInstruction* instr) {
- DCHECK(instr->IsPhi() || instr->IsBoundType());
+ DCHECK(instr->IsPhi()
+ || instr->IsBoundType()
+ || instr->IsNullCheck()
+ || instr->IsArrayGet());
- if (!instr->IsPhi()) {
+ if (!instr->IsPhi() && !instr->IsBoundType()) {
return false;
}
- HPhi* phi = instr->AsPhi();
- bool existing_can_be_null = phi->CanBeNull();
- bool new_can_be_null = false;
- for (size_t i = 0; i < phi->InputCount(); i++) {
- new_can_be_null |= phi->InputAt(i)->CanBeNull();
+ bool existing_can_be_null = instr->CanBeNull();
+ if (instr->IsPhi()) {
+ HPhi* phi = instr->AsPhi();
+ bool new_can_be_null = false;
+ for (size_t i = 0; i < phi->InputCount(); i++) {
+ if (phi->InputAt(i)->CanBeNull()) {
+ new_can_be_null = true;
+ break;
+ }
+ }
+ phi->SetCanBeNull(new_can_be_null);
+ } else if (instr->IsBoundType()) {
+ HBoundType* bound_type = instr->AsBoundType();
+ bound_type->SetCanBeNull(instr->InputAt(0)->CanBeNull() && bound_type->GetUpperCanBeNull());
}
- phi->SetCanBeNull(new_can_be_null);
-
- return existing_can_be_null != new_can_be_null;
+ return existing_can_be_null != instr->CanBeNull();
}
void ReferenceTypePropagation::ProcessWorklist() {
@@ -419,14 +693,18 @@ void ReferenceTypePropagation::ProcessWorklist() {
}
void ReferenceTypePropagation::AddToWorklist(HInstruction* instruction) {
- DCHECK_EQ(instruction->GetType(), Primitive::kPrimNot) << instruction->GetType();
+ DCHECK_EQ(instruction->GetType(), Primitive::kPrimNot)
+ << instruction->DebugName() << ":" << instruction->GetType();
worklist_.Add(instruction);
}
void ReferenceTypePropagation::AddDependentInstructionsToWorklist(HInstruction* instruction) {
for (HUseIterator<HInstruction*> it(instruction->GetUses()); !it.Done(); it.Advance()) {
HInstruction* user = it.Current()->GetUser();
- if (user->IsPhi() || user->IsBoundType()) {
+ if (user->IsPhi()
+ || user->IsBoundType()
+ || user->IsNullCheck()
+ || (user->IsArrayGet() && (user->GetType() == Primitive::kPrimNot))) {
AddToWorklist(user);
}
}
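
For reference, the MergeTypes rewrite above behaves like a join on a type lattice: an invalid input is an identity, exactness survives only when both sides name the same class, a supertype absorbs its subtype (dropping exactness), and unrelated classes currently fall back to an inexact java.lang.Object (the first-common-superclass case is left as a TODO). A minimal standalone sketch of that rule, using toy class names instead of ART handles (ToyRTI, IsSupertypeOf and the class names below are illustrative only):

    #include <cassert>
    #include <map>
    #include <string>

    // Toy stand-in for ReferenceTypeInfo: a class name, an exactness bit and a
    // validity bit.
    struct ToyRTI {
      bool valid;
      std::string klass;
      bool is_exact;
    };

    // Toy hierarchy: ArrayList <: List <: Object. Reflexive, like IsAssignableFrom.
    bool IsSupertypeOf(const std::string& a, const std::string& b) {
      static const std::map<std::string, std::string> parent = {
          {"ArrayList", "List"}, {"List", "Object"}, {"Object", ""}};
      for (std::string c = b; !c.empty(); c = parent.at(c)) {
        if (c == a) return true;
      }
      return false;
    }

    // Same branching as MergeTypes in the hunk above.
    ToyRTI Merge(const ToyRTI& a, const ToyRTI& b) {
      if (!b.valid) return a;
      if (!a.valid) return b;
      bool is_exact = a.is_exact && b.is_exact;
      if (a.klass == b.klass) return {true, a.klass, is_exact};
      if (IsSupertypeOf(a.klass, b.klass)) return {true, a.klass, false};
      if (IsSupertypeOf(b.klass, a.klass)) return {true, b.klass, false};
      return {true, "Object", false};  // TODO in the patch: first common superclass.
    }

    int main() {
      ToyRTI list{true, "List", false};
      ToyRTI array_list{true, "ArrayList", true};
      ToyRTI merged = Merge(list, array_list);
      assert(merged.klass == "List" && !merged.is_exact);  // supertype wins, exactness lost
      return 0;
    }
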
diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h
index 11f5ac91ca..14d4a82e9b 100644
--- a/compiler/optimizing/reference_type_propagation.h
+++ b/compiler/optimizing/reference_type_propagation.h
@@ -30,10 +30,9 @@ namespace art {
*/
class ReferenceTypePropagation : public HOptimization {
public:
- ReferenceTypePropagation(HGraph* graph, StackHandleScopeCollection* handles)
- : HOptimization(graph, kReferenceTypePropagationPassName),
- handles_(handles),
- worklist_(graph->GetArena(), kDefaultWorklistSize) {}
+ ReferenceTypePropagation(HGraph* graph,
+ StackHandleScopeCollection* handles,
+ const char* name = kReferenceTypePropagationPassName);
void Run() OVERRIDE;
@@ -60,6 +59,10 @@ class ReferenceTypePropagation : public HOptimization {
GrowableArray<HInstruction*> worklist_;
+ ReferenceTypeInfo::TypeHandle object_class_handle_;
+ ReferenceTypeInfo::TypeHandle class_class_handle_;
+ ReferenceTypeInfo::TypeHandle string_class_handle_;
+
static constexpr size_t kDefaultWorklistSize = 8;
DISALLOW_COPY_AND_ASSIGN(ReferenceTypePropagation);
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index fa85ada864..44efc65e3f 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -1515,6 +1515,14 @@ void X86Assembler::repne_scasw() {
}
+void X86Assembler::repe_cmpsw() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0xF3);
+ EmitUint8(0xA7);
+}
+
+
X86Assembler* X86Assembler::lock() {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xF0);
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index d1b4e1dc5f..e2abcde624 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -465,6 +465,7 @@ class X86Assembler FINAL : public Assembler {
void jmp(Label* label);
void repne_scasw();
+ void repe_cmpsw();
X86Assembler* lock();
void cmpxchgl(const Address& address, Register reg);
diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc
index aacc57bb0c..0e8c4aee0c 100644
--- a/compiler/utils/x86/assembler_x86_test.cc
+++ b/compiler/utils/x86/assembler_x86_test.cc
@@ -196,4 +196,10 @@ TEST_F(AssemblerX86Test, Repnescasw) {
DriverStr(expected, "Repnescasw");
}
+TEST_F(AssemblerX86Test, Repecmpsw) {
+ GetAssembler()->repe_cmpsw();
+ const char* expected = "repe cmpsw\n";
+ DriverStr(expected, "Repecmpsw");
+}
+
} // namespace art
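
The new repe_cmpsw() emitter (added identically for x86 here and x86-64 below) writes three raw bytes; for readers who don't decode prefixes by heart, the sketch below just labels them. It is illustrative only and not part of the assemblers:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    int main() {
      // The bytes emitted by repe_cmpsw(), with their meaning:
      const std::vector<uint8_t> repe_cmpsw = {
          0x66,  // operand-size override: selects the 16-bit (word) form of CMPS
          0xF3,  // REPE/REPZ prefix: repeat while ECX/RCX != 0 and ZF == 1
          0xA7,  // CMPS opcode: compares the words at [ESI]/[RSI] and [EDI]/[RDI]
      };
      assert(repe_cmpsw.size() == 3u);  // one byte per EmitUint8() call above
      return 0;
    }
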
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index f35f51c494..93c90db5d3 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -2073,6 +2073,14 @@ void X86_64Assembler::repne_scasw() {
}
+void X86_64Assembler::repe_cmpsw() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0xF3);
+ EmitUint8(0xA7);
+}
+
+
void X86_64Assembler::LoadDoubleConstant(XmmRegister dst, double value) {
// TODO: Need to have a code constants table.
int64_t constant = bit_cast<int64_t, double>(value);
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 61ffeab1e8..0cd31971b4 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -603,6 +603,7 @@ class X86_64Assembler FINAL : public Assembler {
void bswapq(CpuRegister dst);
void repne_scasw();
+ void repe_cmpsw();
//
// Macros for High-level operations.
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index 6da5c35731..422138ce8e 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -1263,4 +1263,10 @@ TEST_F(AssemblerX86_64Test, Repnescasw) {
DriverStr(expected, "Repnescasw");
}
+TEST_F(AssemblerX86_64Test, Repecmpsw) {
+ GetAssembler()->repe_cmpsw();
+ const char* expected = "repe cmpsw\n";
+ DriverStr(expected, "Repecmpsw");
+}
+
} // namespace art
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index bffb3b5155..75d6137eae 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -280,6 +280,18 @@ NO_RETURN static void Usage(const char* fmt, ...) {
UsageError(" Example: --num-dex-method=%d", CompilerOptions::kDefaultNumDexMethodsThreshold);
UsageError(" Default: %d", CompilerOptions::kDefaultNumDexMethodsThreshold);
UsageError("");
+ UsageError(" --inline-depth-limit=<depth-limit>: the depth limit of inlining for fine tuning");
+ UsageError(" the compiler. A zero value will disable inlining. Honored only by Optimizing.");
+ UsageError(" Example: --inline-depth-limit=%d", CompilerOptions::kDefaultInlineDepthLimit);
+ UsageError(" Default: %d", CompilerOptions::kDefaultInlineDepthLimit);
+ UsageError("");
+ UsageError(" --inline-max-code-units=<code-units-count>: the maximum code units that a method");
+ UsageError(" can have to be considered for inlining. A zero value will disable inlining.");
+ UsageError(" Honored only by Optimizing.");
+ UsageError(" Example: --inline-max-code-units=%d",
+ CompilerOptions::kDefaultInlineMaxCodeUnits);
+ UsageError(" Default: %d", CompilerOptions::kDefaultInlineMaxCodeUnits);
+ UsageError("");
UsageError(" --dump-timing: display a breakdown of where time was spent");
UsageError("");
UsageError(" --include-patch-information: Include patching information so the generated code");
@@ -550,6 +562,8 @@ class Dex2Oat FINAL {
int small_method_threshold = CompilerOptions::kDefaultSmallMethodThreshold;
int tiny_method_threshold = CompilerOptions::kDefaultTinyMethodThreshold;
int num_dex_methods_threshold = CompilerOptions::kDefaultNumDexMethodsThreshold;
+ int inline_depth_limit = CompilerOptions::kDefaultInlineDepthLimit;
+ int inline_max_code_units = CompilerOptions::kDefaultInlineMaxCodeUnits;
// Profile file to use
double top_k_profile_threshold = CompilerOptions::kDefaultTopKProfileThreshold;
@@ -720,6 +734,22 @@ class Dex2Oat FINAL {
if (num_dex_methods_threshold < 0) {
Usage("--num-dex-methods passed a negative value %s", num_dex_methods_threshold);
}
+ } else if (option.starts_with("--inline-depth-limit=")) {
+ const char* limit = option.substr(strlen("--inline-depth-limit=")).data();
+ if (!ParseInt(limit, &inline_depth_limit)) {
+ Usage("Failed to parse --inline-depth-limit '%s' as an integer", limit);
+ }
+ if (inline_depth_limit < 0) {
+ Usage("--inline-depth-limit passed a negative value %s", inline_depth_limit);
+ }
+ } else if (option.starts_with("--inline-max-code-units=")) {
+ const char* code_units = option.substr(strlen("--inline-max-code-units=")).data();
+ if (!ParseInt(code_units, &inline_max_code_units)) {
+ Usage("Failed to parse --inline-max-code-units '%s' as an integer", code_units);
+ }
+ if (inline_max_code_units < 0) {
+ Usage("--inline-max-code-units passed a negative value %s", inline_max_code_units);
+ }
} else if (option == "--host") {
is_host_ = true;
} else if (option == "--runtime-arg") {
@@ -992,6 +1022,8 @@ class Dex2Oat FINAL {
small_method_threshold,
tiny_method_threshold,
num_dex_methods_threshold,
+ inline_depth_limit,
+ inline_max_code_units,
include_patch_information,
top_k_profile_threshold,
debuggable,
diff --git a/runtime/Android.mk b/runtime/Android.mk
index fe79e72031..4a944963a2 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -39,6 +39,7 @@ LIBART_COMMON_SRC_FILES := \
base/unix_file/random_access_file_utils.cc \
check_jni.cc \
class_linker.cc \
+ class_table.cc \
common_throws.cc \
debugger.cc \
dex_file.cc \
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index 2f2654d4f6..be9af9871d 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -171,6 +171,7 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
// Read barrier
qpoints->pReadBarrierJni = ReadBarrierJni;
+ qpoints->pReadBarrierSlow = artReadBarrierSlow;
}
} // namespace art
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 20001109a6..f6d954f4f1 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -51,7 +51,6 @@
sub sp, #12 @ 3 words of space, bottom word will hold Method*
.cfi_adjust_cfa_offset 12
RUNTIME_CURRENT1 \rTemp1, \rTemp2 @ Load Runtime::Current into rTemp1.
- THIS_LOAD_REQUIRES_READ_BARRIER
ldr \rTemp1, [\rTemp1, #RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET] @ rTemp1 is kSaveAll Method*.
str \rTemp1, [sp, #0] @ Place Method* at bottom of stack.
str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
@@ -79,7 +78,6 @@
sub sp, #4 @ bottom word will hold Method*
.cfi_adjust_cfa_offset 4
RUNTIME_CURRENT2 \rTemp1, \rTemp2 @ Load Runtime::Current into rTemp1.
- THIS_LOAD_REQUIRES_READ_BARRIER
ldr \rTemp1, [\rTemp1, #RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET] @ rTemp1 is kRefsOnly Method*.
str \rTemp1, [sp, #0] @ Place Method* at bottom of stack.
str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
@@ -139,7 +137,6 @@
.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME rTemp1, rTemp2
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_REGISTERS_ONLY
RUNTIME_CURRENT3 \rTemp1, \rTemp2 @ Load Runtime::Current into rTemp1.
- THIS_LOAD_REQUIRES_READ_BARRIER
@ rTemp1 is kRefsAndArgs Method*.
ldr \rTemp1, [\rTemp1, #RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET]
str \rTemp1, [sp, #0] @ Place Method* at bottom of stack.
@@ -171,7 +168,6 @@
.cfi_adjust_cfa_offset -40
.endm
-
.macro RETURN_IF_RESULT_IS_ZERO
cbnz r0, 1f @ result non-zero branch over
bx lr @ return
@@ -588,6 +584,59 @@ ENTRY art_quick_check_cast
bkpt
END art_quick_check_cast
+// Restore rReg's value from [sp, #offset] if rReg is not the same as rExclude.
+.macro POP_REG_NE rReg, offset, rExclude
+ .ifnc \rReg, \rExclude
+ ldr \rReg, [sp, #\offset] @ restore rReg
+ .cfi_restore \rReg
+ .endif
+.endm
+
+ /*
+ * Macro to insert read barrier, only used in art_quick_aput_obj.
+ * rObj and rDest are registers, offset is a defined literal such as MIRROR_OBJECT_CLASS_OFFSET.
+ * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path.
+ */
+.macro READ_BARRIER rDest, rObj, offset
+#ifdef USE_READ_BARRIER
+ push {r0-r3, ip, lr} @ 6 words for saved registers (used in art_quick_aput_obj)
+ .cfi_adjust_cfa_offset 24
+ .cfi_rel_offset r0, 0
+ .cfi_rel_offset r1, 4
+ .cfi_rel_offset r2, 8
+ .cfi_rel_offset r3, 12
+ .cfi_rel_offset ip, 16
+ .cfi_rel_offset lr, 20
+ sub sp, #8 @ push padding
+ .cfi_adjust_cfa_offset 8
+ @ mov r0, r0 @ pass ref in r0 (no-op for now since parameter ref is unused)
+ .ifnc \rObj, r1
+ mov r1, \rObj @ pass rObj
+ .endif
+ mov r2, #\offset @ pass offset
+ bl artReadBarrierSlow @ artReadBarrierSlow(ref, rObj, offset)
+ @ No need to unpoison return value in r0, artReadBarrierSlow() would do the unpoisoning.
+ .ifnc \rDest, r0
+ mov \rDest, r0 @ save return value in rDest
+ .endif
+ add sp, #8 @ pop padding
+ .cfi_adjust_cfa_offset -8
+ POP_REG_NE r0, 0, \rDest @ conditionally restore saved registers
+ POP_REG_NE r1, 4, \rDest
+ POP_REG_NE r2, 8, \rDest
+ POP_REG_NE r3, 12, \rDest
+ POP_REG_NE ip, 16, \rDest
+ add sp, #20
+ .cfi_adjust_cfa_offset -20
+ pop {lr} @ restore lr
+ .cfi_adjust_cfa_offset -4
+ .cfi_restore lr
+#else
+ ldr \rDest, [\rObj, #\offset]
+ UNPOISON_HEAP_REF \rDest
+#endif // USE_READ_BARRIER
+.endm
+
/*
* Entry from managed code for array put operations of objects where the value being stored
* needs to be checked for compatibility.
@@ -609,15 +658,21 @@ ENTRY art_quick_aput_obj_with_bound_check
b art_quick_throw_array_bounds
END art_quick_aput_obj_with_bound_check
+#ifdef USE_READ_BARRIER
+ .extern artReadBarrierSlow
+#endif
.hidden art_quick_aput_obj
ENTRY art_quick_aput_obj
+#ifdef USE_READ_BARRIER
+ @ The offset to .Ldo_aput_null is too large to use cbz due to expansion from the READ_BARRIER macro.
+ tst r2, r2
+ beq .Ldo_aput_null
+#else
cbz r2, .Ldo_aput_null
- ldr r3, [r0, #MIRROR_OBJECT_CLASS_OFFSET]
- UNPOISON_HEAP_REF r3
- ldr ip, [r2, #MIRROR_OBJECT_CLASS_OFFSET]
- UNPOISON_HEAP_REF ip
- ldr r3, [r3, #MIRROR_CLASS_COMPONENT_TYPE_OFFSET]
- UNPOISON_HEAP_REF r3
+#endif // USE_READ_BARRIER
+ READ_BARRIER r3, r0, MIRROR_OBJECT_CLASS_OFFSET
+ READ_BARRIER ip, r2, MIRROR_OBJECT_CLASS_OFFSET
+ READ_BARRIER r3, r3, MIRROR_CLASS_COMPONENT_TYPE_OFFSET
cmp r3, ip @ value's type == array's component type - trivial assignability
bne .Lcheck_assignability
.Ldo_aput:
diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc
index 2ce2a29bbf..0f06727d0d 100644
--- a/runtime/arch/arm64/entrypoints_init_arm64.cc
+++ b/runtime/arch/arm64/entrypoints_init_arm64.cc
@@ -155,6 +155,7 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
// Read barrier
qpoints->pReadBarrierJni = ReadBarrierJni;
+ qpoints->pReadBarrierSlow = artReadBarrierSlow;
};
} // namespace art
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 6d9b44a1d2..548ab47f82 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -31,8 +31,6 @@
ldr xIP0, [xIP0] // xIP0 = & (art::Runtime * art::Runtime.instance_) .
// xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefAndArgs] .
- THIS_LOAD_REQUIRES_READ_BARRIER
-
// Loads appropriate callee-save-method.
ldr xIP0, [xIP0, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET ]
@@ -95,8 +93,6 @@
ldr xIP0, [xIP0] // xIP0 = & (art::Runtime * art::Runtime.instance_) .
// xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefOnly] .
- THIS_LOAD_REQUIRES_READ_BARRIER
-
// Loads appropriate callee-save-method.
ldr xIP0, [xIP0, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET ]
@@ -251,7 +247,6 @@
ldr xIP0, [xIP0] // xIP0 = & (art::Runtime * art::Runtime.instance_) .
// xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefAndArgs] .
- THIS_LOAD_REQUIRES_READ_BARRIER
ldr xIP0, [xIP0, RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET ]
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
@@ -1119,6 +1114,62 @@ ENTRY art_quick_check_cast
brk 0 // We should not return here...
END art_quick_check_cast
+// Restore xReg's value from [sp, #offset] if xReg is not the same as xExclude.
+.macro POP_REG_NE xReg, offset, xExclude
+ .ifnc \xReg, \xExclude
+ ldr \xReg, [sp, #\offset] // restore xReg
+ .cfi_restore \xReg
+ .endif
+.endm
+
+ /*
+ * Macro to insert read barrier, only used in art_quick_aput_obj.
+ * xDest, wDest and xObj are registers, offset is a defined literal such as
+ * MIRROR_OBJECT_CLASS_OFFSET. Dest needs both x and w versions of the same register to handle
+ * name mismatch between instructions. This macro uses the lower 32b of register when possible.
+ * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path.
+ */
+.macro READ_BARRIER xDest, wDest, xObj, offset
+#ifdef USE_READ_BARRIER
+ // Store registers used in art_quick_aput_obj (x0-x4, LR), stack is 16B aligned.
+ stp x0, x1, [sp, #-48]!
+ .cfi_adjust_cfa_offset 48
+ .cfi_rel_offset x0, 0
+ .cfi_rel_offset x1, 8
+ stp x2, x3, [sp, #16]
+ .cfi_rel_offset x2, 16
+ .cfi_rel_offset x3, 24
+ stp x4, xLR, [sp, #32]
+ .cfi_rel_offset x4, 32
+ .cfi_rel_offset x30, 40
+
+ // mov x0, x0 // pass ref in x0 (no-op for now since parameter ref is unused)
+ .ifnc \xObj, x1
+ mov x1, \xObj // pass xObj
+ .endif
+ mov w2, #\offset // pass offset
+ bl artReadBarrierSlow // artReadBarrierSlow(ref, xObj, offset)
+ // No need to unpoison return value in w0, artReadBarrierSlow() would do the unpoisoning.
+ .ifnc \wDest, w0
+ mov \wDest, w0 // save return value in wDest
+ .endif
+
+ // Conditionally restore saved registers
+ POP_REG_NE x0, 0, \xDest
+ POP_REG_NE x1, 8, \xDest
+ POP_REG_NE x2, 16, \xDest
+ POP_REG_NE x3, 24, \xDest
+ POP_REG_NE x4, 32, \xDest
+ ldr xLR, [sp, #40]
+ .cfi_restore x30
+ add sp, sp, #48
+ .cfi_adjust_cfa_offset -48
+#else
+ ldr \wDest, [\xObj, #\offset] // Heap reference = 32b. This also zero-extends to \xDest.
+ UNPOISON_HEAP_REF \wDest
+#endif // USE_READ_BARRIER
+.endm
+
/*
* Entry from managed code for array put operations of objects where the value being stored
* needs to be checked for compatibility.
@@ -1146,17 +1197,17 @@ ENTRY art_quick_aput_obj_with_bound_check
b art_quick_throw_array_bounds
END art_quick_aput_obj_with_bound_check
+#ifdef USE_READ_BARRIER
+ .extern artReadBarrierSlow
+#endif
ENTRY art_quick_aput_obj
cbz x2, .Ldo_aput_null
- ldr w3, [x0, #MIRROR_OBJECT_CLASS_OFFSET] // Heap reference = 32b
+ READ_BARRIER x3, w3, x0, MIRROR_OBJECT_CLASS_OFFSET // Heap reference = 32b
// This also zero-extends to x3
- UNPOISON_HEAP_REF w3
- ldr w4, [x2, #MIRROR_OBJECT_CLASS_OFFSET] // Heap reference = 32b
+ READ_BARRIER x4, w4, x2, MIRROR_OBJECT_CLASS_OFFSET // Heap reference = 32b
// This also zero-extends to x4
- UNPOISON_HEAP_REF w4
- ldr w3, [x3, #MIRROR_CLASS_COMPONENT_TYPE_OFFSET] // Heap reference = 32b
+ READ_BARRIER x3, w3, x3, MIRROR_CLASS_COMPONENT_TYPE_OFFSET // Heap reference = 32b
// This also zero-extends to x3
- UNPOISON_HEAP_REF w3
cmp w3, w4 // value's type == array's component type - trivial assignability
bne .Lcheck_assignability
.Ldo_aput:
diff --git a/runtime/arch/mips/entrypoints_direct_mips.h b/runtime/arch/mips/entrypoints_direct_mips.h
index b1aa3ee63f..f9c53152f6 100644
--- a/runtime/arch/mips/entrypoints_direct_mips.h
+++ b/runtime/arch/mips/entrypoints_direct_mips.h
@@ -44,7 +44,8 @@ static constexpr bool IsDirectEntrypoint(QuickEntrypointEnum entrypoint) {
entrypoint == kQuickCmpgDouble ||
entrypoint == kQuickCmpgFloat ||
entrypoint == kQuickCmplDouble ||
- entrypoint == kQuickCmplFloat;
+ entrypoint == kQuickCmplFloat ||
+ entrypoint == kQuickReadBarrierSlow;
}
} // namespace art
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index 09a018ebc6..4e4b91fdcd 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -279,6 +279,8 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
qpoints->pReadBarrierJni = ReadBarrierJni;
static_assert(!IsDirectEntrypoint(kQuickReadBarrierJni), "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierSlow = artReadBarrierSlow;
+ static_assert(IsDirectEntrypoint(kQuickReadBarrierSlow), "Direct C stub not marked direct.");
};
} // namespace art
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 2819f92a0d..4d5004f444 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -79,7 +79,6 @@
lw $t0, %got(_ZN3art7Runtime9instance_E)($gp)
lw $t0, 0($t0)
- THIS_LOAD_REQUIRES_READ_BARRIER
lw $t0, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET($t0)
sw $t0, 0($sp) # Place Method* at bottom of stack.
sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
@@ -127,7 +126,6 @@
lw $t0, %got(_ZN3art7Runtime9instance_E)($gp)
lw $t0, 0($t0)
- THIS_LOAD_REQUIRES_READ_BARRIER
lw $t0, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET($t0)
sw $t0, 0($sp) # Place Method* at bottom of stack.
sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
@@ -219,7 +217,6 @@
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_REGISTERS_ONLY
lw $t0, %got(_ZN3art7Runtime9instance_E)($gp)
lw $t0, 0($t0)
- THIS_LOAD_REQUIRES_READ_BARRIER
lw $t0, RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET($t0)
sw $t0, 0($sp) # Place Method* at bottom of stack.
sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
@@ -627,6 +624,76 @@ ENTRY art_quick_check_cast
END art_quick_check_cast
/*
+ * Restore rReg's value from offset($sp) if rReg is not the same as rExclude.
+ * nReg is the register number for rReg.
+ */
+.macro POP_REG_NE rReg, nReg, offset, rExclude
+ .ifnc \rReg, \rExclude
+ lw \rReg, \offset($sp) # restore rReg
+ .cfi_restore \nReg
+ .endif
+.endm
+
+ /*
+ * Macro to insert read barrier, only used in art_quick_aput_obj.
+ * rObj and rDest are registers, offset is a defined literal such as MIRROR_OBJECT_CLASS_OFFSET.
+ * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path.
+ */
+.macro READ_BARRIER rDest, rObj, offset
+#ifdef USE_READ_BARRIER
+ # saved registers used in art_quick_aput_obj: a0-a2, t0-t1, t9, ra. 8 words for 16B alignment.
+ addiu $sp, $sp, -32
+ .cfi_adjust_cfa_offset 32
+ sw $ra, 28($sp)
+ .cfi_rel_offset 31, 28
+ sw $t9, 24($sp)
+ .cfi_rel_offset 25, 24
+ sw $t1, 20($sp)
+ .cfi_rel_offset 9, 20
+ sw $t0, 16($sp)
+ .cfi_rel_offset 8, 16
+ sw $a2, 8($sp) # padding slot at offset 12 (padding can be any slot in the 32B)
+ .cfi_rel_offset 6, 8
+ sw $a1, 4($sp)
+ .cfi_rel_offset 5, 4
+ sw $a0, 0($sp)
+ .cfi_rel_offset 4, 0
+
+ # move $a0, $a0 # pass ref in a0 (no-op for now since parameter ref is unused)
+ .ifnc \rObj, $a1
+ move $a1, \rObj # pass rObj
+ .endif
+ addiu $a2, $zero, \offset # pass offset
+ jal artReadBarrierSlow # artReadBarrierSlow(ref, rObj, offset)
+ addiu $sp, $sp, -16 # Use branch delay slot to reserve argument slots on the stack
+ # before the call to artReadBarrierSlow.
+ addiu $sp, $sp, 16 # restore stack after call to artReadBarrierSlow
+ # No need to unpoison return value in v0, artReadBarrierSlow() would do the unpoisoning.
+ move \rDest, $v0 # save return value in rDest
+ # (rDest cannot be v0 in art_quick_aput_obj)
+
+ lw $a0, 0($sp) # restore registers except rDest
+ # (rDest can only be t0 or t1 in art_quick_aput_obj)
+ .cfi_restore 4
+ lw $a1, 4($sp)
+ .cfi_restore 5
+ lw $a2, 8($sp)
+ .cfi_restore 6
+ POP_REG_NE $t0, 8, 16, \rDest
+ POP_REG_NE $t1, 9, 20, \rDest
+ lw $t9, 24($sp)
+ .cfi_restore 25
+ lw $ra, 28($sp) # restore $ra
+ .cfi_restore 31
+ addiu $sp, $sp, 32
+ .cfi_adjust_cfa_offset -32
+#else
+ lw \rDest, \offset(\rObj)
+ UNPOISON_HEAP_REF \rDest
+#endif // USE_READ_BARRIER
+.endm
+
+ /*
* Entry from managed code for array put operations of objects where the value being stored
* needs to be checked for compatibility.
* a0 = array, a1 = index, a2 = value
@@ -648,15 +715,15 @@ ENTRY art_quick_aput_obj_with_bound_check
move $a1, $t0
END art_quick_aput_obj_with_bound_check
+#ifdef USE_READ_BARRIER
+ .extern artReadBarrierSlow
+#endif
ENTRY art_quick_aput_obj
beqz $a2, .Ldo_aput_null
nop
- lw $t0, MIRROR_OBJECT_CLASS_OFFSET($a0)
- UNPOISON_HEAP_REF $t0
- lw $t1, MIRROR_OBJECT_CLASS_OFFSET($a2)
- UNPOISON_HEAP_REF $t1
- lw $t0, MIRROR_CLASS_COMPONENT_TYPE_OFFSET($t0)
- UNPOISON_HEAP_REF $t0
+ READ_BARRIER $t0, $a0, MIRROR_OBJECT_CLASS_OFFSET
+ READ_BARRIER $t1, $a2, MIRROR_OBJECT_CLASS_OFFSET
+ READ_BARRIER $t0, $t0, MIRROR_CLASS_COMPONENT_TYPE_OFFSET
bne $t1, $t0, .Lcheck_assignability # value's type == array's component type - trivial assignability
nop
.Ldo_aput:
diff --git a/runtime/arch/mips64/entrypoints_init_mips64.cc b/runtime/arch/mips64/entrypoints_init_mips64.cc
index 4904af9cfc..ec02d5ab69 100644
--- a/runtime/arch/mips64/entrypoints_init_mips64.cc
+++ b/runtime/arch/mips64/entrypoints_init_mips64.cc
@@ -186,6 +186,7 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
// Read barrier
qpoints->pReadBarrierJni = ReadBarrierJni;
+ qpoints->pReadBarrierSlow = artReadBarrierSlow;
};
} // namespace art
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index abca70b363..c30e6ca93f 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -89,7 +89,6 @@
# load appropriate callee-save-method
ld $t1, %got(_ZN3art7Runtime9instance_E)($gp)
ld $t1, 0($t1)
- THIS_LOAD_REQUIRES_READ_BARRIER
ld $t1, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET($t1)
sd $t1, 0($sp) # Place ArtMethod* at bottom of stack.
sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
@@ -132,7 +131,6 @@
# load appropriate callee-save-method
ld $t1, %got(_ZN3art7Runtime9instance_E)($gp)
ld $t1, 0($t1)
- THIS_LOAD_REQUIRES_READ_BARRIER
ld $t1, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET($t1)
sd $t1, 0($sp) # Place Method* at bottom of stack.
sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
@@ -255,7 +253,6 @@
# load appropriate callee-save-method
ld $t1, %got(_ZN3art7Runtime9instance_E)($gp)
ld $t1, 0($t1)
- THIS_LOAD_REQUIRES_READ_BARRIER
ld $t1, RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET($t1)
sd $t1, 0($sp) # Place Method* at bottom of stack.
sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
@@ -888,6 +885,77 @@ ENTRY art_quick_check_cast
move $a2, rSELF # pass Thread::Current
END art_quick_check_cast
+
+ /*
+ * Restore rReg's value from offset($sp) if rReg is not the same as rExclude.
+ * nReg is the register number for rReg.
+ */
+.macro POP_REG_NE rReg, nReg, offset, rExclude
+ .ifnc \rReg, \rExclude
+ ld \rReg, \offset($sp) # restore rReg
+ .cfi_restore \nReg
+ .endif
+.endm
+
+ /*
+ * Macro to insert read barrier, only used in art_quick_aput_obj.
+ * rObj and rDest are registers, offset is a defined literal such as MIRROR_OBJECT_CLASS_OFFSET.
+ * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path.
+ */
+.macro READ_BARRIER rDest, rObj, offset
+#ifdef USE_READ_BARRIER
+ # saved registers used in art_quick_aput_obj: a0-a2, t0-t1, t9, ra. 16B-aligned.
+ daddiu $sp, $sp, -64
+ .cfi_adjust_cfa_offset 64
+ sd $ra, 56($sp)
+ .cfi_rel_offset 31, 56
+ sd $t9, 48($sp)
+ .cfi_rel_offset 25, 48
+ sd $t1, 40($sp)
+ .cfi_rel_offset 13, 40
+ sd $t0, 32($sp)
+ .cfi_rel_offset 12, 32
+ sd $a2, 16($sp) # padding slot at offset 24 (padding can be any slot in the 64B)
+ .cfi_rel_offset 6, 16
+ sd $a1, 8($sp)
+ .cfi_rel_offset 5, 8
+ sd $a0, 0($sp)
+ .cfi_rel_offset 4, 0
+
+ # move $a0, $a0 # pass ref in a0 (no-op for now since parameter ref is unused)
+ .ifnc \rObj, $a1
+ move $a1, \rObj # pass rObj
+ .endif
+ daddiu $a2, $zero, \offset # pass offset
+ jal artReadBarrierSlow # artReadBarrierSlow(ref, rObj, offset)
+ .cpreturn # Restore gp from t8 in branch delay slot.
+ # t8 may be clobbered in artReadBarrierSlow.
+ # No need to unpoison return value in v0, artReadBarrierSlow() would do the unpoisoning.
+ move \rDest, $v0 # save return value in rDest
+ # (rDest cannot be v0 in art_quick_aput_obj)
+
+ ld $a0, 0($sp) # restore registers except rDest
+ # (rDest can only be t0 or t1 in art_quick_aput_obj)
+ .cfi_restore 4
+ ld $a1, 8($sp)
+ .cfi_restore 5
+ ld $a2, 16($sp)
+ .cfi_restore 6
+ POP_REG_NE $t0, 12, 32, \rDest
+ POP_REG_NE $t1, 13, 40, \rDest
+ ld $t9, 48($sp)
+ .cfi_restore 25
+ ld $ra, 56($sp) # restore $ra
+ .cfi_restore 31
+ daddiu $sp, $sp, 64
+ .cfi_adjust_cfa_offset -64
+ SETUP_GP # set up gp because we are not returning
+#else
+ lwu \rDest, \offset(\rObj)
+ UNPOISON_HEAP_REF \rDest
+#endif // USE_READ_BARRIER
+.endm
+
/*
* Entry from managed code for array put operations of objects where the value being stored
* needs to be checked for compatibility.
@@ -913,12 +981,9 @@ END art_quick_aput_obj_with_bound_check
ENTRY art_quick_aput_obj
beq $a2, $zero, .Ldo_aput_null
nop
- lwu $t0, MIRROR_OBJECT_CLASS_OFFSET($a0)
- UNPOISON_HEAP_REF $t0
- lwu $t1, MIRROR_OBJECT_CLASS_OFFSET($a2)
- UNPOISON_HEAP_REF $t1
- lwu $t0, MIRROR_CLASS_COMPONENT_TYPE_OFFSET($t0)
- UNPOISON_HEAP_REF $t0
+ READ_BARRIER $t0, $a0, MIRROR_OBJECT_CLASS_OFFSET
+ READ_BARRIER $t1, $a2, MIRROR_OBJECT_CLASS_OFFSET
+ READ_BARRIER $t0, $t0, MIRROR_CLASS_COMPONENT_TYPE_OFFSET
bne $t1, $t0, .Lcheck_assignability # value's type == array's component type - trivial assignability
nop
.Ldo_aput:
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 0831c26e9a..cf7db34ca1 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -1124,8 +1124,6 @@ TEST_F(StubTest, CheckCast) {
TEST_F(StubTest, APutObj) {
- TEST_DISABLED_FOR_READ_BARRIER();
-
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
Thread* self = Thread::Current();
@@ -1258,8 +1256,6 @@ TEST_F(StubTest, APutObj) {
}
TEST_F(StubTest, AllocObject) {
- TEST_DISABLED_FOR_READ_BARRIER();
-
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
// This will lead to OOM error messages in the log.
@@ -1385,8 +1381,6 @@ TEST_F(StubTest, AllocObject) {
}
TEST_F(StubTest, AllocObjectArray) {
- TEST_DISABLED_FOR_READ_BARRIER();
-
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
// TODO: Check the "Unresolved" allocation stubs
@@ -1474,8 +1468,6 @@ TEST_F(StubTest, AllocObjectArray) {
TEST_F(StubTest, StringCompareTo) {
- TEST_DISABLED_FOR_READ_BARRIER();
-
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
// TODO: Check the "Unresolved" allocation stubs
@@ -2152,8 +2144,6 @@ static void TestFields(Thread* self, StubTest* test, Primitive::Type test_type)
}
TEST_F(StubTest, Fields8) {
- TEST_DISABLED_FOR_READ_BARRIER();
-
Thread* self = Thread::Current();
self->TransitionFromSuspendedToRunnable();
@@ -2166,8 +2156,6 @@ TEST_F(StubTest, Fields8) {
}
TEST_F(StubTest, Fields16) {
- TEST_DISABLED_FOR_READ_BARRIER();
-
Thread* self = Thread::Current();
self->TransitionFromSuspendedToRunnable();
@@ -2180,8 +2168,6 @@ TEST_F(StubTest, Fields16) {
}
TEST_F(StubTest, Fields32) {
- TEST_DISABLED_FOR_READ_BARRIER();
-
Thread* self = Thread::Current();
self->TransitionFromSuspendedToRunnable();
@@ -2193,8 +2179,6 @@ TEST_F(StubTest, Fields32) {
}
TEST_F(StubTest, FieldsObj) {
- TEST_DISABLED_FOR_READ_BARRIER();
-
Thread* self = Thread::Current();
self->TransitionFromSuspendedToRunnable();
@@ -2206,8 +2190,6 @@ TEST_F(StubTest, FieldsObj) {
}
TEST_F(StubTest, Fields64) {
- TEST_DISABLED_FOR_READ_BARRIER();
-
Thread* self = Thread::Current();
self->TransitionFromSuspendedToRunnable();
@@ -2221,8 +2203,6 @@ TEST_F(StubTest, Fields64) {
TEST_F(StubTest, IMT) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
- TEST_DISABLED_FOR_READ_BARRIER();
-
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
@@ -2342,8 +2322,6 @@ TEST_F(StubTest, IMT) {
TEST_F(StubTest, StringIndexOf) {
#if defined(__arm__) || defined(__aarch64__)
- TEST_DISABLED_FOR_READ_BARRIER();
-
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
// garbage is created during ClassLinker::Init
@@ -2416,4 +2394,40 @@ TEST_F(StubTest, StringIndexOf) {
#endif
}
+TEST_F(StubTest, ReadBarrier) {
+#if defined(ART_USE_READ_BARRIER) && (defined(__i386__) || defined(__arm__) || \
+ defined(__aarch64__) || defined(__mips__) || (defined(__x86_64__) && !defined(__APPLE__)))
+ Thread* self = Thread::Current();
+
+ const uintptr_t readBarrierSlow = StubTest::GetEntrypoint(self, kQuickReadBarrierSlow);
+
+ // Create an object
+ ScopedObjectAccess soa(self);
+ // garbage is created during ClassLinker::Init
+
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::Class> c(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));
+
+ // Build an object instance
+ Handle<mirror::Object> obj(hs.NewHandle(c->AllocObject(soa.Self())));
+
+ EXPECT_FALSE(self->IsExceptionPending());
+
+ size_t result = Invoke3(0U, reinterpret_cast<size_t>(obj.Get()),
+ mirror::Object::ClassOffset().SizeValue(), readBarrierSlow, self);
+
+ EXPECT_FALSE(self->IsExceptionPending());
+ EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
+ mirror::Class* klass = reinterpret_cast<mirror::Class*>(result);
+ EXPECT_EQ(klass, obj->GetClass());
+
+ // Tests done.
+#else
+ LOG(INFO) << "Skipping read_barrier_slow";
+ // Force-print to std::cout so it's also outside the logcat.
+ std::cout << "Skipping read_barrier_slow" << std::endl;
+#endif
+}
+
} // namespace art
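
The new StubTest.ReadBarrier above pins down the contract of the slow-path entrypoint: artReadBarrierSlow(ref, obj, offset) returns the (possibly to-space) reference stored in obj at the given offset, so calling it with the class offset must hand back obj's class. A minimal standalone sketch of that contract, assuming a non-moving collector where the slow path simply reloads the field (ToyObject and ReadBarrierSlowSketch are hypothetical, not the ART implementation):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Toy object layout: a single 32-bit compressed class reference at offset 0,
    // mirroring MIRROR_OBJECT_CLASS_OFFSET used by the stubs above.
    struct ToyObject {
      uint32_t klass;
    };

    // Hypothetical stand-in for artReadBarrierSlow: reload the reference stored
    // at obj + offset (a real moving collector would also forward it to to-space).
    uint32_t ReadBarrierSlowSketch(uint32_t /* ref, unused like in the stubs */,
                                   const ToyObject* obj,
                                   uint32_t offset) {
      uint32_t result;
      std::memcpy(&result, reinterpret_cast<const uint8_t*>(obj) + offset, sizeof(result));
      return result;
    }

    int main() {
      ToyObject o{0x12345678u};
      // Same shape as the stub test: query the field at the class offset and
      // expect the object's class back.
      assert(ReadBarrierSlowSketch(0u, &o, 0u) == o.klass);
      return 0;
    }
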
diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc
index 737f4d1c5b..e2632c103b 100644
--- a/runtime/arch/x86/entrypoints_init_x86.cc
+++ b/runtime/arch/x86/entrypoints_init_x86.cc
@@ -28,6 +28,9 @@ namespace art {
extern "C" uint32_t art_quick_is_assignable(const mirror::Class* klass,
const mirror::Class* ref_class);
+// Read barrier entrypoints.
+extern "C" mirror::Object* art_quick_read_barrier_slow(mirror::Object*, mirror::Object*, uint32_t);
+
void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
QuickEntryPoints* qpoints) {
// Interpreter
@@ -141,6 +144,7 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
// Read barrier
qpoints->pReadBarrierJni = ReadBarrierJni;
+ qpoints->pReadBarrierSlow = art_quick_read_barrier_slow;
};
} // namespace art
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index ebfb3faf4b..1da5a2ff17 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -33,7 +33,6 @@ MACRO2(SETUP_SAVE_ALL_CALLEE_SAVE_FRAME, got_reg, temp_reg)
movl SYMBOL(_ZN3art7Runtime9instance_E)@GOT(REG_VAR(got_reg)), REG_VAR(temp_reg)
movl (REG_VAR(temp_reg)), REG_VAR(temp_reg)
// Push save all callee-save method.
- THIS_LOAD_REQUIRES_READ_BARRIER
pushl RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET(REG_VAR(temp_reg))
CFI_ADJUST_CFA_OFFSET(4)
// Store esp as the top quick frame.
@@ -60,7 +59,6 @@ MACRO2(SETUP_REFS_ONLY_CALLEE_SAVE_FRAME, got_reg, temp_reg)
movl SYMBOL(_ZN3art7Runtime9instance_E)@GOT(REG_VAR(got_reg)), REG_VAR(temp_reg)
movl (REG_VAR(temp_reg)), REG_VAR(temp_reg)
// Push save all callee-save method.
- THIS_LOAD_REQUIRES_READ_BARRIER
pushl RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET(REG_VAR(temp_reg))
CFI_ADJUST_CFA_OFFSET(4)
// Store esp as the top quick frame.
@@ -106,7 +104,6 @@ MACRO2(SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME, got_reg, temp_reg)
movl SYMBOL(_ZN3art7Runtime9instance_E)@GOT(REG_VAR(got_reg)), REG_VAR(temp_reg)
movl (REG_VAR(temp_reg)), REG_VAR(temp_reg)
// Push save all callee-save method.
- THIS_LOAD_REQUIRES_READ_BARRIER
pushl RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET(REG_VAR(temp_reg))
CFI_ADJUST_CFA_OFFSET(4)
// Store esp as the stop quick frame.
@@ -1126,6 +1123,53 @@ DEFINE_FUNCTION art_quick_check_cast
UNREACHABLE
END_FUNCTION art_quick_check_cast
+// Restore reg's value if reg is not the same as exclude_reg, otherwise just adjust stack.
+MACRO2(POP_REG_NE, reg, exclude_reg)
+ .ifc RAW_VAR(reg), RAW_VAR(exclude_reg)
+ addl MACRO_LITERAL(4), %esp
+ CFI_ADJUST_CFA_OFFSET(-4)
+ .else
+ POP RAW_VAR(reg)
+ .endif
+END_MACRO
+
+ /*
+ * Macro to insert read barrier, only used in art_quick_aput_obj.
+ * obj_reg and dest_reg are registers, offset is a defined literal such as
+ * MIRROR_OBJECT_CLASS_OFFSET.
+ * pop_eax is a boolean flag, indicating if eax is popped after the call.
+ * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path.
+ */
+MACRO4(READ_BARRIER, obj_reg, offset, dest_reg, pop_eax)
+#ifdef USE_READ_BARRIER
+ PUSH eax // save registers used in art_quick_aput_obj
+ PUSH ebx
+ PUSH edx
+ PUSH ecx
+ // Outgoing argument set up
+ pushl MACRO_LITERAL((RAW_VAR(offset))) // pass offset, double parentheses are necessary
+ CFI_ADJUST_CFA_OFFSET(4)
+ PUSH RAW_VAR(obj_reg) // pass obj_reg
+ PUSH eax // pass ref, just pass eax for now since parameter ref is unused
+ call SYMBOL(artReadBarrierSlow) // artReadBarrierSlow(ref, obj_reg, offset)
+ // No need to unpoison return value in eax, artReadBarrierSlow() would do the unpoisoning.
+ .ifnc RAW_VAR(dest_reg), eax
+ movl %eax, REG_VAR(dest_reg) // save loaded ref in dest_reg
+ .endif
+ addl MACRO_LITERAL(12), %esp // pop arguments
+ CFI_ADJUST_CFA_OFFSET(-12)
+ POP_REG_NE ecx, RAW_VAR(dest_reg) // Restore args except dest_reg
+ POP_REG_NE edx, RAW_VAR(dest_reg)
+ POP_REG_NE ebx, RAW_VAR(dest_reg)
+ .ifc RAW_VAR(pop_eax), true
+ POP_REG_NE eax, RAW_VAR(dest_reg)
+ .endif
+#else
+ movl RAW_VAR(offset)(REG_VAR(obj_reg)), REG_VAR(dest_reg)
+ UNPOISON_HEAP_REF RAW_VAR(dest_reg)
+#endif // USE_READ_BARRIER
+END_MACRO
+
/*
* Entry from managed code for array put operations of objects where the value being stored
* needs to be checked for compatibility.
@@ -1149,17 +1193,20 @@ END_FUNCTION art_quick_aput_obj_with_bound_check
DEFINE_FUNCTION art_quick_aput_obj
test %edx, %edx // store of null
jz .Ldo_aput_null
- movl MIRROR_OBJECT_CLASS_OFFSET(%eax), %ebx
- UNPOISON_HEAP_REF ebx
- movl MIRROR_CLASS_COMPONENT_TYPE_OFFSET(%ebx), %ebx
- UNPOISON_HEAP_REF ebx
+ READ_BARRIER eax, MIRROR_OBJECT_CLASS_OFFSET, ebx, true
+ READ_BARRIER ebx, MIRROR_CLASS_COMPONENT_TYPE_OFFSET, ebx, true
// value's type == array's component type - trivial assignability
-#ifdef USE_HEAP_POISONING
- PUSH eax // save eax
+#if defined(USE_READ_BARRIER)
+ READ_BARRIER edx, MIRROR_OBJECT_CLASS_OFFSET, eax, false
+ cmpl %eax, %ebx
+ POP eax // restore eax from the push in the beginning of READ_BARRIER macro
+#elif defined(USE_HEAP_POISONING)
+ PUSH eax // save eax
+ // Cannot call READ_BARRIER macro here, because the above push messes up stack alignment.
movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %eax
UNPOISON_HEAP_REF eax
cmpl %eax, %ebx
- POP eax // restore eax
+ POP eax // restore eax
#else
cmpl MIRROR_OBJECT_CLASS_OFFSET(%edx), %ebx
#endif
@@ -1181,6 +1228,8 @@ DEFINE_FUNCTION art_quick_aput_obj
subl LITERAL(8), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(8)
#ifdef USE_HEAP_POISONING
+ // This load does not need a read barrier: edx is unchanged and there's no GC safe point
+ // since the last read of MIRROR_OBJECT_CLASS_OFFSET(%edx).
movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %eax // pass arg2 - type of the value to be stored
UNPOISON_HEAP_REF eax
PUSH eax
@@ -1696,5 +1745,15 @@ DEFINE_FUNCTION art_nested_signal_return
UNREACHABLE
END_FUNCTION art_nested_signal_return
+DEFINE_FUNCTION art_quick_read_barrier_slow
+ PUSH edx // pass arg3 - offset
+ PUSH ecx // pass arg2 - obj
+ PUSH eax // pass arg1 - ref
+ call SYMBOL(artReadBarrierSlow) // artReadBarrierSlow(ref, obj, offset)
+ addl LITERAL(12), %esp // pop arguments
+ CFI_ADJUST_CFA_OFFSET(-12)
+ ret
+END_FUNCTION art_quick_read_barrier_slow
+
// TODO: implement these!
UNIMPLEMENTED art_quick_memcmp16
diff --git a/runtime/arch/x86_64/asm_support_x86_64.S b/runtime/arch/x86_64/asm_support_x86_64.S
index 706ae58d91..cf0039c84e 100644
--- a/runtime/arch/x86_64/asm_support_x86_64.S
+++ b/runtime/arch/x86_64/asm_support_x86_64.S
@@ -24,6 +24,7 @@
#define MACRO1(macro_name, macro_arg1) .macro macro_name macro_arg1
#define MACRO2(macro_name, macro_arg1, macro_arg2) .macro macro_name macro_arg1, macro_arg2
#define MACRO3(macro_name, macro_arg1, macro_arg2, macro_arg3) .macro macro_name macro_arg1, macro_arg2, macro_arg3
+#define MACRO4(macro_name, macro_arg1, macro_arg2, macro_arg3, macro_arg4) .macro macro_name macro_arg1, macro_arg2, macro_arg3, macro_arg4
#define END_MACRO .endm
#if defined(__clang__)
diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
index d0ab9d5d49..ef1bb5f9a7 100644
--- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc
+++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
@@ -29,6 +29,9 @@ namespace art {
extern "C" uint32_t art_quick_assignable_from_code(const mirror::Class* klass,
const mirror::Class* ref_class);
+// Read barrier entrypoints.
+extern "C" mirror::Object* art_quick_read_barrier_slow(mirror::Object*, mirror::Object*, uint32_t);
+
void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
QuickEntryPoints* qpoints) {
#if defined(__APPLE__)
@@ -145,6 +148,7 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
// Read barrier
qpoints->pReadBarrierJni = ReadBarrierJni;
+ qpoints->pReadBarrierSlow = art_quick_read_barrier_slow;
#endif // __APPLE__
};
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 0eeb03a526..f4c9488260 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -66,7 +66,6 @@ MACRO0(SETUP_SAVE_ALL_CALLEE_SAVE_FRAME)
movq %xmm14, 24(%rsp)
movq %xmm15, 32(%rsp)
// R10 := ArtMethod* for save all callee save frame method.
- THIS_LOAD_REQUIRES_READ_BARRIER
movq RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10
// Store ArtMethod* to bottom of stack.
movq %r10, 0(%rsp)
@@ -109,7 +108,6 @@ MACRO0(SETUP_REFS_ONLY_CALLEE_SAVE_FRAME)
movq %xmm14, 24(%rsp)
movq %xmm15, 32(%rsp)
// R10 := ArtMethod* for refs only callee save frame method.
- THIS_LOAD_REQUIRES_READ_BARRIER
movq RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10
// Store ArtMethod* to bottom of stack.
movq %r10, 0(%rsp)
@@ -168,7 +166,6 @@ MACRO0(SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME)
subq MACRO_LITERAL(80 + 4 * 8), %rsp
CFI_ADJUST_CFA_OFFSET(80 + 4 * 8)
// R10 := ArtMethod* for ref and args callee save frame method.
- THIS_LOAD_REQUIRES_READ_BARRIER
movq RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10
// Save FPRs.
movq %xmm0, 16(%rsp)
@@ -920,8 +917,12 @@ DEFINE_FUNCTION art_quick_alloc_object_tlab
// Fast path tlab allocation.
// RDI: uint32_t type_idx, RSI: ArtMethod*
// RDX, RCX, R8, R9: free. RAX: return val.
+ // TODO: Add read barrier when this function is used.
+ // Might need a special macro since rsi and edx are 32b/64b mismatched.
movl ART_METHOD_DEX_CACHE_TYPES_OFFSET(%rsi), %edx // Load dex cache resolved types array
UNPOISON_HEAP_REF edx
+ // TODO: Add read barrier when this function is used.
+ // Might need to break down into multiple instructions to get the base address in a register.
// Load the class
movl MIRROR_OBJECT_ARRAY_DATA_OFFSET(%rdx, %rdi, MIRROR_OBJECT_ARRAY_COMPONENT_SIZE), %edx
UNPOISON_HEAP_REF edx
@@ -1153,6 +1154,60 @@ DEFINE_FUNCTION art_quick_check_cast
END_FUNCTION art_quick_check_cast
+// Restore reg's value if reg is not the same as exclude_reg, otherwise just adjust stack.
+MACRO2(POP_REG_NE, reg, exclude_reg)
+ .ifc RAW_VAR(reg), RAW_VAR(exclude_reg)
+ addq MACRO_LITERAL(8), %rsp
+ CFI_ADJUST_CFA_OFFSET(-8)
+ .else
+ POP RAW_VAR(reg)
+ .endif
+END_MACRO
+
+ /*
+ * Macro to insert a read barrier, used in art_quick_aput_obj and art_quick_alloc_object_tlab.
+ * obj_reg and dest_reg{32|64} are registers, offset is a defined literal such as
+ * MIRROR_OBJECT_CLASS_OFFSET. dest_reg needs two versions to handle the mismatch between
+ * 64b PUSH/POP and 32b argument.
+ * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path.
+ *
+ * As with art_quick_aput_obj* functions, the 64b versions are in comments.
+ */
+MACRO4(READ_BARRIER, obj_reg, offset, dest_reg32, dest_reg64)
+#ifdef USE_READ_BARRIER
+ PUSH rax // save registers that might be used
+ PUSH rdi
+ PUSH rsi
+ PUSH rdx
+ PUSH rcx
+ SETUP_FP_CALLEE_SAVE_FRAME
+ // Outgoing argument set up
+ // movl %edi, %edi // pass ref, no-op for now since parameter ref is unused
+ // // movq %rdi, %rdi
+ movl REG_VAR(obj_reg), %esi // pass obj_reg
+ // movq REG_VAR(obj_reg), %rsi
+ movl MACRO_LITERAL((RAW_VAR(offset))), %edx // pass offset, double parentheses are necessary
+ // movq MACRO_LITERAL((RAW_VAR(offset))), %rdx
+ call SYMBOL(artReadBarrierSlow) // artReadBarrierSlow(ref, obj_reg, offset)
+ // No need to unpoison the return value in rax; artReadBarrierSlow() does the unpoisoning.
+ .ifnc RAW_VAR(dest_reg32), eax
+ // .ifnc RAW_VAR(dest_reg64), rax
+ movl %eax, REG_VAR(dest_reg32) // save loaded ref in dest_reg
+ // movq %rax, REG_VAR(dest_reg64)
+ .endif
+ RESTORE_FP_CALLEE_SAVE_FRAME
+ POP_REG_NE rcx, RAW_VAR(dest_reg64) // Restore registers except dest_reg
+ POP_REG_NE rdx, RAW_VAR(dest_reg64)
+ POP_REG_NE rsi, RAW_VAR(dest_reg64)
+ POP_REG_NE rdi, RAW_VAR(dest_reg64)
+ POP_REG_NE rax, RAW_VAR(dest_reg64)
+#else
+ movl RAW_VAR(offset)(REG_VAR(obj_reg)), REG_VAR(dest_reg32)
+ // movq RAW_VAR(offset)(REG_VAR(obj_reg)), REG_VAR(dest_reg64)
+ UNPOISON_HEAP_REF RAW_VAR(dest_reg32) // UNPOISON_HEAP_REF only takes a 32b register
+#endif // USE_READ_BARRIER
+END_MACRO
+
/*
* Entry from managed code for array put operations of objects where the value being stored
* needs to be checked for compatibility.
@@ -1197,15 +1252,13 @@ DEFINE_FUNCTION art_quick_aput_obj
testl %edx, %edx // store of null
// test %rdx, %rdx
jz .Ldo_aput_null
- movl MIRROR_OBJECT_CLASS_OFFSET(%edi), %ecx
-// movq MIRROR_OBJECT_CLASS_OFFSET(%rdi), %rcx
- UNPOISON_HEAP_REF ecx
- movl MIRROR_CLASS_COMPONENT_TYPE_OFFSET(%ecx), %ecx
-// movq MIRROR_CLASS_COMPONENT_TYPE_OFFSET(%rcx), %rcx
- UNPOISON_HEAP_REF ecx
-#ifdef USE_HEAP_POISONING
- movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %eax // rax is free.
- UNPOISON_HEAP_REF eax
+ READ_BARRIER edi, MIRROR_OBJECT_CLASS_OFFSET, ecx, rcx
+ // READ_BARRIER rdi, MIRROR_OBJECT_CLASS_OFFSET, ecx, rcx
+ READ_BARRIER ecx, MIRROR_CLASS_COMPONENT_TYPE_OFFSET, ecx, rcx
+ // READ_BARRIER rcx, MIRROR_CLASS_COMPONENT_TYPE_OFFSET, ecx, rcx
+#if defined(USE_HEAP_POISONING) || defined(USE_READ_BARRIER)
+ READ_BARRIER edx, MIRROR_OBJECT_CLASS_OFFSET, eax, rax // rax is free.
+ // READ_BARRIER rdx, MIRROR_OBJECT_CLASS_OFFSET, eax, rax
cmpl %eax, %ecx // value's type == array's component type - trivial assignability
#else
cmpl MIRROR_OBJECT_CLASS_OFFSET(%edx), %ecx // value's type == array's component type - trivial assignability
@@ -1232,9 +1285,14 @@ DEFINE_FUNCTION art_quick_aput_obj
PUSH rdx
SETUP_FP_CALLEE_SAVE_FRAME
- // "Uncompress" = do nothing, as already zero-extended on load.
- movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %esi // Pass arg2 = value's class.
- UNPOISON_HEAP_REF esi
+#if defined(USE_HEAP_POISONING) || defined(USE_READ_BARRIER)
+ // The load of MIRROR_OBJECT_CLASS_OFFSET(%edx) is redundant, eax still holds the value.
+ movl %eax, %esi // Pass arg2 = value's class.
+ // movq %rax, %rsi
+#else
+ // "Uncompress" = do nothing, as already zero-extended on load.
+ movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %esi // Pass arg2 = value's class.
+#endif
movq %rcx, %rdi // Pass arg1 = array's component type.
call SYMBOL(artIsAssignableFromCode) // (Class* a, Class* b)
@@ -1735,3 +1793,14 @@ DEFINE_FUNCTION art_nested_signal_return
call PLT_SYMBOL(longjmp)
UNREACHABLE
END_FUNCTION art_nested_signal_return
+
+DEFINE_FUNCTION art_quick_read_barrier_slow
+ SETUP_FP_CALLEE_SAVE_FRAME
+ subq LITERAL(8), %rsp // Alignment padding.
+ CFI_ADJUST_CFA_OFFSET(8)
+ call SYMBOL(artReadBarrierSlow) // artReadBarrierSlow(ref, obj, offset)
+ addq LITERAL(8), %rsp
+ CFI_ADJUST_CFA_OFFSET(-8)
+ RESTORE_FP_CALLEE_SAVE_FRAME
+ ret
+END_FUNCTION art_quick_read_barrier_slow
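Read together with the x86 stub earlier in this change, both wrappers funnel into the same three-argument C entrypoint. As a reading aid, the sketch below (C++, not part of the patch) records that mapping; the x86-64 column assumes compiled callers already follow the SysV argument registers, which the stub itself does not re-load.

  // Sketch only: the C entrypoint both stubs call (declared in quick_entrypoints.h later in this change).
  //                            x86 wrapper          x86-64 wrapper
  extern "C" mirror::Object* artReadBarrierSlow(
      mirror::Object* ref,   // pushed from %eax     assumed in %rdi (unused by the slow path today)
      mirror::Object* obj,   // pushed from %ecx     assumed in %rsi
      uint32_t offset);      // pushed from %edx     assumed in %edx
  // The loaded reference comes back in %eax / %rax, already unpoisoned when heap poisoning is enabled.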
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index f4f8eaf759..350a0d4c15 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -109,7 +109,7 @@ ADD_TEST_EQ(THREAD_SELF_OFFSET,
art::Thread::SelfOffset<__SIZEOF_POINTER__>().Int32Value())
// Offset of field Thread::tlsPtr_.thread_local_pos.
-#define THREAD_LOCAL_POS_OFFSET (THREAD_CARD_TABLE_OFFSET + 150 * __SIZEOF_POINTER__)
+#define THREAD_LOCAL_POS_OFFSET (THREAD_CARD_TABLE_OFFSET + 151 * __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_LOCAL_POS_OFFSET,
art::Thread::ThreadLocalPosOffset<__SIZEOF_POINTER__>().Int32Value())
// Offset of field Thread::tlsPtr_.thread_local_end.
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index 40b3669299..38bc8186d5 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -16,6 +16,7 @@
#include "check_jni.h"
+#include <iomanip>
#include <sys/mman.h>
#include <zlib.h>
@@ -1083,10 +1084,29 @@ class ScopedCheck {
}
const char* errorKind = nullptr;
- uint8_t utf8 = CheckUtfBytes(bytes, &errorKind);
+ const uint8_t* utf8 = CheckUtfBytes(bytes, &errorKind);
if (errorKind != nullptr) {
+ // This is an expensive loop that will resize often, but it is not expected to be hit in
+ // practice anyway.
+ std::ostringstream oss;
+ oss << std::hex;
+ const uint8_t* tmp = reinterpret_cast<const uint8_t*>(bytes);
+ while (*tmp != 0) {
+ if (tmp == utf8) {
+ oss << "<";
+ }
+ oss << "0x" << std::setfill('0') << std::setw(2) << static_cast<uint32_t>(*tmp);
+ if (tmp == utf8) {
+ oss << '>';
+ }
+ tmp++;
+ if (*tmp != 0) {
+ oss << ' ';
+ }
+ }
+
AbortF("input is not valid Modified UTF-8: illegal %s byte %#x\n"
- " string: '%s'", errorKind, utf8, bytes);
+ " string: '%s'\n input: '%s'", errorKind, *utf8, bytes, oss.str().c_str());
return false;
}
return true;
@@ -1094,11 +1114,11 @@ class ScopedCheck {
// Checks whether |bytes| is valid modified UTF-8. We also accept 4 byte UTF
// sequences in place of encoded surrogate pairs.
- static uint8_t CheckUtfBytes(const char* bytes, const char** errorKind) {
+ static const uint8_t* CheckUtfBytes(const char* bytes, const char** errorKind) {
while (*bytes != '\0') {
- uint8_t utf8 = *(bytes++);
+ const uint8_t* utf8 = reinterpret_cast<const uint8_t*>(bytes++);
// Switch on the high four bits.
- switch (utf8 >> 4) {
+ switch (*utf8 >> 4) {
case 0x00:
case 0x01:
case 0x02:
@@ -1118,11 +1138,11 @@ class ScopedCheck {
return utf8;
case 0x0f:
// Bit pattern 1111, which might be the start of a 4 byte sequence.
- if ((utf8 & 0x08) == 0) {
+ if ((*utf8 & 0x08) == 0) {
// Bit pattern 1111 0xxx, which is the start of a 4 byte sequence.
// We consume one continuation byte here, and fall through to consume two more.
- utf8 = *(bytes++);
- if ((utf8 & 0xc0) != 0x80) {
+ utf8 = reinterpret_cast<const uint8_t*>(bytes++);
+ if ((*utf8 & 0xc0) != 0x80) {
*errorKind = "continuation";
return utf8;
}
@@ -1135,8 +1155,8 @@ class ScopedCheck {
FALLTHROUGH_INTENDED;
case 0x0e:
// Bit pattern 1110, so there are two additional bytes.
- utf8 = *(bytes++);
- if ((utf8 & 0xc0) != 0x80) {
+ utf8 = reinterpret_cast<const uint8_t*>(bytes++);
+ if ((*utf8 & 0xc0) != 0x80) {
*errorKind = "continuation";
return utf8;
}
@@ -1146,8 +1166,8 @@ class ScopedCheck {
case 0x0c:
case 0x0d:
// Bit pattern 110x, so there is one additional byte.
- utf8 = *(bytes++);
- if ((utf8 & 0xc0) != 0x80) {
+ utf8 = reinterpret_cast<const uint8_t*>(bytes++);
+ if ((*utf8 & 0xc0) != 0x80) {
*errorKind = "continuation";
return utf8;
}
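As a reading aid, the new hex dump can be exercised in isolation. The snippet below is a standalone, hedged reproduction of the dump loop added above (the helper name and the sample string are invented); it shows the formatting for a string whose third byte fails the continuation check handled later in this hunk.

  #include <cstdint>
  #include <iomanip>
  #include <iostream>
  #include <sstream>
  #include <string>

  // Mirrors the dump loop added above: every byte in hex, the offending byte wrapped in <>.
  std::string DumpUtfBytes(const char* bytes, const uint8_t* bad) {
    std::ostringstream oss;
    oss << std::hex;
    for (const uint8_t* tmp = reinterpret_cast<const uint8_t*>(bytes); *tmp != 0; ++tmp) {
      if (tmp == bad) oss << "<";
      oss << "0x" << std::setfill('0') << std::setw(2) << static_cast<uint32_t>(*tmp);
      if (tmp == bad) oss << '>';
      if (*(tmp + 1) != 0) oss << ' ';
    }
    return oss.str();
  }

  int main() {
    const char* s = "A\xC2" "B";  // 0xc2 opens a 2-byte sequence, but 'B' (0x42) is not a continuation byte.
    std::cout << DumpUtfBytes(s, reinterpret_cast<const uint8_t*>(s) + 2) << "\n";
    // Prints: 0x41 0xc2 <0x42>
  }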
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 6a76bf7f07..3883246778 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1236,11 +1236,8 @@ void ClassLinker::InitFromImage() {
bool ClassLinker::ClassInClassTable(mirror::Class* klass) {
ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- auto it = class_table_.Find(GcRoot<mirror::Class>(klass));
- if (it == class_table_.end()) {
- return false;
- }
- return it->Read() == klass;
+ ClassTable* const class_table = ClassTableForClassLoader(klass->GetClassLoader());
+ return class_table != nullptr && class_table->Contains(klass);
}
void ClassLinker::VisitClassRoots(RootVisitor* visitor, VisitRootFlags flags) {
@@ -1263,26 +1260,30 @@ void ClassLinker::VisitClassRoots(RootVisitor* visitor, VisitRootFlags flags) {
// Moving concurrent:
// Need to make sure to not copy ArtMethods without doing read barriers since the roots are
// marked concurrently and we don't hold the classlinker_classes_lock_ when we do the copy.
- for (GcRoot<mirror::Class>& root : class_table_) {
- buffered_visitor.VisitRoot(root);
+ std::vector<std::pair<GcRoot<mirror::ClassLoader>, ClassTable*>> reinsert;
+ for (auto it = classes_.begin(); it != classes_.end(); ) {
+ it->second->VisitRoots(visitor, flags);
+ const GcRoot<mirror::ClassLoader>& root = it->first;
+ mirror::ClassLoader* old_ref = root.Read<kWithoutReadBarrier>();
+ root.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
+ mirror::ClassLoader* new_ref = root.Read<kWithoutReadBarrier>();
+ if (new_ref != old_ref) {
+ reinsert.push_back(*it);
+ it = classes_.erase(it);
+ } else {
+ ++it;
+ }
}
- // PreZygote classes can't move so we won't need to update fields' declaring classes.
- for (GcRoot<mirror::Class>& root : pre_zygote_class_table_) {
- buffered_visitor.VisitRoot(root);
+ for (auto& pair : reinsert) {
+ classes_.Put(pair.first, pair.second);
}
} else if ((flags & kVisitRootFlagNewRoots) != 0) {
for (auto& root : new_class_roots_) {
mirror::Class* old_ref = root.Read<kWithoutReadBarrier>();
root.VisitRoot(visitor, RootInfo(kRootStickyClass));
mirror::Class* new_ref = root.Read<kWithoutReadBarrier>();
- if (UNLIKELY(new_ref != old_ref)) {
- // Uh ohes, GC moved a root in the log. Need to search the class_table and update the
- // corresponding object. This is slow, but luckily for us, this may only happen with a
- // concurrent moving GC.
- auto it = class_table_.Find(GcRoot<mirror::Class>(old_ref));
- DCHECK(it != class_table_.end());
- *it = GcRoot<mirror::Class>(new_ref);
- }
+ // Concurrent moving GC marked new roots through the to-space invariant.
+ CHECK_EQ(new_ref, old_ref);
}
}
buffered_visitor.Flush(); // Flush before clearing new_class_roots_.
@@ -1331,91 +1332,103 @@ void ClassLinker::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
}
}
-void ClassLinker::VisitClasses(ClassVisitor* visitor, void* arg) {
- if (dex_cache_image_class_lookup_required_) {
- MoveImageClassesToClassTable();
- }
- // TODO: why isn't this a ReaderMutexLock?
- WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- for (GcRoot<mirror::Class>& root : class_table_) {
- if (!visitor(root.Read(), arg)) {
- return;
- }
- }
- for (GcRoot<mirror::Class>& root : pre_zygote_class_table_) {
- if (!visitor(root.Read(), arg)) {
+void ClassLinker::VisitClassesInternal(ClassVisitor* visitor) {
+ for (auto& pair : classes_) {
+ ClassTable* const class_table = pair.second;
+ if (!class_table->Visit(visitor)) {
return;
}
}
}
-static bool GetClassesVisitorSet(mirror::Class* c, void* arg) {
- std::set<mirror::Class*>* classes = reinterpret_cast<std::set<mirror::Class*>*>(arg);
- classes->insert(c);
- return true;
+void ClassLinker::VisitClasses(ClassVisitor* visitor) {
+ if (dex_cache_image_class_lookup_required_) {
+ MoveImageClassesToClassTable();
+ }
+ Thread* const self = Thread::Current();
+ ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
+ // Not safe to have thread suspension when we are holding a lock.
+ if (self != nullptr) {
+ ScopedAssertNoThreadSuspension nts(self, __FUNCTION__);
+ VisitClassesInternal(visitor);
+ } else {
+ VisitClassesInternal(visitor);
+ }
}
-struct GetClassesVisitorArrayArg {
- Handle<mirror::ObjectArray<mirror::Class>>* classes;
- int32_t index;
- bool success;
+class GetClassesInToVector : public ClassVisitor {
+ public:
+ bool Visit(mirror::Class* klass) OVERRIDE {
+ classes_.push_back(klass);
+ return true;
+ }
+ std::vector<mirror::Class*> classes_;
};
-static bool GetClassesVisitorArray(mirror::Class* c, void* varg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
- GetClassesVisitorArrayArg* arg = reinterpret_cast<GetClassesVisitorArrayArg*>(varg);
- if (arg->index < (*arg->classes)->GetLength()) {
- (*arg->classes)->Set(arg->index, c);
- arg->index++;
- return true;
- } else {
- arg->success = false;
+class GetClassInToObjectArray : public ClassVisitor {
+ public:
+ explicit GetClassInToObjectArray(mirror::ObjectArray<mirror::Class>* arr)
+ : arr_(arr), index_(0) {}
+
+ bool Visit(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ ++index_;
+ if (index_ <= arr_->GetLength()) {
+ arr_->Set(index_ - 1, klass);
+ return true;
+ }
return false;
}
-}
-void ClassLinker::VisitClassesWithoutClassesLock(ClassVisitor* visitor, void* arg) {
+ bool Succeeded() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ return index_ <= arr_->GetLength();
+ }
+
+ private:
+ mirror::ObjectArray<mirror::Class>* const arr_;
+ int32_t index_;
+};
+
+void ClassLinker::VisitClassesWithoutClassesLock(ClassVisitor* visitor) {
// TODO: it may be possible to avoid secondary storage if we iterate over dex caches. The problem
// is avoiding duplicates.
if (!kMovingClasses) {
- std::set<mirror::Class*> classes;
- VisitClasses(GetClassesVisitorSet, &classes);
- for (mirror::Class* klass : classes) {
- if (!visitor(klass, arg)) {
+ GetClassesInToVector accumulator;
+ VisitClasses(&accumulator);
+ for (mirror::Class* klass : accumulator.classes_) {
+ if (!visitor->Visit(klass)) {
return;
}
}
} else {
- Thread* self = Thread::Current();
+ Thread* const self = Thread::Current();
StackHandleScope<1> hs(self);
- MutableHandle<mirror::ObjectArray<mirror::Class>> classes =
- hs.NewHandle<mirror::ObjectArray<mirror::Class>>(nullptr);
- GetClassesVisitorArrayArg local_arg;
- local_arg.classes = &classes;
- local_arg.success = false;
+ auto classes = hs.NewHandle<mirror::ObjectArray<mirror::Class>>(nullptr);
// We size the array assuming classes won't be added to the class table during the visit.
// If this assumption fails we iterate again.
- while (!local_arg.success) {
+ while (true) {
size_t class_table_size;
{
ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
- class_table_size = class_table_.Size() + pre_zygote_class_table_.Size();
+ // Add 100 in case new classes get loaded when we are filling in the object array.
+ class_table_size = NumZygoteClasses() + NumNonZygoteClasses() + 100;
}
mirror::Class* class_type = mirror::Class::GetJavaLangClass();
mirror::Class* array_of_class = FindArrayClass(self, &class_type);
classes.Assign(
mirror::ObjectArray<mirror::Class>::Alloc(self, array_of_class, class_table_size));
CHECK(classes.Get() != nullptr); // OOME.
- local_arg.index = 0;
- local_arg.success = true;
- VisitClasses(GetClassesVisitorArray, &local_arg);
+ GetClassInToObjectArray accumulator(classes.Get());
+ VisitClasses(&accumulator);
+ if (accumulator.Succeeded()) {
+ break;
+ }
}
for (int32_t i = 0; i < classes->GetLength(); ++i) {
// If the class table shrank during creation of the clases array we expect null elements. If
// the class table grew then the loop repeats. If classes are created after the loop has
// finished then we don't visit.
mirror::Class* klass = classes->Get(i);
- if (klass != nullptr && !visitor(klass, arg)) {
+ if (klass != nullptr && !visitor->Visit(klass)) {
return;
}
}
@@ -1443,6 +1456,7 @@ ClassLinker::~ClassLinker() {
mirror::LongArray::ResetArrayClass();
mirror::ShortArray::ResetArrayClass();
STLDeleteElements(&oat_files_);
+ STLDeleteValues(&classes_);
}
mirror::PointerArray* ClassLinker::AllocPointerArray(Thread* self, size_t length) {
@@ -2458,8 +2472,8 @@ void ClassLinker::AppendToBootClassPath(const DexFile& dex_file,
bool ClassLinker::IsDexFileRegisteredLocked(const DexFile& dex_file) {
dex_lock_.AssertSharedHeld(Thread::Current());
- for (size_t i = 0; i != dex_caches_.size(); ++i) {
- mirror::DexCache* dex_cache = GetDexCache(i);
+ for (GcRoot<mirror::DexCache>& root : dex_caches_) {
+ mirror::DexCache* dex_cache = root.Read();
if (dex_cache->GetDexFile() == &dex_file) {
return true;
}
@@ -2757,8 +2771,7 @@ mirror::Class* ClassLinker::FindPrimitiveClass(char type) {
return nullptr;
}
-mirror::Class* ClassLinker::InsertClass(const char* descriptor, mirror::Class* klass,
- size_t hash) {
+mirror::Class* ClassLinker::InsertClass(const char* descriptor, mirror::Class* klass, size_t hash) {
if (VLOG_IS_ON(class_linker)) {
mirror::DexCache* dex_cache = klass->GetDexCache();
std::string source;
@@ -2769,11 +2782,13 @@ mirror::Class* ClassLinker::InsertClass(const char* descriptor, mirror::Class* k
LOG(INFO) << "Loaded class " << descriptor << source;
}
WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- mirror::Class* existing = LookupClassFromTableLocked(descriptor, klass->GetClassLoader(), hash);
+ mirror::ClassLoader* const class_loader = klass->GetClassLoader();
+ ClassTable* const class_table = InsertClassTableForClassLoader(class_loader);
+ mirror::Class* existing = class_table->Lookup(descriptor, hash);
if (existing != nullptr) {
return existing;
}
- if (kIsDebugBuild && !klass->IsTemp() && klass->GetClassLoader() == nullptr &&
+ if (kIsDebugBuild && !klass->IsTemp() && class_loader == nullptr &&
dex_cache_image_class_lookup_required_) {
// Check a class loaded with the system class loader matches one in the image if the class
// is in the image.
@@ -2783,7 +2798,7 @@ mirror::Class* ClassLinker::InsertClass(const char* descriptor, mirror::Class* k
}
}
VerifyObject(klass);
- class_table_.InsertWithHash(GcRoot<mirror::Class>(klass), hash);
+ class_table->InsertWithHash(klass, hash);
if (log_new_class_table_roots_) {
new_class_roots_.push_back(GcRoot<mirror::Class>(klass));
}
@@ -2802,95 +2817,41 @@ void ClassLinker::UpdateClassVirtualMethods(mirror::Class* klass, ArtMethod* new
}
}
-mirror::Class* ClassLinker::UpdateClass(const char* descriptor, mirror::Class* klass,
- size_t hash) {
- WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- auto existing_it = class_table_.FindWithHash(std::make_pair(descriptor, klass->GetClassLoader()),
- hash);
- CHECK(existing_it != class_table_.end());
- mirror::Class* existing = existing_it->Read();
- CHECK_NE(existing, klass) << descriptor;
- CHECK(!existing->IsResolved()) << descriptor;
- CHECK_EQ(klass->GetStatus(), mirror::Class::kStatusResolving) << descriptor;
-
- CHECK(!klass->IsTemp()) << descriptor;
- if (kIsDebugBuild && klass->GetClassLoader() == nullptr &&
- dex_cache_image_class_lookup_required_) {
- // Check a class loaded with the system class loader matches one in the image if the class
- // is in the image.
- existing = LookupClassFromImage(descriptor);
- if (existing != nullptr) {
- CHECK_EQ(klass, existing) << descriptor;
- }
- }
- VerifyObject(klass);
-
- // Update the element in the hash set.
- *existing_it = GcRoot<mirror::Class>(klass);
- if (log_new_class_table_roots_) {
- new_class_roots_.push_back(GcRoot<mirror::Class>(klass));
- }
-
- return existing;
-}
-
bool ClassLinker::RemoveClass(const char* descriptor, mirror::ClassLoader* class_loader) {
WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- auto pair = std::make_pair(descriptor, class_loader);
- auto it = class_table_.Find(pair);
- if (it != class_table_.end()) {
- class_table_.Erase(it);
- return true;
- }
- it = pre_zygote_class_table_.Find(pair);
- if (it != pre_zygote_class_table_.end()) {
- pre_zygote_class_table_.Erase(it);
- return true;
- }
- return false;
+ ClassTable* const class_table = ClassTableForClassLoader(class_loader);
+ return class_table != nullptr && class_table->Remove(descriptor);
}
mirror::Class* ClassLinker::LookupClass(Thread* self, const char* descriptor, size_t hash,
mirror::ClassLoader* class_loader) {
{
ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
- mirror::Class* result = LookupClassFromTableLocked(descriptor, class_loader, hash);
- if (result != nullptr) {
- return result;
+ ClassTable* const class_table = ClassTableForClassLoader(class_loader);
+ if (class_table != nullptr) {
+ mirror::Class* result = class_table->Lookup(descriptor, hash);
+ if (result != nullptr) {
+ return result;
+ }
}
}
if (class_loader != nullptr || !dex_cache_image_class_lookup_required_) {
return nullptr;
- } else {
- // Lookup failed but need to search dex_caches_.
- mirror::Class* result = LookupClassFromImage(descriptor);
- if (result != nullptr) {
- InsertClass(descriptor, result, hash);
- } else {
- // Searching the image dex files/caches failed, we don't want to get into this situation
- // often as map searches are faster, so after kMaxFailedDexCacheLookups move all image
- // classes into the class table.
- constexpr uint32_t kMaxFailedDexCacheLookups = 1000;
- if (++failed_dex_cache_class_lookups_ > kMaxFailedDexCacheLookups) {
- MoveImageClassesToClassTable();
- }
- }
- return result;
}
-}
-
-mirror::Class* ClassLinker::LookupClassFromTableLocked(const char* descriptor,
- mirror::ClassLoader* class_loader,
- size_t hash) {
- auto descriptor_pair = std::make_pair(descriptor, class_loader);
- auto it = pre_zygote_class_table_.FindWithHash(descriptor_pair, hash);
- if (it == pre_zygote_class_table_.end()) {
- it = class_table_.FindWithHash(descriptor_pair, hash);
- if (it == class_table_.end()) {
- return nullptr;
+ // Lookup failed but need to search dex_caches_.
+ mirror::Class* result = LookupClassFromImage(descriptor);
+ if (result != nullptr) {
+ result = InsertClass(descriptor, result, hash);
+ } else {
+ // Searching the image dex files/caches failed, we don't want to get into this situation
+ // often as map searches are faster, so after kMaxFailedDexCacheLookups move all image
+ // classes into the class table.
+ constexpr uint32_t kMaxFailedDexCacheLookups = 1000;
+ if (++failed_dex_cache_class_lookups_ > kMaxFailedDexCacheLookups) {
+ MoveImageClassesToClassTable();
}
}
- return it->Read();
+ return result;
}
static mirror::ObjectArray<mirror::DexCache>* GetImageDexCaches()
@@ -2910,6 +2871,7 @@ void ClassLinker::MoveImageClassesToClassTable() {
ScopedAssertNoThreadSuspension ants(self, "Moving image classes to class table");
mirror::ObjectArray<mirror::DexCache>* dex_caches = GetImageDexCaches();
std::string temp;
+ ClassTable* const class_table = InsertClassTableForClassLoader(nullptr);
for (int32_t i = 0; i < dex_caches->GetLength(); i++) {
mirror::DexCache* dex_cache = dex_caches->Get(i);
mirror::ObjectArray<mirror::Class>* types = dex_cache->GetResolvedTypes();
@@ -2919,12 +2881,12 @@ void ClassLinker::MoveImageClassesToClassTable() {
DCHECK(klass->GetClassLoader() == nullptr);
const char* descriptor = klass->GetDescriptor(&temp);
size_t hash = ComputeModifiedUtf8Hash(descriptor);
- mirror::Class* existing = LookupClassFromTableLocked(descriptor, nullptr, hash);
+ mirror::Class* existing = class_table->Lookup(descriptor, hash);
if (existing != nullptr) {
CHECK_EQ(existing, klass) << PrettyClassAndClassLoader(existing) << " != "
<< PrettyClassAndClassLoader(klass);
} else {
- class_table_.Insert(GcRoot<mirror::Class>(klass));
+ class_table->Insert(klass);
if (log_new_class_table_roots_) {
new_class_roots_.push_back(GcRoot<mirror::Class>(klass));
}
@@ -2937,9 +2899,9 @@ void ClassLinker::MoveImageClassesToClassTable() {
void ClassLinker::MoveClassTableToPreZygote() {
WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- DCHECK(pre_zygote_class_table_.Empty());
- pre_zygote_class_table_ = std::move(class_table_);
- class_table_.Clear();
+ for (auto& class_table : classes_) {
+ class_table.second->FreezeSnapshot();
+ }
}
mirror::Class* ClassLinker::LookupClassFromImage(const char* descriptor) {
@@ -2971,31 +2933,13 @@ void ClassLinker::LookupClasses(const char* descriptor, std::vector<mirror::Clas
MoveImageClassesToClassTable();
}
WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- while (true) {
- auto it = class_table_.Find(descriptor);
- if (it == class_table_.end()) {
- break;
- }
- result.push_back(it->Read());
- class_table_.Erase(it);
- }
- for (mirror::Class* k : result) {
- class_table_.Insert(GcRoot<mirror::Class>(k));
- }
- size_t pre_zygote_start = result.size();
- // Now handle the pre zygote table.
- // Note: This dirties the pre-zygote table but shouldn't be an issue since LookupClasses is only
- // called from the debugger.
- while (true) {
- auto it = pre_zygote_class_table_.Find(descriptor);
- if (it == pre_zygote_class_table_.end()) {
- break;
+ for (auto& pair : classes_) {
+ // There can only be one class with the same descriptor per class loader.
+ ClassTable* const class_table = pair.second;
+ mirror::Class* klass = class_table->Lookup(descriptor, ComputeModifiedUtf8Hash(descriptor));
+ if (klass != nullptr) {
+ result.push_back(klass);
}
- result.push_back(it->Read());
- pre_zygote_class_table_.Erase(it);
- }
- for (size_t i = pre_zygote_start; i < result.size(); ++i) {
- pre_zygote_class_table_.Insert(GcRoot<mirror::Class>(result[i]));
}
}
@@ -3303,7 +3247,7 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable&
klass->SetDexCache(GetClassRoot(kJavaLangReflectProxy)->GetDexCache());
mirror::Class::SetStatus(klass, mirror::Class::kStatusIdx, self);
std::string descriptor(GetDescriptorForProxy(klass.Get()));
- size_t hash = ComputeModifiedUtf8Hash(descriptor.c_str());
+ const size_t hash = ComputeModifiedUtf8Hash(descriptor.c_str());
// Insert the class before loading the fields as the field roots
// (ArtField::declaring_class_) are only visited from the class
@@ -4046,6 +3990,25 @@ void ClassLinker::FixupTemporaryDeclaringClass(mirror::Class* temp_class,
}
}
+ClassTable* ClassLinker::InsertClassTableForClassLoader(mirror::ClassLoader* class_loader) {
+ auto it = classes_.find(GcRoot<mirror::ClassLoader>(class_loader));
+ if (it != classes_.end()) {
+ return it->second;
+ }
+ // Class table for loader not found, add it to the table.
+ auto* const class_table = new ClassTable;
+ classes_.Put(GcRoot<mirror::ClassLoader>(class_loader), class_table);
+ return class_table;
+}
+
+ClassTable* ClassLinker::ClassTableForClassLoader(mirror::ClassLoader* class_loader) {
+ auto it = classes_.find(GcRoot<mirror::ClassLoader>(class_loader));
+ if (it != classes_.end()) {
+ return it->second;
+ }
+ return nullptr;
+}
+
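These two helpers are the only path from the rest of ClassLinker to a per-loader table. A minimal usage sketch, mirroring what InsertClass() does earlier in this change (the surrounding variables are assumed to exist at the call site):

  // Sketch only: look up or create the table for a loader, then insert under the lock.
  WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
  ClassTable* const table = InsertClassTableForClassLoader(klass->GetClassLoader());
  table->InsertWithHash(klass, ComputeModifiedUtf8Hash(descriptor));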
bool ClassLinker::LinkClass(Thread* self, const char* descriptor, Handle<mirror::Class> klass,
Handle<mirror::ObjectArray<mirror::Class>> interfaces,
MutableHandle<mirror::Class>* h_new_class_out) {
@@ -4096,9 +4059,26 @@ bool ClassLinker::LinkClass(Thread* self, const char* descriptor, Handle<mirror:
CHECK_EQ(h_new_class->GetClassSize(), class_size);
ObjectLock<mirror::Class> lock(self, h_new_class);
FixupTemporaryDeclaringClass(klass.Get(), h_new_class.Get());
- mirror::Class* existing = UpdateClass(descriptor, h_new_class.Get(),
- ComputeModifiedUtf8Hash(descriptor));
- CHECK(existing == nullptr || existing == klass.Get());
+
+ {
+ WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+ mirror::ClassLoader* const class_loader = h_new_class.Get()->GetClassLoader();
+ ClassTable* const table = InsertClassTableForClassLoader(class_loader);
+ mirror::Class* existing = table->UpdateClass(descriptor, h_new_class.Get(),
+ ComputeModifiedUtf8Hash(descriptor));
+ CHECK_EQ(existing, klass.Get());
+ if (kIsDebugBuild && class_loader == nullptr && dex_cache_image_class_lookup_required_) {
+ // Check a class loaded with the system class loader matches one in the image if the class
+ // is in the image.
+ mirror::Class* const image_class = LookupClassFromImage(descriptor);
+ if (image_class != nullptr) {
+ CHECK_EQ(klass.Get(), existing) << descriptor;
+ }
+ }
+ if (log_new_class_table_roots_) {
+ new_class_roots_.push_back(GcRoot<mirror::Class>(h_new_class.Get()));
+ }
+ }
// This will notify waiters on temp class that saw the not yet resolved class in the
// class_table_ during EnsureResolved.
@@ -5589,23 +5569,22 @@ const char* ClassLinker::MethodShorty(uint32_t method_idx, ArtMethod* referrer,
return dex_file.GetMethodShorty(method_id, length);
}
-void ClassLinker::DumpAllClasses(int flags) {
- if (dex_cache_image_class_lookup_required_) {
- MoveImageClassesToClassTable();
- }
- // TODO: at the time this was written, it wasn't safe to call PrettyField with the ClassLinker
- // lock held, because it might need to resolve a field's type, which would try to take the lock.
- std::vector<mirror::Class*> all_classes;
- {
- ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- for (GcRoot<mirror::Class>& it : class_table_) {
- all_classes.push_back(it.Read());
- }
- }
+class DumpClassVisitor : public ClassVisitor {
+ public:
+ explicit DumpClassVisitor(int flags) : flags_(flags) {}
- for (size_t i = 0; i < all_classes.size(); ++i) {
- all_classes[i]->DumpClass(std::cerr, flags);
+ bool Visit(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ klass->DumpClass(LOG(ERROR), flags_);
+ return true;
}
+
+ private:
+ const int flags_;
+};
+
+void ClassLinker::DumpAllClasses(int flags) {
+ DumpClassVisitor visitor(flags);
+ VisitClasses(&visitor);
}
static OatFile::OatMethod CreateOatMethod(const void* code) {
@@ -5658,8 +5637,24 @@ void ClassLinker::DumpForSigQuit(std::ostream& os) {
MoveImageClassesToClassTable();
}
ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
- os << "Zygote loaded classes=" << pre_zygote_class_table_.Size() << " post zygote classes="
- << class_table_.Size() << "\n";
+ os << "Zygote loaded classes=" << NumZygoteClasses() << " post zygote classes="
+ << NumNonZygoteClasses() << "\n";
+}
+
+size_t ClassLinker::NumZygoteClasses() const {
+ size_t sum = 0;
+ for (auto& pair : classes_) {
+ sum += pair.second->NumZygoteClasses();
+ }
+ return sum;
+}
+
+size_t ClassLinker::NumNonZygoteClasses() const {
+ size_t sum = 0;
+ for (auto& pair : classes_) {
+ sum += pair.second->NumNonZygoteClasses();
+ }
+ return sum;
}
size_t ClassLinker::NumLoadedClasses() {
@@ -5668,7 +5663,7 @@ size_t ClassLinker::NumLoadedClasses() {
}
ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
// Only return non zygote classes since these are the ones which apps which care about.
- return class_table_.Size();
+ return NumNonZygoteClasses();
}
pid_t ClassLinker::GetClassesLockOwner() {
@@ -5739,43 +5734,6 @@ const char* ClassLinker::GetClassRootDescriptor(ClassRoot class_root) {
return descriptor;
}
-std::size_t ClassLinker::ClassDescriptorHashEquals::operator()(const GcRoot<mirror::Class>& root)
- const {
- std::string temp;
- return ComputeModifiedUtf8Hash(root.Read()->GetDescriptor(&temp));
-}
-
-bool ClassLinker::ClassDescriptorHashEquals::operator()(const GcRoot<mirror::Class>& a,
- const GcRoot<mirror::Class>& b) const {
- if (a.Read()->GetClassLoader() != b.Read()->GetClassLoader()) {
- return false;
- }
- std::string temp;
- return a.Read()->DescriptorEquals(b.Read()->GetDescriptor(&temp));
-}
-
-std::size_t ClassLinker::ClassDescriptorHashEquals::operator()(
- const std::pair<const char*, mirror::ClassLoader*>& element) const {
- return ComputeModifiedUtf8Hash(element.first);
-}
-
-bool ClassLinker::ClassDescriptorHashEquals::operator()(
- const GcRoot<mirror::Class>& a, const std::pair<const char*, mirror::ClassLoader*>& b) const {
- if (a.Read()->GetClassLoader() != b.second) {
- return false;
- }
- return a.Read()->DescriptorEquals(b.first);
-}
-
-bool ClassLinker::ClassDescriptorHashEquals::operator()(const GcRoot<mirror::Class>& a,
- const char* descriptor) const {
- return a.Read()->DescriptorEquals(descriptor);
-}
-
-std::size_t ClassLinker::ClassDescriptorHashEquals::operator()(const char* descriptor) const {
- return ComputeModifiedUtf8Hash(descriptor);
-}
-
bool ClassLinker::MayBeCalledWithDirectCodePointer(ArtMethod* m) {
if (Runtime::Current()->UseJit()) {
// JIT can have direct code pointers from any method to any other method.
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 05a809e524..c53ff616e5 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -25,6 +25,7 @@
#include "base/hash_set.h"
#include "base/macros.h"
#include "base/mutex.h"
+#include "class_table.h"
#include "dex_file.h"
#include "gc_root.h"
#include "jni.h"
@@ -56,8 +57,6 @@ class Runtime;
class ScopedObjectAccessAlreadyRunnable;
template<size_t kNumReferences> class PACKED(4) StackHandleScope;
-typedef bool (ClassVisitor)(mirror::Class* c, void* arg);
-
enum VisitRootFlags : uint8_t;
class ClassLinker {
@@ -289,14 +288,14 @@ class ClassLinker {
const OatFile* GetPrimaryOatFile()
REQUIRES(!dex_lock_);
- void VisitClasses(ClassVisitor* visitor, void* arg)
+ void VisitClasses(ClassVisitor* visitor)
REQUIRES(!Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
// Less efficient variant of VisitClasses that copies the class_table_ into secondary storage
// so that it can visit individual classes without holding the doesn't hold the
// Locks::classlinker_classes_lock_. As the Locks::classlinker_classes_lock_ isn't held this code
// can race with insertion and deletion of classes while the visitor is being called.
- void VisitClassesWithoutClassesLock(ClassVisitor* visitor, void* arg)
+ void VisitClassesWithoutClassesLock(ClassVisitor* visitor)
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
void VisitClassRoots(RootVisitor* visitor, VisitRootFlags flags)
@@ -476,9 +475,28 @@ class ClassLinker {
void DropFindArrayClassCache() SHARED_REQUIRES(Locks::mutator_lock_);
private:
+ class CompareClassLoaderGcRoot {
+ public:
+ bool operator()(const GcRoot<mirror::ClassLoader>& a, const GcRoot<mirror::ClassLoader>& b)
+ const SHARED_REQUIRES(Locks::mutator_lock_) {
+ return a.Read() < b.Read();
+ }
+ };
+
+ typedef SafeMap<GcRoot<mirror::ClassLoader>, ClassTable*, CompareClassLoaderGcRoot>
+ ClassLoaderClassTable;
+
+ void VisitClassesInternal(ClassVisitor* visitor)
+ REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Returns the number of zygote and image classes.
+ size_t NumZygoteClasses() const REQUIRES(Locks::classlinker_classes_lock_);
+
+ // Returns the number of classes that are neither zygote nor image classes.
+ size_t NumNonZygoteClasses() const REQUIRES(Locks::classlinker_classes_lock_);
+
OatFile& GetImageOatFile(gc::space::ImageSpace* space)
- REQUIRES(!dex_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!dex_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
void FinishInit(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
@@ -543,8 +561,7 @@ class ClassLinker {
SHARED_REQUIRES(Locks::mutator_lock_);
void RegisterDexFileLocked(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
- REQUIRES(dex_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(dex_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
bool IsDexFileRegisteredLocked(const DexFile& dex_file)
SHARED_REQUIRES(dex_lock_, Locks::mutator_lock_);
@@ -568,7 +585,7 @@ class ClassLinker {
bool LinkClass(Thread* self, const char* descriptor, Handle<mirror::Class> klass,
Handle<mirror::ObjectArray<mirror::Class>> interfaces,
MutableHandle<mirror::Class>* h_new_class_out)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::classlinker_classes_lock_);
bool LinkSuperClass(Handle<mirror::Class> klass)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -576,7 +593,8 @@ class ClassLinker {
bool LoadSuperAndInterfaces(Handle<mirror::Class> klass, const DexFile& dex_file)
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
- bool LinkMethods(Thread* self, Handle<mirror::Class> klass,
+ bool LinkMethods(Thread* self,
+ Handle<mirror::Class> klass,
Handle<mirror::ObjectArray<mirror::Class>> interfaces,
ArtMethod** out_imt)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -632,18 +650,16 @@ class ClassLinker {
void EnsurePreverifiedMethods(Handle<mirror::Class> c)
SHARED_REQUIRES(Locks::mutator_lock_);
- mirror::Class* LookupClassFromTableLocked(const char* descriptor,
- mirror::ClassLoader* class_loader,
- size_t hash)
- SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
-
- mirror::Class* UpdateClass(const char* descriptor, mirror::Class* klass, size_t hash)
- REQUIRES(!Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
mirror::Class* LookupClassFromImage(const char* descriptor)
SHARED_REQUIRES(Locks::mutator_lock_);
+ // Returns null if not found.
+ ClassTable* ClassTableForClassLoader(mirror::ClassLoader* class_loader)
+ SHARED_REQUIRES(Locks::mutator_lock_, Locks::classlinker_classes_lock_);
+ // Insert a new class table if not found.
+ ClassTable* InsertClassTableForClassLoader(mirror::ClassLoader* class_loader)
+ SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::classlinker_classes_lock_);
+
// EnsureResolved is called to make sure that a class in the class_table_ has been resolved
// before returning it to the caller. Its the responsibility of the thread that placed the class
// in the table to make it resolved. The thread doing resolution must notify on the class' lock
@@ -690,43 +706,11 @@ class ClassLinker {
std::vector<GcRoot<mirror::DexCache>> dex_caches_ GUARDED_BY(dex_lock_);
std::vector<const OatFile*> oat_files_ GUARDED_BY(dex_lock_);
- class ClassDescriptorHashEquals {
- public:
- // Same class loader and descriptor.
- std::size_t operator()(const GcRoot<mirror::Class>& root) const NO_THREAD_SAFETY_ANALYSIS;
- bool operator()(const GcRoot<mirror::Class>& a, const GcRoot<mirror::Class>& b) const
- NO_THREAD_SAFETY_ANALYSIS;
- // Same class loader and descriptor.
- std::size_t operator()(const std::pair<const char*, mirror::ClassLoader*>& element) const
- NO_THREAD_SAFETY_ANALYSIS;
- bool operator()(const GcRoot<mirror::Class>& a,
- const std::pair<const char*, mirror::ClassLoader*>& b) const
- NO_THREAD_SAFETY_ANALYSIS;
- // Same descriptor.
- bool operator()(const GcRoot<mirror::Class>& a, const char* descriptor) const
- NO_THREAD_SAFETY_ANALYSIS;
- std::size_t operator()(const char* descriptor) const NO_THREAD_SAFETY_ANALYSIS;
- };
- class GcRootEmptyFn {
- public:
- void MakeEmpty(GcRoot<mirror::Class>& item) const {
- item = GcRoot<mirror::Class>();
- }
- bool IsEmpty(const GcRoot<mirror::Class>& item) const {
- return item.IsNull();
- }
- };
+ // This contains strong roots. To enable concurrent root scanning of the class tables, be
+ // careful to use a read barrier when accessing this.
+ ClassLoaderClassTable classes_ GUARDED_BY(Locks::classlinker_classes_lock_);
- // hash set which hashes class descriptor, and compares descriptors nad class loaders. Results
- // should be compared for a matching Class descriptor and class loader.
- typedef HashSet<GcRoot<mirror::Class>, GcRootEmptyFn, ClassDescriptorHashEquals,
- ClassDescriptorHashEquals, TrackingAllocator<GcRoot<mirror::Class>, kAllocatorTagClassTable>>
- Table;
- // This contains strong roots. To enable concurrent root scanning of
- // the class table, be careful to use a read barrier when accessing this.
- Table class_table_ GUARDED_BY(Locks::classlinker_classes_lock_);
- Table pre_zygote_class_table_ GUARDED_BY(Locks::classlinker_classes_lock_);
- std::vector<GcRoot<mirror::Class>> new_class_roots_;
+ // New class roots, only used by CMS since the GC needs to mark these in the pause.
+ std::vector<GcRoot<mirror::Class>> new_class_roots_ GUARDED_BY(Locks::classlinker_classes_lock_);
// Do we need to search dex caches to find image classes?
bool dex_cache_image_class_lookup_required_;
diff --git a/runtime/class_table.cc b/runtime/class_table.cc
new file mode 100644
index 0000000000..c245d4e780
--- /dev/null
+++ b/runtime/class_table.cc
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "class_table.h"
+
+#include "mirror/class-inl.h"
+
+namespace art {
+
+ClassTable::ClassTable() {
+ classes_.push_back(ClassSet());
+}
+
+void ClassTable::FreezeSnapshot() {
+ classes_.push_back(ClassSet());
+}
+
+bool ClassTable::Contains(mirror::Class* klass) {
+ for (ClassSet& class_set : classes_) {
+ auto it = class_set.Find(GcRoot<mirror::Class>(klass));
+ if (it != class_set.end()) {
+ return it->Read() == klass;
+ }
+ }
+ return false;
+}
+
+mirror::Class* ClassTable::UpdateClass(const char* descriptor, mirror::Class* klass, size_t hash) {
+ // Should only be updating the latest table.
+ auto existing_it = classes_.back().FindWithHash(descriptor, hash);
+ if (kIsDebugBuild && existing_it == classes_.back().end()) {
+ for (const ClassSet& class_set : classes_) {
+ if (class_set.FindWithHash(descriptor, hash) != class_set.end()) {
+ LOG(FATAL) << "Updating class found in frozen table " << descriptor;
+ }
+ }
+ LOG(FATAL) << "Updating class not found " << descriptor;
+ }
+ mirror::Class* const existing = existing_it->Read();
+ CHECK_NE(existing, klass) << descriptor;
+ CHECK(!existing->IsResolved()) << descriptor;
+ CHECK_EQ(klass->GetStatus(), mirror::Class::kStatusResolving) << descriptor;
+ CHECK(!klass->IsTemp()) << descriptor;
+ VerifyObject(klass);
+ // Update the element in the hash set with the new class. This is safe to do since the descriptor
+ // doesn't change.
+ *existing_it = GcRoot<mirror::Class>(klass);
+ return existing;
+}
+
+void ClassTable::VisitRoots(RootVisitor* visitor, VisitRootFlags flags ATTRIBUTE_UNUSED) {
+ BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
+ visitor, RootInfo(kRootStickyClass));
+ for (ClassSet& class_set : classes_) {
+ for (GcRoot<mirror::Class>& root : class_set) {
+ buffered_visitor.VisitRoot(root);
+ }
+ }
+}
+
+bool ClassTable::Visit(ClassVisitor* visitor) {
+ for (ClassSet& class_set : classes_) {
+ for (GcRoot<mirror::Class>& root : class_set) {
+ if (!visitor->Visit(root.Read())) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+size_t ClassTable::NumZygoteClasses() const {
+ size_t sum = 0;
+ for (size_t i = 0; i < classes_.size() - 1; ++i) {
+ sum += classes_[i].Size();
+ }
+ return sum;
+}
+
+size_t ClassTable::NumNonZygoteClasses() const {
+ return classes_.back().Size();
+}
+
+mirror::Class* ClassTable::Lookup(const char* descriptor, size_t hash) {
+ for (ClassSet& class_set : classes_) {
+ auto it = class_set.FindWithHash(descriptor, hash);
+ if (it != class_set.end()) {
+ return it->Read();
+ }
+ }
+ return nullptr;
+}
+
+void ClassTable::Insert(mirror::Class* klass) {
+ classes_.back().Insert(GcRoot<mirror::Class>(klass));
+}
+
+void ClassTable::InsertWithHash(mirror::Class* klass, size_t hash) {
+ classes_.back().InsertWithHash(GcRoot<mirror::Class>(klass), hash);
+}
+
+bool ClassTable::Remove(const char* descriptor) {
+ for (ClassSet& class_set : classes_) {
+ auto it = class_set.Find(descriptor);
+ if (it != class_set.end()) {
+ class_set.Erase(it);
+ return true;
+ }
+ }
+ return false;
+}
+
+std::size_t ClassTable::ClassDescriptorHashEquals::operator()(const GcRoot<mirror::Class>& root)
+ const {
+ std::string temp;
+ return ComputeModifiedUtf8Hash(root.Read()->GetDescriptor(&temp));
+}
+
+bool ClassTable::ClassDescriptorHashEquals::operator()(const GcRoot<mirror::Class>& a,
+ const GcRoot<mirror::Class>& b) const {
+ DCHECK_EQ(a.Read()->GetClassLoader(), b.Read()->GetClassLoader());
+ std::string temp;
+ return a.Read()->DescriptorEquals(b.Read()->GetDescriptor(&temp));
+}
+
+bool ClassTable::ClassDescriptorHashEquals::operator()(const GcRoot<mirror::Class>& a,
+ const char* descriptor) const {
+ return a.Read()->DescriptorEquals(descriptor);
+}
+
+std::size_t ClassTable::ClassDescriptorHashEquals::operator()(const char* descriptor) const {
+ return ComputeModifiedUtf8Hash(descriptor);
+}
+
+} // namespace art
diff --git a/runtime/class_table.h b/runtime/class_table.h
new file mode 100644
index 0000000000..252a47dd25
--- /dev/null
+++ b/runtime/class_table.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_CLASS_TABLE_H_
+#define ART_RUNTIME_CLASS_TABLE_H_
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "base/allocator.h"
+#include "base/hash_set.h"
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "dex_file.h"
+#include "gc_root.h"
+#include "object_callbacks.h"
+#include "runtime.h"
+
+namespace art {
+
+namespace mirror {
+ class ClassLoader;
+} // namespace mirror
+
+class ClassVisitor {
+ public:
+ virtual ~ClassVisitor() {}
+ // Return true to continue visiting.
+ virtual bool Visit(mirror::Class* klass) = 0;
+};
+
+// Each class loader has a ClassTable.
+class ClassTable {
+ public:
+ ClassTable();
+
+ // Used by image writer for checking.
+ bool Contains(mirror::Class* klass)
+ REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Freeze the current class tables by allocating a new table and never updating or modifying the
+ // existing tables. This helps prevent dirty pages caused by inserting classes after the zygote fork.
+ void FreezeSnapshot()
+ REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Returns the number of classes in previous snapshots.
+ size_t NumZygoteClasses() const REQUIRES(Locks::classlinker_classes_lock_);
+
+ // Returns the number of classes in the latest snapshot.
+ size_t NumNonZygoteClasses() const REQUIRES(Locks::classlinker_classes_lock_);
+
+ // Update a class in the table with the new class. Returns the existing class which was replaced.
+ mirror::Class* UpdateClass(const char* descriptor, mirror::Class* new_klass, size_t hash)
+ REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
+ REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Return false if the callback told us to exit.
+ bool Visit(ClassVisitor* visitor)
+ REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ mirror::Class* Lookup(const char* descriptor, size_t hash)
+ SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
+
+ void Insert(mirror::Class* klass)
+ REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ void InsertWithHash(mirror::Class* klass, size_t hash)
+ REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Returns true if the class was found and removed, false otherwise.
+ bool Remove(const char* descriptor)
+ REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ private:
+ class ClassDescriptorHashEquals {
+ public:
+ // Same descriptor (all classes in a ClassTable share one class loader).
+ std::size_t operator()(const GcRoot<mirror::Class>& root) const NO_THREAD_SAFETY_ANALYSIS;
+ bool operator()(const GcRoot<mirror::Class>& a, const GcRoot<mirror::Class>& b) const
+ NO_THREAD_SAFETY_ANALYSIS;
+ // Same descriptor.
+ bool operator()(const GcRoot<mirror::Class>& a, const char* descriptor) const
+ NO_THREAD_SAFETY_ANALYSIS;
+ std::size_t operator()(const char* descriptor) const NO_THREAD_SAFETY_ANALYSIS;
+ };
+ class GcRootEmptyFn {
+ public:
+ void MakeEmpty(GcRoot<mirror::Class>& item) const {
+ item = GcRoot<mirror::Class>();
+ }
+ bool IsEmpty(const GcRoot<mirror::Class>& item) const {
+ return item.IsNull();
+ }
+ };
+ // Hash set which hashes class descriptors and compares them for equality. All classes in a
+ // ClassTable belong to the same class loader, so only the descriptor needs to match.
+ typedef HashSet<GcRoot<mirror::Class>, GcRootEmptyFn, ClassDescriptorHashEquals,
+ ClassDescriptorHashEquals, TrackingAllocator<GcRoot<mirror::Class>, kAllocatorTagClassTable>>
+ ClassSet;
+
+ // TODO: shard lock to have one per class loader.
+ std::vector<ClassSet> classes_ GUARDED_BY(Locks::classlinker_classes_lock_);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_CLASS_TABLE_H_
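Because FreezeSnapshot() pushes a fresh ClassSet and every later mutation targets classes_.back(), the zygote/non-zygote counts fall directly out of the vector structure. A short hedged sketch of the intended life cycle (standalone illustration; lock annotations and the class handles are assumed):

  ClassTable table;
  table.Insert(boot_class);   // lands in the initial set.
  table.FreezeSnapshot();     // zygote fork point: start a new back() set, never touch the old one.
  table.Insert(app_class);    // post-zygote insertions land in the new set.
  // Now table.NumZygoteClasses() == 1 and table.NumNonZygoteClasses() == 1.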
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 287a50bb04..f0de65b816 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -948,33 +948,27 @@ JDWP::JdwpError Dbg::GetReflectedType(JDWP::RefTypeId class_id, JDWP::ExpandBuf*
return JDWP::ERR_NONE;
}
-void Dbg::GetClassList(std::vector<JDWP::RefTypeId>* classes) {
- // Get the complete list of reference classes (i.e. all classes except
- // the primitive types).
- // Returns a newly-allocated buffer full of RefTypeId values.
- struct ClassListCreator {
- explicit ClassListCreator(std::vector<JDWP::RefTypeId>* classes_in) : classes(classes_in) {
- }
+// Get the complete list of reference classes (i.e. all classes except
+// the primitive types).
+// Returns a newly-allocated buffer full of RefTypeId values.
+class ClassListCreator : public ClassVisitor {
+ public:
+ explicit ClassListCreator(std::vector<JDWP::RefTypeId>* classes) : classes_(classes) {}
- static bool Visit(mirror::Class* c, void* arg) {
- return reinterpret_cast<ClassListCreator*>(arg)->Visit(c);
- }
-
- // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
- // annotalysis.
- bool Visit(mirror::Class* c) NO_THREAD_SAFETY_ANALYSIS {
- if (!c->IsPrimitive()) {
- classes->push_back(gRegistry->AddRefType(c));
- }
- return true;
+ bool Visit(mirror::Class* c) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (!c->IsPrimitive()) {
+ classes_->push_back(Dbg::GetObjectRegistry()->AddRefType(c));
}
+ return true;
+ }
- std::vector<JDWP::RefTypeId>* const classes;
- };
+ private:
+ std::vector<JDWP::RefTypeId>* const classes_;
+};
+void Dbg::GetClassList(std::vector<JDWP::RefTypeId>* classes) {
ClassListCreator clc(classes);
- Runtime::Current()->GetClassLinker()->VisitClassesWithoutClassesLock(ClassListCreator::Visit,
- &clc);
+ Runtime::Current()->GetClassLinker()->VisitClassesWithoutClassesLock(&clc);
}
JDWP::JdwpError Dbg::GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* pTypeTag,
diff --git a/runtime/entrypoints/quick/quick_entrypoints.h b/runtime/entrypoints/quick/quick_entrypoints.h
index cef2510451..3d3f7a1bdb 100644
--- a/runtime/entrypoints/quick/quick_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_entrypoints.h
@@ -20,6 +20,7 @@
#include <jni.h>
#include "base/macros.h"
+#include "base/mutex.h"
#include "offsets.h"
#define QUICK_ENTRYPOINT_OFFSET(ptr_size, x) \
@@ -71,6 +72,16 @@ extern void ReadBarrierJni(mirror::CompressedReference<mirror::Object>* handle_o
Thread* self)
NO_THREAD_SAFETY_ANALYSIS HOT_ATTR;
+// Read barrier entrypoints.
+// Compilers for ARM, ARM64, MIPS, MIPS64 can insert a call to this function directly.
+// For x86 and x86_64, compilers need a wrapper assembly function to handle the ABI mismatch.
+// This is the read barrier slow path for instance and static fields and reference-type arrays.
+// TODO: Currently the read barrier does not have a fast path for compilers to directly generate.
+// Ideally the slow path should only take one parameter "ref".
+extern "C" mirror::Object* artReadBarrierSlow(mirror::Object* ref, mirror::Object* obj,
+ uint32_t offset)
+ SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR;
+
} // namespace art
#endif // ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_H_
diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h
index 60bbf4ac82..73d8ae76ae 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_list.h
+++ b/runtime/entrypoints/quick/quick_entrypoints_list.h
@@ -145,7 +145,8 @@
V(NewStringFromStringBuffer, void) \
V(NewStringFromStringBuilder, void) \
\
- V(ReadBarrierJni, void, mirror::CompressedReference<mirror::Object>*, Thread*)
+ V(ReadBarrierJni, void, mirror::CompressedReference<mirror::Object>*, Thread*) \
+ V(ReadBarrierSlow, mirror::Object*, mirror::Object*, mirror::Object*, uint32_t)
#endif // ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_LIST_H_
#undef ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_LIST_H_ // #define is only for lint.
diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc
index 25a943a82a..0a1d80648d 100644
--- a/runtime/entrypoints/quick/quick_field_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc
@@ -557,4 +557,16 @@ extern "C" int artSetObjInstanceFromCode(uint32_t field_idx, mirror::Object* obj
return -1; // failure
}
+// TODO: Currently the read barrier does not have a fast path. Ideally the slow path should only
+// take one parameter "ref", which is generated by the fast path.
+extern "C" mirror::Object* artReadBarrierSlow(mirror::Object* ref ATTRIBUTE_UNUSED,
+ mirror::Object* obj, uint32_t offset) {
+ DCHECK(kUseReadBarrier);
+ uint8_t* raw_addr = reinterpret_cast<uint8_t*>(obj) + offset;
+ mirror::HeapReference<mirror::Object>* ref_addr =
+ reinterpret_cast<mirror::HeapReference<mirror::Object>*>(raw_addr);
+ return ReadBarrier::Barrier<mirror::Object, kWithReadBarrier, true>(obj, MemberOffset(offset),
+ ref_addr);
+}
+
} // namespace art
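For orientation, a hypothetical runtime-internal caller of this slow path could look like the sketch below (the helper name is invented; compiled code reaches the same function through the art_quick_read_barrier_slow stubs added earlier in this change):

  // Sketch only: load the reference field at field_offset of holder through the slow path.
  // The first argument is currently ignored, so null is fine here.
  mirror::Object* LoadRefFieldThroughSlowPath(mirror::Object* holder, MemberOffset field_offset)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    return artReadBarrierSlow(nullptr, holder, field_offset.Uint32Value());
  }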
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index c05c93555c..f7a3cd53cd 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -311,8 +311,9 @@ class EntrypointsOrderTest : public CommonRuntimeTest {
sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pNewStringFromStringBuilder, pReadBarrierJni,
sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pReadBarrierJni, pReadBarrierSlow, sizeof(void*));
- CHECKED(OFFSETOF_MEMBER(QuickEntryPoints, pReadBarrierJni)
+ CHECKED(OFFSETOF_MEMBER(QuickEntryPoints, pReadBarrierSlow)
+ sizeof(void*) == sizeof(QuickEntryPoints), QuickEntryPoints_all);
}
};
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 9711cf238e..e28d578121 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -49,12 +49,20 @@ constexpr bool kVerboseInstrumentation = false;
static constexpr StackVisitor::StackWalkKind kInstrumentationStackWalk =
StackVisitor::StackWalkKind::kSkipInlinedFrames;
-static bool InstallStubsClassVisitor(mirror::Class* klass, void* arg)
- REQUIRES(Locks::mutator_lock_) {
- Instrumentation* instrumentation = reinterpret_cast<Instrumentation*>(arg);
- instrumentation->InstallStubsForClass(klass);
- return true; // we visit all classes.
-}
+class InstallStubsClassVisitor : public ClassVisitor {
+ public:
+ explicit InstallStubsClassVisitor(Instrumentation* instrumentation)
+ : instrumentation_(instrumentation) {}
+
+ bool Visit(mirror::Class* klass) OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ instrumentation_->InstallStubsForClass(klass);
+ return true; // we visit all classes.
+ }
+
+ private:
+ Instrumentation* const instrumentation_;
+};
+
Instrumentation::Instrumentation()
: instrumentation_stubs_installed_(false), entry_exit_stubs_installed_(false),
@@ -563,14 +571,16 @@ void Instrumentation::ConfigureStubs(const char* key, InstrumentationLevel desir
entry_exit_stubs_installed_ = true;
interpreter_stubs_installed_ = false;
}
- runtime->GetClassLinker()->VisitClasses(InstallStubsClassVisitor, this);
+ InstallStubsClassVisitor visitor(this);
+ runtime->GetClassLinker()->VisitClasses(&visitor);
instrumentation_stubs_installed_ = true;
MutexLock mu(self, *Locks::thread_list_lock_);
runtime->GetThreadList()->ForEach(InstrumentationInstallStack, this);
} else {
interpreter_stubs_installed_ = false;
entry_exit_stubs_installed_ = false;
- runtime->GetClassLinker()->VisitClasses(InstallStubsClassVisitor, this);
+ InstallStubsClassVisitor visitor(this);
+ runtime->GetClassLinker()->VisitClasses(&visitor);
// Restore stack only if there is no method currently deoptimized.
bool empty;
{
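The free function plus void* argument is replaced by a typed visitor; a sketch of the interface the override above assumes (the real declaration lives in the class-linker/class-table headers, which are not shown in this hunk, and lock annotations are omitted here):

    // Assumed shape of the base class implemented above: VisitClasses()
    // invokes Visit() once per loaded class and stops early when it
    // returns false.
    class ClassVisitor {
     public:
      virtual ~ClassVisitor() {}
      virtual bool Visit(mirror::Class* klass) = 0;
    };

Call sites then construct a concrete visitor on the stack and pass its address, as the two VisitClasses() changes above show.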
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 069e346a8d..6568487df9 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -913,6 +913,33 @@ inline void Class::CheckPointerSize(size_t pointer_size) {
DCHECK_EQ(pointer_size, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
}
+template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+inline Class* Class::GetComponentType() {
+ return GetFieldObject<Class, kVerifyFlags, kReadBarrierOption>(ComponentTypeOffset());
+}
+
+template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+inline bool Class::IsArrayClass() {
+ return GetComponentType<kVerifyFlags, kReadBarrierOption>() != nullptr;
+}
+
+inline bool Class::IsAssignableFrom(Class* src) {
+ DCHECK(src != nullptr);
+ if (this == src) {
+ // Can always assign to things of the same type.
+ return true;
+ } else if (IsObjectClass()) {
+ // Can assign any reference to java.lang.Object.
+ return !src->IsPrimitive();
+ } else if (IsInterface()) {
+ return src->Implements(this);
+ } else if (src->IsArrayClass()) {
+ return IsAssignableFromArray(src);
+ } else {
+ return !src->IsInterface() && src->IsSubClass(this);
+ }
+}
+
} // namespace mirror
} // namespace art
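A few concrete outcomes of the branches above; the *_class locals are hypothetical mirror::Class* handles resolved elsewhere, so this is a sanity-check sketch rather than runnable test code:

    // Hypothetical checks mirroring the branch order in IsAssignableFrom().
    DCHECK(object_class->IsAssignableFrom(string_class));      // any reference assigns to Object
    DCHECK(!object_class->IsAssignableFrom(int_class));        // primitive sources never do
    DCHECK(comparable_iface->IsAssignableFrom(string_class));  // src implements the interface
    DCHECK(!string_class->IsAssignableFrom(object_class));     // would require a downcast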
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index c01a5e8f1a..d95bcd80e5 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -404,9 +404,8 @@ class MANAGED Class FINAL : public Object {
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
- return GetComponentType<kVerifyFlags, kReadBarrierOption>() != nullptr;
- }
+
+ bool IsArrayClass() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
@@ -423,9 +422,7 @@ class MANAGED Class FINAL : public Object {
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- Class* GetComponentType() SHARED_REQUIRES(Locks::mutator_lock_) {
- return GetFieldObject<Class, kVerifyFlags, kReadBarrierOption>(ComponentTypeOffset());
- }
+ Class* GetComponentType() SHARED_REQUIRES(Locks::mutator_lock_);
void SetComponentType(Class* new_component_type) SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(GetComponentType() == nullptr);
@@ -617,22 +614,7 @@ class MANAGED Class FINAL : public Object {
// downcast would be necessary. Similarly for interfaces, a class that implements (or an interface
// that extends) another can be assigned to its parent, but not vice-versa. All Classes may assign
// to themselves. Classes for primitive types may not assign to each other.
- ALWAYS_INLINE bool IsAssignableFrom(Class* src) SHARED_REQUIRES(Locks::mutator_lock_) {
- DCHECK(src != nullptr);
- if (this == src) {
- // Can always assign to things of the same type.
- return true;
- } else if (IsObjectClass()) {
- // Can assign any reference to java.lang.Object.
- return !src->IsPrimitive();
- } else if (IsInterface()) {
- return src->Implements(this);
- } else if (src->IsArrayClass()) {
- return IsAssignableFromArray(src);
- } else {
- return !src->IsInterface() && src->IsSubClass(this);
- }
- }
+ ALWAYS_INLINE bool IsAssignableFrom(Class* src) SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE Class* GetSuperClass() SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/oat.h b/runtime/oat.h
index ee2f3f60f3..29dd76ce5e 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,7 +32,7 @@ class InstructionSetFeatures;
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- static constexpr uint8_t kOatVersion[] = { '0', '6', '7', '\0' };
+ static constexpr uint8_t kOatVersion[] = { '0', '6', '8', '\0' };
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
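Bumping kOatVersion forces oat files compiled against the old entrypoint layout to be rejected and recompiled; a minimal sketch of that gate, assuming a byte-wise comparison (the actual validation lives in the OatHeader checks, not shown in this hunk):

    #include <cstring>
    // Illustrative only: a header carrying '0','6','7','\0' no longer matches
    // the runtime's expected version bytes after this change.
    static bool OatVersionMatches(const uint8_t (&file_version)[4]) {
      static constexpr uint8_t kExpected[] = { '0', '6', '8', '\0' };
      return memcmp(file_version, kExpected, sizeof(kExpected)) == 0;
    }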
diff --git a/runtime/read_barrier_c.h b/runtime/read_barrier_c.h
index 4f408dd5c1..710c21f03e 100644
--- a/runtime/read_barrier_c.h
+++ b/runtime/read_barrier_c.h
@@ -47,9 +47,4 @@
#error "Only one of Baker or Brooks can be enabled at a time."
#endif
-// A placeholder marker to indicate places to add read barriers in the
-// assembly code. This is a development time aid and to be removed
-// after read barriers are added.
-#define THIS_LOAD_REQUIRES_READ_BARRIER
-
#endif // ART_RUNTIME_READ_BARRIER_C_H_
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 6949b0bd34..2b977af51c 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2304,6 +2304,7 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) {
QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuffer)
QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuilder)
QUICK_ENTRY_POINT_INFO(pReadBarrierJni)
+ QUICK_ENTRY_POINT_INFO(pReadBarrierSlow)
#undef QUICK_ENTRY_POINT_INFO
os << offset;
diff --git a/test/441-checker-inliner/src/Main.java b/test/441-checker-inliner/src/Main.java
index 4db116a128..c108a900e2 100644
--- a/test/441-checker-inliner/src/Main.java
+++ b/test/441-checker-inliner/src/Main.java
@@ -157,6 +157,31 @@ public class Main {
return x;
}
+ /// CHECK-START: int Main.returnAbs(int) intrinsics_recognition (before)
+ /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect
+ /// CHECK-DAG: Return [<<Result>>]
+
+ /// CHECK-START: int Main.returnAbs(int) intrinsics_recognition (after)
+ /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:MathAbsInt
+ /// CHECK-DAG: Return [<<Result>>]
+
+ private static int returnAbs(int i) {
+ return Math.abs(i);
+ }
+
+ /// CHECK-START: int Main.InlinedIntrinsicsAreStillIntrinsic() inliner (before)
+ /// CHECK-DAG: <<ConstMinus1:i\d+>> IntConstant -1
+ /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect
+ /// CHECK-DAG: Return [<<Result>>]
+
+ /// CHECK-START: int Main.InlinedIntrinsicsAreStillIntrinsic() inliner (after)
+ /// CHECK-DAG: <<ConstMinus1:i\d+>> IntConstant -1
+ /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:MathAbsInt
+ /// CHECK-DAG: Return [<<Result>>]
+
+ public static int InlinedIntrinsicsAreStillIntrinsic() {
+ return returnAbs(-1);
+ }
private static void returnVoid() {
return;
@@ -238,5 +263,13 @@ public class Main {
if (InlineWithControlFlow(false) != 2) {
throw new Error();
}
+
+ if (InlinedIntrinsicsAreStillIntrinsic() != 1) {
+ throw new Error();
+ }
+
+ if (returnAbs(-1) != 1) {
+ throw new Error();
+ }
}
}
diff --git a/test/450-checker-types/src/Main.java b/test/450-checker-types/src/Main.java
index 014f59a506..251a53e456 100644
--- a/test/450-checker-types/src/Main.java
+++ b/test/450-checker-types/src/Main.java
@@ -14,7 +14,6 @@
* limitations under the License.
*/
-
interface Interface {
void $noinline$f();
}
@@ -52,6 +51,15 @@ class SubclassB extends Super {
}
}
+class Generic<A> {
+ private A a = null;
+ public A get() {
+ return a;
+ }
+}
+
+final class Final {}
+
public class Main {
/// CHECK-START: void Main.testSimpleRemove() instruction_simplifier_after_types (before)
@@ -395,6 +403,104 @@ public class Main {
((SubclassA)a[0]).$noinline$g();
}
+ private Generic<SubclassC> genericC = new Generic<SubclassC>();
+ private Generic<Final> genericFinal = new Generic<Final>();
+
+ private SubclassC get() {
+ return genericC.get();
+ }
+
+ private Final getFinal() {
+ return genericFinal.get();
+ }
+
+ /// CHECK-START: SubclassC Main.inlineGenerics() reference_type_propagation (after)
+ /// CHECK: <<Invoke:l\d+>> InvokeStaticOrDirect klass:SubclassC exact:false
+ /// CHECK-NEXT: Return [<<Invoke>>]
+
+ /// CHECK-START: SubclassC Main.inlineGenerics() reference_type_propagation_after_inlining (after)
+ /// CHECK: <<BoundType:l\d+>> BoundType klass:SubclassC exact:false
+ /// CHECK: Return [<<BoundType>>]
+ private SubclassC inlineGenerics() {
+ SubclassC c = get();
+ return c;
+ }
+
+ /// CHECK-START: Final Main.inlineGenericsFinal() reference_type_propagation (after)
+ /// CHECK: <<Invoke:l\d+>> InvokeStaticOrDirect klass:Final exact:true
+ /// CHECK-NEXT: Return [<<Invoke>>]
+
+ /// CHECK-START: Final Main.inlineGenericsFinal() reference_type_propagation_after_inlining (after)
+ /// CHECK: <<BoundType:l\d+>> BoundType klass:Final exact:true
+ /// CHECK: Return [<<BoundType>>]
+ private Final inlineGenericsFinal() {
+ Final f = getFinal();
+ return f;
+ }
+
+ /// CHECK-START: void Main.boundOnlyOnceIfNotNull(java.lang.Object) reference_type_propagation_after_inlining (after)
+ /// CHECK: BoundType
+ /// CHECK-NOT: BoundType
+ private void boundOnlyOnceIfNotNull(Object o) {
+ if (o != null) {
+ o.toString();
+ }
+ }
+
+ /// CHECK-START: void Main.boundOnlyOnceIfInstanceOf(java.lang.Object) reference_type_propagation_after_inlining (after)
+ /// CHECK: BoundType
+ /// CHECK-NOT: BoundType
+ private void boundOnlyOnceIfInstanceOf(Object o) {
+ if (o instanceof Main) {
+ o.toString();
+ }
+ }
+
+ /// CHECK-START: Final Main.boundOnlyOnceCheckCast(Generic) reference_type_propagation_after_inlining (after)
+ /// CHECK: BoundType
+ /// CHECK-NOT: BoundType
+ private Final boundOnlyOnceCheckCast(Generic<Final> o) {
+ Final f = o.get();
+ return f;
+ }
+
+ private Super getSuper() {
+ return new SubclassA();
+ }
+
+ /// CHECK-START: void Main.updateNodesInTheSameBlockAsPhi(boolean) reference_type_propagation (after)
+ /// CHECK: <<Phi:l\d+>> Phi klass:Super
+ /// CHECK: NullCheck [<<Phi>>] klass:Super
+
+ /// CHECK-START: void Main.updateNodesInTheSameBlockAsPhi(boolean) reference_type_propagation_after_inlining (after)
+ /// CHECK: <<Phi:l\d+>> Phi klass:SubclassA
+ /// CHECK: NullCheck [<<Phi>>] klass:SubclassA
+ private void updateNodesInTheSameBlockAsPhi(boolean cond) {
+ Super s = getSuper();
+ if (cond) {
+ s = new SubclassA();
+ }
+ s.$noinline$f();
+ }
+
+ /// CHECK-START: java.lang.String Main.checkcastPreserveNullCheck(java.lang.Object) reference_type_propagation_after_inlining (after)
+ /// CHECK: <<This:l\d+>> ParameterValue
+ /// CHECK: <<Param:l\d+>> ParameterValue
+ /// CHECK: <<Clazz:l\d+>> LoadClass
+ /// CHECK: CheckCast [<<Param>>,<<Clazz>>]
+ /// CHECK: BoundType [<<Param>>] can_be_null:true
+
+ /// CHECK-START: java.lang.String Main.checkcastPreserveNullCheck(java.lang.Object) instruction_simplifier_after_types (after)
+ /// CHECK: <<This:l\d+>> ParameterValue
+ /// CHECK: <<Param:l\d+>> ParameterValue
+ /// CHECK: <<Clazz:l\d+>> LoadClass
+ /// CHECK: CheckCast [<<Param>>,<<Clazz>>]
+ /// CHECK: <<Bound:l\d+>> BoundType [<<Param>>]
+ /// CHECK: NullCheck [<<Bound>>]
+ public String checkcastPreserveNullCheck(Object a) {
+ return ((SubclassA)a).toString();
+ }
+
public static void main(String[] args) {
}
}
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index 992a8a6ea1..d58f034f93 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -150,5 +150,12 @@
result: EXEC_FAILED,
modes: [device],
names: ["org.apache.harmony.tests.java.lang.ClassTest#test_forNameLjava_lang_String"]
+},
+{
+ description: "TimeZoneTest.testAllDisplayNames times out, needs investigation",
+ result: EXEC_FAILED,
+ modes: [device],
+ names: ["libcore.java.util.TimeZoneTest.testAllDisplayNames"],
+ bug: 22786792
}
]