Diffstat (limited to 'compiler')
-rw-r--r--  compiler/common_compiler_test.cc | 4
-rw-r--r--  compiler/common_compiler_test.h | 4
-rw-r--r--  compiler/compiled_method.h | 2
-rw-r--r--  compiler/dex/mir_optimization.cc | 13
-rw-r--r--  compiler/driver/compiler_driver.cc | 190
-rw-r--r--  compiler/driver/compiler_driver.h | 21
-rw-r--r--  compiler/elf_fixup.cc | 50
-rw-r--r--  compiler/elf_patcher.cc | 2
-rw-r--r--  compiler/elf_stripper.cc | 36
-rw-r--r--  compiler/oat_writer.cc | 2
-rw-r--r--  compiler/optimizing/builder.cc | 85
-rw-r--r--  compiler/optimizing/builder.h | 3
-rw-r--r--  compiler/optimizing/code_generator_arm.cc | 127
-rw-r--r--  compiler/optimizing/code_generator_arm.h | 2
-rw-r--r--  compiler/optimizing/code_generator_x86.cc | 105
-rw-r--r--  compiler/optimizing/code_generator_x86.h | 2
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc | 107
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h | 2
-rw-r--r--  compiler/optimizing/codegen_test.cc | 2
-rw-r--r--  compiler/optimizing/live_ranges_test.cc | 3
-rw-r--r--  compiler/optimizing/nodes.h | 41
-rw-r--r--  compiler/optimizing/optimizing_unit_test.h | 13
-rw-r--r--  compiler/optimizing/pretty_printer_test.cc | 47
-rw-r--r--  compiler/optimizing/register_allocator.cc | 3
-rw-r--r--  compiler/optimizing/register_allocator.h | 4
-rw-r--r--  compiler/optimizing/register_allocator_test.cc | 55
-rw-r--r--  compiler/optimizing/ssa_test.cc | 3
-rw-r--r--  compiler/optimizing/suspend_check_test.cc | 95
28 files changed, 804 insertions(+), 219 deletions(-)
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index db9dcd4b4b..fbaed9ffab 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -367,7 +367,7 @@ void CommonCompilerTest::CompileMethod(mirror::ArtMethod* method) {
MakeExecutable(method);
}
-void CommonCompilerTest::CompileDirectMethod(ConstHandle<mirror::ClassLoader> class_loader,
+void CommonCompilerTest::CompileDirectMethod(Handle<mirror::ClassLoader> class_loader,
const char* class_name, const char* method_name,
const char* signature) {
std::string class_descriptor(DotToDescriptor(class_name));
@@ -380,7 +380,7 @@ void CommonCompilerTest::CompileDirectMethod(ConstHandle<mirror::ClassLoader> cl
CompileMethod(method);
}
-void CommonCompilerTest::CompileVirtualMethod(ConstHandle<mirror::ClassLoader> class_loader,
+void CommonCompilerTest::CompileVirtualMethod(Handle<mirror::ClassLoader> class_loader,
const char* class_name, const char* method_name,
const char* signature) {
std::string class_descriptor(DotToDescriptor(class_name));
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index 4e74f0a521..df06b71c7d 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -63,11 +63,11 @@ class CommonCompilerTest : public CommonRuntimeTest {
void CompileMethod(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void CompileDirectMethod(ConstHandle<mirror::ClassLoader> class_loader, const char* class_name,
+ void CompileDirectMethod(Handle<mirror::ClassLoader> class_loader, const char* class_name,
const char* method_name, const char* signature)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void CompileVirtualMethod(ConstHandle<mirror::ClassLoader> class_loader, const char* class_name,
+ void CompileVirtualMethod(Handle<mirror::ClassLoader> class_loader, const char* class_name,
const char* method_name, const char* signature)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index 3e34144836..cc46b92dc5 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -154,7 +154,7 @@ class SrcMap FINAL : public std::vector<SrcMapElem> {
// get rid of the highest values
size_t i = size() - 1;
for (; i > 0 ; i--) {
- if ((*this)[i].from_ >= highest_pc) {
+ if ((*this)[i].from_ < highest_pc) {
break;
}
}
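
The flipped comparison is easier to read outside the class: the loop walks backward to find the last map entry whose from_ is still below highest_pc, and everything after that entry is dropped; with the old >= test the loop broke on the first iteration and the out-of-range tail survived. A minimal stand-alone model of the fixed loop (Elem and TrimAbove are illustrative stand-ins, not the ART types, and the trailing resize is an assumption about the surrounding code):

    #include <cstdint>
    #include <vector>

    struct Elem { uint32_t from_; };

    // Drop all trailing entries whose from_ is >= highest_pc.
    // Assumes the map is non-empty.
    void TrimAbove(std::vector<Elem>* map, uint32_t highest_pc) {
      size_t i = map->size() - 1;
      for (; i > 0; i--) {
        if ((*map)[i].from_ < highest_pc) {
          break;  // (*map)[i] is the last entry to keep.
        }
      }
      map->resize(i + 1);
    }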
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 7ac878f49b..fdabc3e3cb 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -1360,11 +1360,20 @@ void MIRGraph::InlineSpecialMethods(BasicBlock* bb) {
if (!method_info.FastPath()) {
continue;
}
+
InvokeType sharp_type = method_info.GetSharpType();
- if ((sharp_type != kDirect) &&
- (sharp_type != kStatic || method_info.NeedsClassInitialization())) {
+ if ((sharp_type != kDirect) && (sharp_type != kStatic)) {
continue;
}
+
+ if (sharp_type == kStatic) {
+ bool needs_clinit = method_info.NeedsClassInitialization() &&
+ ((mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0);
+ if (needs_clinit) {
+ continue;
+ }
+ }
+
DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
MethodReference target = method_info.GetTargetMethod();
if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(target.dex_file)
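
The rewritten gate splits the old compound condition: direct calls are always candidates, and static calls are now rejected only when a class-initialization check is both needed and not already marked redundant on this MIR. A compilable sketch of the predicate (the enum and the flag's bit value are illustrative stand-ins for ART's definitions):

    #include <cstdint>

    enum InvokeType { kStatic, kDirect, kVirtual, kSuper, kInterface };
    constexpr uint32_t MIR_IGNORE_CLINIT_CHECK = 1u << 0;  // illustrative bit

    bool CanInlineSharpCall(InvokeType sharp_type,
                            bool needs_class_initialization,
                            uint32_t optimization_flags) {
      if (sharp_type != kDirect && sharp_type != kStatic) {
        return false;  // only direct and static calls are candidates
      }
      if (sharp_type == kStatic && needs_class_initialization &&
          (optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
        return false;  // a clinit check is still required at this call site
      }
      return true;
    }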
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index bbd19396cd..d743f907a3 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -350,10 +350,10 @@ CompilerDriver::CompilerDriver(const CompilerOptions* compiler_options,
dump_stats_(dump_stats),
dump_passes_(dump_passes),
timings_logger_(timer),
- compiler_library_(NULL),
- compiler_context_(NULL),
- compiler_enable_auto_elf_loading_(NULL),
- compiler_get_method_code_addr_(NULL),
+ compiler_library_(nullptr),
+ compiler_context_(nullptr),
+ compiler_enable_auto_elf_loading_(nullptr),
+ compiler_get_method_code_addr_(nullptr),
support_boot_image_fixup_(instruction_set != kMips),
dedupe_code_("dedupe code"),
dedupe_src_mapping_table_("dedupe source mapping table"),
@@ -365,7 +365,7 @@ CompilerDriver::CompilerDriver(const CompilerOptions* compiler_options,
DCHECK(verification_results_ != nullptr);
DCHECK(method_inliner_map_ != nullptr);
- CHECK_PTHREAD_CALL(pthread_key_create, (&tls_key_, NULL), "compiler tls key");
+ CHECK_PTHREAD_CALL(pthread_key_create, (&tls_key_, nullptr), "compiler tls key");
dex_to_dex_compiler_ = reinterpret_cast<DexToDexCompilerFn>(ArtCompileDEX);
@@ -445,7 +445,7 @@ CompilerDriver::~CompilerDriver() {
CompilerTls* CompilerDriver::GetTls() {
// Lazily create thread-local storage
CompilerTls* res = static_cast<CompilerTls*>(pthread_getspecific(tls_key_));
- if (res == NULL) {
+ if (res == nullptr) {
res = compiler_->CreateNewCompilerTls();
CHECK_PTHREAD_CALL(pthread_setspecific, (tls_key_, res), "compiler tls");
}
@@ -520,20 +520,18 @@ static DexToDexCompilationLevel GetDexToDexCompilationlevel(
const char* descriptor = dex_file.GetClassDescriptor(class_def);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
mirror::Class* klass = class_linker->FindClass(self, descriptor, class_loader);
- if (klass == NULL) {
+ if (klass == nullptr) {
CHECK(self->IsExceptionPending());
self->ClearException();
return kDontDexToDexCompile;
}
- // The verifier can only run on "quick" instructions at runtime (see usage of
- // FindAccessedFieldAtDexPc and FindInvokedMethodAtDexPc in ThrowNullPointerExceptionFromDexPC
- // function). Since image classes can be verified again while compiling an application,
- // we must prevent the DEX-to-DEX compiler from introducing them.
- // TODO: find a way to enable "quick" instructions for image classes and remove this check.
- bool compiling_image_classes = class_loader.Get() == nullptr;
- if (compiling_image_classes) {
- return kRequired;
- } else if (klass->IsVerified()) {
+ // DexToDex at the kOptimize level may introduce quickened opcodes, which replace symbolic
+ // references with actual offsets. We cannot re-verify such instructions.
+ //
+ // We store the verification information in the class status in the oat file, which the linker
+ // can validate (checksums) and use to skip load-time verification. It is thus safe to
+ // optimize when a class has been fully verified before.
+ if (klass->IsVerified()) {
// Class is verified so we can enable DEX-to-DEX compilation for performance.
return kOptimize;
} else if (klass->IsCompileTimeVerified()) {
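
With the image-class special case gone, the level depends only on the class's verification state, since the class status recorded in the oat file lets the runtime skip re-verification of quickened code. A condensed sketch of the ladder, assuming the unchanged tail of the function still returns kRequired for compile-time-verified classes and kDontDexToDexCompile otherwise:

    enum DexToDexCompilationLevel { kDontDexToDexCompile, kRequired, kOptimize };

    DexToDexCompilationLevel LevelFor(bool resolved, bool verified,
                                      bool compile_time_verified) {
      if (!resolved) return kDontDexToDexCompile;   // resolution failed
      if (verified) return kOptimize;               // safe to quicken
      if (compile_time_verified) return kRequired;  // re-verify at runtime
      return kDontDexToDexCompile;
    }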
@@ -606,13 +604,14 @@ void CompilerDriver::PreCompile(jobject class_loader, const std::vector<const De
ThreadPool* thread_pool, TimingLogger* timings) {
LoadImageClasses(timings);
+ Resolve(class_loader, dex_files, thread_pool, timings);
+
if (!compiler_options_->IsVerificationEnabled()) {
- VLOG(compiler) << "Verify none mode specified, skipping pre-compilation";
+ LOG(INFO) << "Verify none mode specified, skipping verification.";
+ SetVerified(class_loader, dex_files, thread_pool, timings);
return;
}
- Resolve(class_loader, dex_files, thread_pool, timings);
-
Verify(class_loader, dex_files, thread_pool, timings);
InitializeClasses(class_loader, dex_files, thread_pool, timings);
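
The reordering is easier to follow with the phases laid out end to end: resolution now runs unconditionally, and verify-none mode marks classes verified instead of skipping the phase, so later stages still see resolved classes with a recorded status. A compilable paraphrase with stub phases standing in for the real ones:

    #include <iostream>

    static void LoadImageClasses()  { std::cout << "load image classes\n"; }
    static void Resolve()           { std::cout << "resolve\n"; }
    static void SetVerified()       { std::cout << "mark classes verified\n"; }
    static void Verify()            { std::cout << "verify\n"; }
    static void InitializeClasses() { std::cout << "initialize classes\n"; }

    void PreCompile(bool verification_enabled) {
      LoadImageClasses();
      Resolve();                  // now runs even in verify-none mode
      if (!verification_enabled) {
        SetVerified();            // force kStatusVerified instead of skipping
        return;
      }
      Verify();
      InitializeClasses();
    }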
@@ -628,11 +627,11 @@ bool CompilerDriver::IsImageClass(const char* descriptor) const {
}
}
-static void ResolveExceptionsForMethod(MethodHelper* mh,
+static void ResolveExceptionsForMethod(MutableMethodHelper* mh,
std::set<std::pair<uint16_t, const DexFile*>>& exceptions_to_resolve)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
const DexFile::CodeItem* code_item = mh->GetMethod()->GetCodeItem();
- if (code_item == NULL) {
+ if (code_item == nullptr) {
return; // native or abstract method
}
if (code_item->tries_size_ == 0) {
@@ -671,7 +670,7 @@ static bool ResolveCatchBlockExceptionsClassVisitor(mirror::Class* c, void* arg)
std::set<std::pair<uint16_t, const DexFile*>>* exceptions_to_resolve =
reinterpret_cast<std::set<std::pair<uint16_t, const DexFile*>>*>(arg);
StackHandleScope<1> hs(Thread::Current());
- MethodHelper mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
+ MutableMethodHelper mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
for (size_t i = 0; i < c->NumVirtualMethods(); ++i) {
mh.ChangeMethod(c->GetVirtualMethod(i));
ResolveExceptionsForMethod(&mh, *exceptions_to_resolve);
@@ -710,7 +709,7 @@ void CompilerDriver::LoadImageClasses(TimingLogger* timings)
StackHandleScope<1> hs(self);
Handle<mirror::Class> klass(
hs.NewHandle(class_linker->FindSystemClass(self, descriptor.c_str())));
- if (klass.Get() == NULL) {
+ if (klass.Get() == nullptr) {
VLOG(compiler) << "Failed to find class " << descriptor;
image_classes_->erase(it++);
self->ClearException();
@@ -738,7 +737,7 @@ void CompilerDriver::LoadImageClasses(TimingLogger* timings)
Handle<mirror::Class> klass(hs.NewHandle(
class_linker->ResolveType(*dex_file, exception_type_idx, dex_cache,
NullHandle<mirror::ClassLoader>())));
- if (klass.Get() == NULL) {
+ if (klass.Get() == nullptr) {
const DexFile::TypeId& type_id = dex_file->GetTypeId(exception_type_idx);
const char* descriptor = dex_file->GetTypeDescriptor(type_id);
LOG(FATAL) << "Failed to resolve class " << descriptor;
@@ -762,7 +761,7 @@ static void MaybeAddToImageClasses(Handle<mirror::Class> c, std::set<std::string
Thread* self = Thread::Current();
StackHandleScope<1> hs(self);
// Make a copy of the handle so that we don't clobber it doing Assign.
- Handle<mirror::Class> klass(hs.NewHandle(c.Get()));
+ MutableHandle<mirror::Class> klass(hs.NewHandle(c.Get()));
std::string temp;
while (!klass->IsObjectClass()) {
const char* descriptor = klass->GetDescriptor(&temp);
@@ -785,8 +784,8 @@ static void MaybeAddToImageClasses(Handle<mirror::Class> c, std::set<std::string
}
void CompilerDriver::FindClinitImageClassesCallback(mirror::Object* object, void* arg) {
- DCHECK(object != NULL);
- DCHECK(arg != NULL);
+ DCHECK(object != nullptr);
+ DCHECK(arg != nullptr);
CompilerDriver* compiler_driver = reinterpret_cast<CompilerDriver*>(arg);
StackHandleScope<1> hs(Thread::Current());
MaybeAddToImageClasses(hs.NewHandle(object->GetClass()), compiler_driver->image_classes_.get());
@@ -854,29 +853,29 @@ bool CompilerDriver::CanAccessTypeWithoutChecks(uint32_t referrer_idx, const Dex
uint32_t type_idx,
bool* type_known_final, bool* type_known_abstract,
bool* equals_referrers_class) {
- if (type_known_final != NULL) {
+ if (type_known_final != nullptr) {
*type_known_final = false;
}
- if (type_known_abstract != NULL) {
+ if (type_known_abstract != nullptr) {
*type_known_abstract = false;
}
- if (equals_referrers_class != NULL) {
+ if (equals_referrers_class != nullptr) {
*equals_referrers_class = false;
}
ScopedObjectAccess soa(Thread::Current());
mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file);
// Get type from dex cache assuming it was populated by the verifier
mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
- if (resolved_class == NULL) {
+ if (resolved_class == nullptr) {
stats_->TypeNeedsAccessCheck();
return false; // Unknown class needs access checks.
}
const DexFile::MethodId& method_id = dex_file.GetMethodId(referrer_idx);
- if (equals_referrers_class != NULL) {
+ if (equals_referrers_class != nullptr) {
*equals_referrers_class = (method_id.class_idx_ == type_idx);
}
mirror::Class* referrer_class = dex_cache->GetResolvedType(method_id.class_idx_);
- if (referrer_class == NULL) {
+ if (referrer_class == nullptr) {
stats_->TypeNeedsAccessCheck();
return false; // Incomplete referrer knowledge needs access check.
}
@@ -885,10 +884,10 @@ bool CompilerDriver::CanAccessTypeWithoutChecks(uint32_t referrer_idx, const Dex
bool result = referrer_class->CanAccess(resolved_class);
if (result) {
stats_->TypeDoesntNeedAccessCheck();
- if (type_known_final != NULL) {
+ if (type_known_final != nullptr) {
*type_known_final = resolved_class->IsFinal() && !resolved_class->IsArrayClass();
}
- if (type_known_abstract != NULL) {
+ if (type_known_abstract != nullptr) {
*type_known_abstract = resolved_class->IsAbstract() && !resolved_class->IsArrayClass();
}
} else {
@@ -904,13 +903,13 @@ bool CompilerDriver::CanAccessInstantiableTypeWithoutChecks(uint32_t referrer_id
mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file);
// Get type from dex cache assuming it was populated by the verifier.
mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
- if (resolved_class == NULL) {
+ if (resolved_class == nullptr) {
stats_->TypeNeedsAccessCheck();
return false; // Unknown class needs access checks.
}
const DexFile::MethodId& method_id = dex_file.GetMethodId(referrer_idx);
mirror::Class* referrer_class = dex_cache->GetResolvedType(method_id.class_idx_);
- if (referrer_class == NULL) {
+ if (referrer_class == nullptr) {
stats_->TypeNeedsAccessCheck();
return false; // Incomplete referrer knowledge needs access check.
}
@@ -1310,6 +1309,10 @@ const VerifiedMethod* CompilerDriver::GetVerifiedMethod(const DexFile* dex_file,
}
bool CompilerDriver::IsSafeCast(const DexCompilationUnit* mUnit, uint32_t dex_pc) {
+ if (!compiler_options_->IsVerificationEnabled()) {
+ // If we didn't verify, every cast has to be treated as non-safe.
+ return false;
+ }
DCHECK(mUnit->GetVerifiedMethod() != nullptr);
bool result = mUnit->GetVerifiedMethod()->IsSafeCast(dex_pc);
if (result) {
@@ -1410,7 +1413,7 @@ class ParallelCompilationManager {
thread_pool_(thread_pool) {}
ClassLinker* GetClassLinker() const {
- CHECK(class_linker_ != NULL);
+ CHECK(class_linker_ != nullptr);
return class_linker_;
}
@@ -1419,12 +1422,12 @@ class ParallelCompilationManager {
}
CompilerDriver* GetCompiler() const {
- CHECK(compiler_ != NULL);
+ CHECK(compiler_ != nullptr);
return compiler_;
}
const DexFile* GetDexFile() const {
- CHECK(dex_file_ != NULL);
+ CHECK(dex_file_ != nullptr);
return dex_file_;
}
@@ -1499,10 +1502,10 @@ class ParallelCompilationManager {
// that avoids the expensive FindInClassPath search.
static bool SkipClass(jobject class_loader, const DexFile& dex_file, mirror::Class* klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(klass != NULL);
+ DCHECK(klass != nullptr);
const DexFile& original_dex_file = *klass->GetDexCache()->GetDexFile();
if (&dex_file != &original_dex_file) {
- if (class_loader == NULL) {
+ if (class_loader == nullptr) {
LOG(WARNING) << "Skipping class " << PrettyDescriptor(klass) << " from "
<< dex_file.GetLocation() << " previously found in "
<< original_dex_file.GetLocation();
@@ -1587,7 +1590,7 @@ static void ResolveClassFieldsAndMethods(const ParallelCompilationManager* manag
// static fields, instance fields, direct methods, and virtual
// methods.
const byte* class_data = dex_file.GetClassData(class_def);
- if (class_data == NULL) {
+ if (class_data == nullptr) {
// Empty class such as a marker interface.
requires_constructor_barrier = false;
} else {
@@ -1596,7 +1599,7 @@ static void ResolveClassFieldsAndMethods(const ParallelCompilationManager* manag
if (resolve_fields_and_methods) {
mirror::ArtField* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(),
dex_cache, class_loader, true);
- if (field == NULL) {
+ if (field == nullptr) {
CheckAndClearResolveException(soa.Self());
}
}
@@ -1605,13 +1608,13 @@ static void ResolveClassFieldsAndMethods(const ParallelCompilationManager* manag
// We require a constructor barrier if there are final instance fields.
requires_constructor_barrier = false;
while (it.HasNextInstanceField()) {
- if ((it.GetMemberAccessFlags() & kAccFinal) != 0) {
+ if (it.MemberIsFinal()) {
requires_constructor_barrier = true;
}
if (resolve_fields_and_methods) {
mirror::ArtField* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(),
dex_cache, class_loader, false);
- if (field == NULL) {
+ if (field == nullptr) {
CheckAndClearResolveException(soa.Self());
}
}
@@ -1623,7 +1626,7 @@ static void ResolveClassFieldsAndMethods(const ParallelCompilationManager* manag
dex_cache, class_loader,
NullHandle<mirror::ArtMethod>(),
it.GetMethodInvokeType(class_def));
- if (method == NULL) {
+ if (method == nullptr) {
CheckAndClearResolveException(soa.Self());
}
it.Next();
@@ -1633,7 +1636,7 @@ static void ResolveClassFieldsAndMethods(const ParallelCompilationManager* manag
dex_cache, class_loader,
NullHandle<mirror::ArtMethod>(),
it.GetMethodInvokeType(class_def));
- if (method == NULL) {
+ if (method == nullptr) {
CheckAndClearResolveException(soa.Self());
}
it.Next();
@@ -1658,9 +1661,9 @@ static void ResolveType(const ParallelCompilationManager* manager, size_t type_i
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(manager->GetClassLoader())));
mirror::Class* klass = class_linker->ResolveType(dex_file, type_idx, dex_cache, class_loader);
- if (klass == NULL) {
+ if (klass == nullptr) {
CHECK(soa.Self()->IsExceptionPending());
- mirror::Throwable* exception = soa.Self()->GetException(NULL);
+ mirror::Throwable* exception = soa.Self()->GetException(nullptr);
VLOG(compiler) << "Exception during type resolution: " << exception->Dump();
if (exception->GetClass()->DescriptorEquals("Ljava/lang/OutOfMemoryError;")) {
// There's little point continuing compilation if the heap is exhausted.
@@ -1691,11 +1694,20 @@ void CompilerDriver::ResolveDexFile(jobject class_loader, const DexFile& dex_fil
context.ForAll(0, dex_file.NumClassDefs(), ResolveClassFieldsAndMethods, thread_count_);
}
+void CompilerDriver::SetVerified(jobject class_loader, const std::vector<const DexFile*>& dex_files,
+ ThreadPool* thread_pool, TimingLogger* timings) {
+ for (size_t i = 0; i != dex_files.size(); ++i) {
+ const DexFile* dex_file = dex_files[i];
+ CHECK(dex_file != nullptr);
+ SetVerifiedDexFile(class_loader, *dex_file, dex_files, thread_pool, timings);
+ }
+}
+
void CompilerDriver::Verify(jobject class_loader, const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings) {
for (size_t i = 0; i != dex_files.size(); ++i) {
const DexFile* dex_file = dex_files[i];
- CHECK(dex_file != NULL);
+ CHECK(dex_file != nullptr);
VerifyDexFile(class_loader, *dex_file, dex_files, thread_pool, timings);
}
}
@@ -1757,6 +1769,50 @@ void CompilerDriver::VerifyDexFile(jobject class_loader, const DexFile& dex_file
context.ForAll(0, dex_file.NumClassDefs(), VerifyClass, thread_count_);
}
+static void SetVerifiedClass(const ParallelCompilationManager* manager, size_t class_def_index)
+ LOCKS_EXCLUDED(Locks::mutator_lock_) {
+ ATRACE_CALL();
+ ScopedObjectAccess soa(Thread::Current());
+ const DexFile& dex_file = *manager->GetDexFile();
+ const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+ const char* descriptor = dex_file.GetClassDescriptor(class_def);
+ ClassLinker* class_linker = manager->GetClassLinker();
+ jobject jclass_loader = manager->GetClassLoader();
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+ Handle<mirror::Class> klass(
+ hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader)));
+ // Class might have failed resolution. Then don't set it to verified.
+ if (klass.Get() != nullptr) {
+ // Only do this if the class is resolved. If even resolution fails, quickening will go very,
+ // very wrong.
+ if (klass->IsResolved()) {
+ if (klass->GetStatus() < mirror::Class::kStatusVerified) {
+ ObjectLock<mirror::Class> lock(soa.Self(), klass);
+ klass->SetStatus(mirror::Class::kStatusVerified, soa.Self());
+ }
+ // Record the final class status if necessary.
+ ClassReference ref(manager->GetDexFile(), class_def_index);
+ manager->GetCompiler()->RecordClassStatus(ref, klass->GetStatus());
+ }
+ } else {
+ Thread* self = soa.Self();
+ DCHECK(self->IsExceptionPending());
+ self->ClearException();
+ }
+}
+
+void CompilerDriver::SetVerifiedDexFile(jobject class_loader, const DexFile& dex_file,
+ const std::vector<const DexFile*>& dex_files,
+ ThreadPool* thread_pool, TimingLogger* timings) {
+ TimingLogger::ScopedTiming t("Verify Dex File", timings);
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, dex_files,
+ thread_pool);
+ context.ForAll(0, dex_file.NumClassDefs(), SetVerifiedClass, thread_count_);
+}
+
static void InitializeClass(const ParallelCompilationManager* manager, size_t class_def_index)
LOCKS_EXCLUDED(Locks::mutator_lock_) {
ATRACE_CALL();
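
At its core, SetVerifiedClass is a guarded status raise: only resolved classes are touched, the status is bumped under the class's object lock, and the result is recorded for the oat file. A stand-alone model with illustrative types (ART takes ObjectLock<mirror::Class> around the real SetStatus call):

    enum class Status { kResolved, kVerified, kInitialized };

    struct MiniClass {
      Status status = Status::kResolved;
    };

    void ForceVerified(MiniClass* klass) {
      if (klass == nullptr) {
        return;  // resolution failed: leave the class alone
      }
      if (klass->status < Status::kVerified) {
        klass->status = Status::kVerified;  // locked in the real code
      }
      // RecordClassStatus(ref, klass->status) would follow here.
    }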
@@ -1865,7 +1921,7 @@ void CompilerDriver::InitializeClasses(jobject class_loader,
ThreadPool* thread_pool, TimingLogger* timings) {
for (size_t i = 0; i != dex_files.size(); ++i) {
const DexFile* dex_file = dex_files[i];
- CHECK(dex_file != NULL);
+ CHECK(dex_file != nullptr);
InitializeClasses(class_loader, *dex_file, dex_files, thread_pool, timings);
}
if (IsImage()) {
@@ -1878,7 +1934,7 @@ void CompilerDriver::Compile(jobject class_loader, const std::vector<const DexFi
ThreadPool* thread_pool, TimingLogger* timings) {
for (size_t i = 0; i != dex_files.size(); ++i) {
const DexFile* dex_file = dex_files[i];
- CHECK(dex_file != NULL);
+ CHECK(dex_file != nullptr);
CompileDexFile(class_loader, *dex_file, dex_files, thread_pool, timings);
}
}
@@ -1911,7 +1967,7 @@ void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, siz
return;
}
const byte* class_data = dex_file.GetClassData(class_def);
- if (class_data == NULL) {
+ if (class_data == nullptr) {
// empty class, probably a marker interface
return;
}
@@ -1946,7 +2002,7 @@ void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, siz
continue;
}
previous_direct_method_idx = method_idx;
- driver->CompileMethod(it.GetMethodCodeItem(), it.GetMemberAccessFlags(),
+ driver->CompileMethod(it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
it.GetMethodInvokeType(class_def), class_def_index,
method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level);
it.Next();
@@ -1962,7 +2018,7 @@ void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, siz
continue;
}
previous_virtual_method_idx = method_idx;
- driver->CompileMethod(it.GetMethodCodeItem(), it.GetMemberAccessFlags(),
+ driver->CompileMethod(it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
it.GetMethodInvokeType(class_def), class_def_index,
method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level);
it.Next();
@@ -1984,7 +2040,7 @@ void CompilerDriver::CompileMethod(const DexFile::CodeItem* code_item, uint32_t
uint32_t method_idx, jobject class_loader,
const DexFile& dex_file,
DexToDexCompilationLevel dex_to_dex_compilation_level) {
- CompiledMethod* compiled_method = NULL;
+ CompiledMethod* compiled_method = nullptr;
uint64_t start_ns = kTimeCompileMethod ? NanoTime() : 0;
if ((access_flags & kAccNative) != 0) {
@@ -1994,14 +2050,14 @@ void CompilerDriver::CompileMethod(const DexFile::CodeItem* code_item, uint32_t
// Leaving this empty will trigger the generic JNI version
} else {
compiled_method = compiler_->JniCompile(access_flags, method_idx, dex_file);
- CHECK(compiled_method != NULL);
+ CHECK(compiled_method != nullptr);
}
} else if ((access_flags & kAccAbstract) != 0) {
} else {
MethodReference method_ref(&dex_file, method_idx);
bool compile = verification_results_->IsCandidateForCompilation(method_ref, access_flags);
if (compile) {
- // NOTE: if compiler declines to compile this method, it will return NULL.
+ // NOTE: if compiler declines to compile this method, it will return nullptr.
compiled_method = compiler_->Compile(code_item, access_flags, invoke_type, class_def_idx,
method_idx, class_loader, dex_file);
}
@@ -2022,20 +2078,20 @@ void CompilerDriver::CompileMethod(const DexFile::CodeItem* code_item, uint32_t
}
Thread* self = Thread::Current();
- if (compiled_method != NULL) {
+ if (compiled_method != nullptr) {
MethodReference ref(&dex_file, method_idx);
- DCHECK(GetCompiledMethod(ref) == NULL) << PrettyMethod(method_idx, dex_file);
+ DCHECK(GetCompiledMethod(ref) == nullptr) << PrettyMethod(method_idx, dex_file);
{
MutexLock mu(self, compiled_methods_lock_);
compiled_methods_.Put(ref, compiled_method);
}
- DCHECK(GetCompiledMethod(ref) != NULL) << PrettyMethod(method_idx, dex_file);
+ DCHECK(GetCompiledMethod(ref) != nullptr) << PrettyMethod(method_idx, dex_file);
}
if (self->IsExceptionPending()) {
ScopedObjectAccess soa(self);
LOG(FATAL) << "Unexpected exception compiling: " << PrettyMethod(method_idx, dex_file) << "\n"
- << self->GetException(NULL)->Dump();
+ << self->GetException(nullptr)->Dump();
}
}
@@ -2043,9 +2099,9 @@ CompiledClass* CompilerDriver::GetCompiledClass(ClassReference ref) const {
MutexLock mu(Thread::Current(), compiled_classes_lock_);
ClassTable::const_iterator it = compiled_classes_.find(ref);
if (it == compiled_classes_.end()) {
- return NULL;
+ return nullptr;
}
- CHECK(it->second != NULL);
+ CHECK(it->second != nullptr);
return it->second;
}
@@ -2079,9 +2135,9 @@ CompiledMethod* CompilerDriver::GetCompiledMethod(MethodReference ref) const {
MutexLock mu(Thread::Current(), compiled_methods_lock_);
MethodTable::const_iterator it = compiled_methods_.find(ref);
if (it == compiled_methods_.end()) {
- return NULL;
+ return nullptr;
}
- CHECK(it->second != NULL);
+ CHECK(it->second != nullptr);
return it->second;
}
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 624947d7b1..e7bd35776a 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -81,7 +81,7 @@ class CompilerDriver {
// Create a compiler targeting the requested "instruction_set".
// "image" should be true if image specific optimizations should be
// enabled. "image_classes" lets the compiler know what classes it
- // can assume will be in the image, with NULL implying all available
+ // can assume will be in the image, with nullptr implying all available
// classes.
explicit CompilerDriver(const CompilerOptions* compiler_options,
VerificationResults* verification_results,
@@ -183,9 +183,9 @@ class CompilerDriver {
// Are runtime access checks necessary in the compiled code?
bool CanAccessTypeWithoutChecks(uint32_t referrer_idx, const DexFile& dex_file,
- uint32_t type_idx, bool* type_known_final = NULL,
- bool* type_known_abstract = NULL,
- bool* equals_referrers_class = NULL)
+ uint32_t type_idx, bool* type_known_final = nullptr,
+ bool* type_known_abstract = nullptr,
+ bool* equals_referrers_class = nullptr)
LOCKS_EXCLUDED(Locks::mutator_lock_);
// Are runtime access and instantiable checks necessary in the code?
@@ -260,7 +260,7 @@ class CompilerDriver {
uint16_t* declaring_class_idx, uint16_t* declaring_method_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Get declaration location of a resolved field.
+ // Get the index in the vtable of the method.
uint16_t GetResolvedMethodVTableIndex(
mirror::ArtMethod* resolved_method, InvokeType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -436,7 +436,7 @@ class CompilerDriver {
referrer_class_def_idx_(referrer_class_def_idx),
referrer_method_idx_(referrer_method_idx),
literal_offset_(literal_offset) {
- CHECK(dex_file_ != NULL);
+ CHECK(dex_file_ != nullptr);
}
virtual ~PatchInformation() {}
@@ -655,6 +655,13 @@ class CompilerDriver {
ThreadPool* thread_pool, TimingLogger* timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
+ void SetVerified(jobject class_loader, const std::vector<const DexFile*>& dex_files,
+ ThreadPool* thread_pool, TimingLogger* timings);
+ void SetVerifiedDexFile(jobject class_loader, const DexFile& dex_file,
+ const std::vector<const DexFile*>& dex_files,
+ ThreadPool* thread_pool, TimingLogger* timings)
+ LOCKS_EXCLUDED(Locks::mutator_lock_);
+
void InitializeClasses(jobject class_loader, const std::vector<const DexFile*>& dex_files,
ThreadPool* thread_pool, TimingLogger* timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
@@ -712,7 +719,7 @@ class CompilerDriver {
const bool image_;
// If image_ is true, specifies the classes that will be included in
- // the image. Note if image_classes_ is NULL, all classes are
+ // the image. Note if image_classes_ is nullptr, all classes are
// included in the image.
std::unique_ptr<std::set<std::string>> image_classes_;
diff --git a/compiler/elf_fixup.cc b/compiler/elf_fixup.cc
index bbfbc6ece0..0d348793df 100644
--- a/compiler/elf_fixup.cc
+++ b/compiler/elf_fixup.cc
@@ -89,17 +89,18 @@ bool ElfFixup::FixupDynamic(ElfFile& elf_file, uintptr_t base_address) {
bool ElfFixup::FixupSectionHeaders(ElfFile& elf_file, uintptr_t base_address) {
for (Elf32_Word i = 0; i < elf_file.GetSectionHeaderNum(); i++) {
- Elf32_Shdr& sh = elf_file.GetSectionHeader(i);
+ Elf32_Shdr* sh = elf_file.GetSectionHeader(i);
+ CHECK(sh != nullptr);
// 0 implies that the section will not exist in the memory of the process
- if (sh.sh_addr == 0) {
+ if (sh->sh_addr == 0) {
continue;
}
if (DEBUG_FIXUP) {
LOG(INFO) << StringPrintf("In %s moving Elf32_Shdr[%d] from 0x%08x to 0x%08" PRIxPTR,
elf_file.GetFile().GetPath().c_str(), i,
- sh.sh_addr, sh.sh_addr + base_address);
+ sh->sh_addr, sh->sh_addr + base_address);
}
- sh.sh_addr += base_address;
+ sh->sh_addr += base_address;
}
return true;
}
@@ -107,18 +108,19 @@ bool ElfFixup::FixupSectionHeaders(ElfFile& elf_file, uintptr_t base_address) {
bool ElfFixup::FixupProgramHeaders(ElfFile& elf_file, uintptr_t base_address) {
// TODO: ELFObjectFile doesn't give access to Elf32_Phdr, so we do that ourselves for now.
for (Elf32_Word i = 0; i < elf_file.GetProgramHeaderNum(); i++) {
- Elf32_Phdr& ph = elf_file.GetProgramHeader(i);
- CHECK_EQ(ph.p_vaddr, ph.p_paddr) << elf_file.GetFile().GetPath() << " i=" << i;
- CHECK((ph.p_align == 0) || (0 == ((ph.p_vaddr - ph.p_offset) & (ph.p_align - 1))))
+ Elf32_Phdr* ph = elf_file.GetProgramHeader(i);
+ CHECK(ph != nullptr);
+ CHECK_EQ(ph->p_vaddr, ph->p_paddr) << elf_file.GetFile().GetPath() << " i=" << i;
+ CHECK((ph->p_align == 0) || (0 == ((ph->p_vaddr - ph->p_offset) & (ph->p_align - 1))))
<< elf_file.GetFile().GetPath() << " i=" << i;
if (DEBUG_FIXUP) {
LOG(INFO) << StringPrintf("In %s moving Elf32_Phdr[%d] from 0x%08x to 0x%08" PRIxPTR,
elf_file.GetFile().GetPath().c_str(), i,
- ph.p_vaddr, ph.p_vaddr + base_address);
+ ph->p_vaddr, ph->p_vaddr + base_address);
}
- ph.p_vaddr += base_address;
- ph.p_paddr += base_address;
- CHECK((ph.p_align == 0) || (0 == ((ph.p_vaddr - ph.p_offset) & (ph.p_align - 1))))
+ ph->p_vaddr += base_address;
+ ph->p_paddr += base_address;
+ CHECK((ph->p_align == 0) || (0 == ((ph->p_vaddr - ph->p_offset) & (ph->p_align - 1))))
<< elf_file.GetFile().GetPath() << " i=" << i;
}
return true;
@@ -128,20 +130,21 @@ bool ElfFixup::FixupSymbols(ElfFile& elf_file, uintptr_t base_address, bool dyna
Elf32_Word section_type = dynamic ? SHT_DYNSYM : SHT_SYMTAB;
// TODO: Unfortunately, ELFObjectFile has protected symbol access, so use ElfFile
Elf32_Shdr* symbol_section = elf_file.FindSectionByType(section_type);
- if (symbol_section == NULL) {
+ if (symbol_section == nullptr) {
// file is missing optional .symtab
CHECK(!dynamic) << elf_file.GetFile().GetPath();
return true;
}
for (uint32_t i = 0; i < elf_file.GetSymbolNum(*symbol_section); i++) {
- Elf32_Sym& symbol = elf_file.GetSymbol(section_type, i);
- if (symbol.st_value != 0) {
+ Elf32_Sym* symbol = elf_file.GetSymbol(section_type, i);
+ CHECK(symbol != nullptr);
+ if (symbol->st_value != 0) {
if (DEBUG_FIXUP) {
LOG(INFO) << StringPrintf("In %s moving Elf32_Sym[%d] from 0x%08x to 0x%08" PRIxPTR,
elf_file.GetFile().GetPath().c_str(), i,
- symbol.st_value, symbol.st_value + base_address);
+ symbol->st_value, symbol->st_value + base_address);
}
- symbol.st_value += base_address;
+ symbol->st_value += base_address;
}
}
return true;
@@ -149,10 +152,11 @@ bool ElfFixup::FixupSymbols(ElfFile& elf_file, uintptr_t base_address, bool dyna
bool ElfFixup::FixupRelocations(ElfFile& elf_file, uintptr_t base_address) {
for (Elf32_Word i = 0; i < elf_file.GetSectionHeaderNum(); i++) {
- Elf32_Shdr& sh = elf_file.GetSectionHeader(i);
- if (sh.sh_type == SHT_REL) {
- for (uint32_t i = 0; i < elf_file.GetRelNum(sh); i++) {
- Elf32_Rel& rel = elf_file.GetRel(sh, i);
+ Elf32_Shdr* sh = elf_file.GetSectionHeader(i);
+ CHECK(sh != nullptr);
+ if (sh->sh_type == SHT_REL) {
+ for (uint32_t i = 0; i < elf_file.GetRelNum(*sh); i++) {
+ Elf32_Rel& rel = elf_file.GetRel(*sh, i);
if (DEBUG_FIXUP) {
LOG(INFO) << StringPrintf("In %s moving Elf32_Rel[%d] from 0x%08x to 0x%08" PRIxPTR,
elf_file.GetFile().GetPath().c_str(), i,
@@ -160,9 +164,9 @@ bool ElfFixup::FixupRelocations(ElfFile& elf_file, uintptr_t base_address) {
}
rel.r_offset += base_address;
}
- } else if (sh.sh_type == SHT_RELA) {
- for (uint32_t i = 0; i < elf_file.GetRelaNum(sh); i++) {
- Elf32_Rela& rela = elf_file.GetRela(sh, i);
+ } else if (sh->sh_type == SHT_RELA) {
+ for (uint32_t i = 0; i < elf_file.GetRelaNum(*sh); i++) {
+ Elf32_Rela& rela = elf_file.GetRela(*sh, i);
if (DEBUG_FIXUP) {
LOG(INFO) << StringPrintf("In %s moving Elf32_Rela[%d] from 0x%08x to 0x%08" PRIxPTR,
elf_file.GetFile().GetPath().c_str(), i,
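
The pattern behind this whole file's change: ElfFile accessors that used to return Elf32_Shdr& (and friends) now return pointers, so a bad index can surface as nullptr instead of a fault, and every caller CHECKs before dereferencing. A minimal illustration with stand-in types:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct Shdr { uint32_t sh_addr; };

    struct MiniElf {
      std::vector<Shdr> sections;
      Shdr* GetSectionHeader(size_t i) {
        return i < sections.size() ? &sections[i] : nullptr;  // was: a reference
      }
    };

    void FixupSections(MiniElf* elf, uintptr_t base_address) {
      for (size_t i = 0; i < elf->sections.size(); ++i) {
        Shdr* sh = elf->GetSectionHeader(i);
        assert(sh != nullptr);  // mirrors CHECK(sh != nullptr)
        if (sh->sh_addr != 0) {
          sh->sh_addr += base_address;
        }
      }
    }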
diff --git a/compiler/elf_patcher.cc b/compiler/elf_patcher.cc
index f192227c1f..92eb4d8955 100644
--- a/compiler/elf_patcher.cc
+++ b/compiler/elf_patcher.cc
@@ -276,7 +276,7 @@ bool ElfPatcher::WriteOutPatchData() {
<< "We got more patches than anticipated";
CHECK_LE(reinterpret_cast<uintptr_t>(elf_file_->Begin()) + shdr->sh_offset + shdr->sh_size,
reinterpret_cast<uintptr_t>(elf_file_->End())) << "section is too large";
- CHECK(shdr == &elf_file_->GetSectionHeader(elf_file_->GetSectionHeaderNum() - 1) ||
+ CHECK(shdr == elf_file_->GetSectionHeader(elf_file_->GetSectionHeaderNum() - 1) ||
shdr->sh_offset + shdr->sh_size <= (shdr + 1)->sh_offset)
<< "Section overlaps onto next section";
// It's mmap'd so we can just memcpy.
diff --git a/compiler/elf_stripper.cc b/compiler/elf_stripper.cc
index 0b86ad0711..457d8a0940 100644
--- a/compiler/elf_stripper.cc
+++ b/compiler/elf_stripper.cc
@@ -72,13 +72,15 @@ bool ElfStripper::Strip(File* file, std::string* error_msg) {
section_headers.reserve(elf_file->GetSectionHeaderNum());
- Elf32_Shdr& string_section = elf_file->GetSectionNameStringSection();
+ Elf32_Shdr* string_section = elf_file->GetSectionNameStringSection();
+ CHECK(string_section != nullptr);
for (Elf32_Word i = 0; i < elf_file->GetSectionHeaderNum(); i++) {
- Elf32_Shdr& sh = elf_file->GetSectionHeader(i);
- const char* name = elf_file->GetString(string_section, sh.sh_name);
- if (name == NULL) {
+ Elf32_Shdr* sh = elf_file->GetSectionHeader(i);
+ CHECK(sh != nullptr);
+ const char* name = elf_file->GetString(*string_section, sh->sh_name);
+ if (name == nullptr) {
CHECK_EQ(0U, i);
- section_headers.push_back(sh);
+ section_headers.push_back(*sh);
section_headers_original_indexes.push_back(0);
continue;
}
@@ -87,32 +89,34 @@ bool ElfStripper::Strip(File* file, std::string* error_msg) {
|| (strcmp(name, ".symtab") == 0)) {
continue;
}
- section_headers.push_back(sh);
+ section_headers.push_back(*sh);
section_headers_original_indexes.push_back(i);
}
CHECK_NE(0U, section_headers.size());
CHECK_EQ(section_headers.size(), section_headers_original_indexes.size());
// section 0 is the NULL section, sections start at offset of first section
- Elf32_Off offset = elf_file->GetSectionHeader(1).sh_offset;
+ CHECK(elf_file->GetSectionHeader(1) != nullptr);
+ Elf32_Off offset = elf_file->GetSectionHeader(1)->sh_offset;
for (size_t i = 1; i < section_headers.size(); i++) {
Elf32_Shdr& new_sh = section_headers[i];
- Elf32_Shdr& old_sh = elf_file->GetSectionHeader(section_headers_original_indexes[i]);
- CHECK_EQ(new_sh.sh_name, old_sh.sh_name);
- if (old_sh.sh_addralign > 1) {
- offset = RoundUp(offset, old_sh.sh_addralign);
+ Elf32_Shdr* old_sh = elf_file->GetSectionHeader(section_headers_original_indexes[i]);
+ CHECK(old_sh != nullptr);
+ CHECK_EQ(new_sh.sh_name, old_sh->sh_name);
+ if (old_sh->sh_addralign > 1) {
+ offset = RoundUp(offset, old_sh->sh_addralign);
}
- if (old_sh.sh_offset == offset) {
+ if (old_sh->sh_offset == offset) {
// already in place
- offset += old_sh.sh_size;
+ offset += old_sh->sh_size;
continue;
}
// shift section earlier
memmove(elf_file->Begin() + offset,
- elf_file->Begin() + old_sh.sh_offset,
- old_sh.sh_size);
+ elf_file->Begin() + old_sh->sh_offset,
+ old_sh->sh_size);
new_sh.sh_offset = offset;
- offset += old_sh.sh_size;
+ offset += old_sh->sh_size;
}
Elf32_Off shoff = offset;
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index c5d1478a43..e74d6de4eb 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -437,7 +437,7 @@ class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor {
std::vector<uint8_t> const * gc_map = compiled_method->GetGcMap();
if (gc_map != nullptr) {
size_t gc_map_size = gc_map->size() * sizeof(gc_map[0]);
- bool is_native = (it.GetMemberAccessFlags() & kAccNative) != 0;
+ bool is_native = it.MemberIsNative();
CHECK(gc_map_size != 0 || is_native || status < mirror::Class::kStatusVerified)
<< gc_map << " " << gc_map_size << " " << (is_native ? "true" : "false") << " "
<< (status < mirror::Class::kStatusVerified) << " " << status << " "
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index ecd6802ca4..33b00d2ac9 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -138,13 +138,15 @@ static bool CanHandleCodeItem(const DexFile::CodeItem& code_item) {
template<typename T>
void HGraphBuilder::If_22t(const Instruction& instruction, uint32_t dex_offset) {
+ int32_t target_offset = instruction.GetTargetOffset();
+ PotentiallyAddSuspendCheck(target_offset, dex_offset);
HInstruction* first = LoadLocal(instruction.VRegA(), Primitive::kPrimInt);
HInstruction* second = LoadLocal(instruction.VRegB(), Primitive::kPrimInt);
T* comparison = new (arena_) T(first, second);
current_block_->AddInstruction(comparison);
HInstruction* ifinst = new (arena_) HIf(comparison);
current_block_->AddInstruction(ifinst);
- HBasicBlock* target = FindBlockStartingAt(dex_offset + instruction.GetTargetOffset());
+ HBasicBlock* target = FindBlockStartingAt(dex_offset + target_offset);
DCHECK(target != nullptr);
current_block_->AddSuccessor(target);
target = FindBlockStartingAt(dex_offset + instruction.SizeInCodeUnits());
@@ -155,12 +157,14 @@ void HGraphBuilder::If_22t(const Instruction& instruction, uint32_t dex_offset)
template<typename T>
void HGraphBuilder::If_21t(const Instruction& instruction, uint32_t dex_offset) {
+ int32_t target_offset = instruction.GetTargetOffset();
+ PotentiallyAddSuspendCheck(target_offset, dex_offset);
HInstruction* value = LoadLocal(instruction.VRegA(), Primitive::kPrimInt);
T* comparison = new (arena_) T(value, GetIntConstant(0));
current_block_->AddInstruction(comparison);
HInstruction* ifinst = new (arena_) HIf(comparison);
current_block_->AddInstruction(ifinst);
- HBasicBlock* target = FindBlockStartingAt(dex_offset + instruction.GetTargetOffset());
+ HBasicBlock* target = FindBlockStartingAt(dex_offset + target_offset);
DCHECK(target != nullptr);
current_block_->AddSuccessor(target);
target = FindBlockStartingAt(dex_offset + instruction.SizeInCodeUnits());
@@ -209,6 +213,8 @@ HGraph* HGraphBuilder::BuildGraph(const DexFile::CodeItem& code_item) {
// Add the exit block at the end to give it the highest id.
graph_->AddBlock(exit_block_);
exit_block_->AddInstruction(new (arena_) HExit());
+ // Add the suspend check to the entry block.
+ entry_block_->AddInstruction(new (arena_) HSuspendCheck(0));
entry_block_->AddInstruction(new (arena_) HGoto());
return graph_;
}
@@ -325,18 +331,61 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
bool is_range,
uint32_t* args,
uint32_t register_index) {
+ Instruction::Code opcode = instruction.Opcode();
+ InvokeType invoke_type;
+ switch (opcode) {
+ case Instruction::INVOKE_STATIC:
+ case Instruction::INVOKE_STATIC_RANGE:
+ invoke_type = kStatic;
+ break;
+ case Instruction::INVOKE_DIRECT:
+ case Instruction::INVOKE_DIRECT_RANGE:
+ invoke_type = kDirect;
+ break;
+ case Instruction::INVOKE_VIRTUAL:
+ case Instruction::INVOKE_VIRTUAL_RANGE:
+ invoke_type = kVirtual;
+ break;
+ case Instruction::INVOKE_INTERFACE:
+ case Instruction::INVOKE_INTERFACE_RANGE:
+ invoke_type = kInterface;
+ break;
+ case Instruction::INVOKE_SUPER_RANGE:
+ case Instruction::INVOKE_SUPER:
+ invoke_type = kSuper;
+ break;
+ default:
+ LOG(FATAL) << "Unexpected invoke op: " << opcode;
+ return false;
+ }
+
const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
const DexFile::ProtoId& proto_id = dex_file_->GetProtoId(method_id.proto_idx_);
const char* descriptor = dex_file_->StringDataByIdx(proto_id.shorty_idx_);
Primitive::Type return_type = Primitive::GetType(descriptor[0]);
- bool is_instance_call =
- instruction.Opcode() != Instruction::INVOKE_STATIC
- && instruction.Opcode() != Instruction::INVOKE_STATIC_RANGE;
+ bool is_instance_call = invoke_type != kStatic;
const size_t number_of_arguments = strlen(descriptor) - (is_instance_call ? 0 : 1);
- // Treat invoke-direct like static calls for now.
- HInvoke* invoke = new (arena_) HInvokeStatic(
- arena_, number_of_arguments, return_type, dex_offset, method_idx);
+ HInvoke* invoke = nullptr;
+ if (invoke_type == kVirtual) {
+ MethodReference target_method(dex_file_, method_idx);
+ uintptr_t direct_code;
+ uintptr_t direct_method;
+ int vtable_index;
+ // TODO: Add devirtualization support.
+ compiler_driver_->ComputeInvokeInfo(dex_compilation_unit_, dex_offset, true, true,
+ &invoke_type, &target_method, &vtable_index,
+ &direct_code, &direct_method);
+ if (vtable_index == -1) {
+ return false;
+ }
+ invoke = new (arena_) HInvokeVirtual(
+ arena_, number_of_arguments, return_type, dex_offset, vtable_index);
+ } else {
+ // Treat invoke-direct like static calls for now.
+ invoke = new (arena_) HInvokeStatic(
+ arena_, number_of_arguments, return_type, dex_offset, method_idx);
+ }
size_t start_index = 0;
Temporaries temps(graph_, is_instance_call ? 1 : 0);
@@ -462,7 +511,15 @@ void HGraphBuilder::BuildArrayAccess(const Instruction& instruction,
}
}
-bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, int32_t dex_offset) {
+void HGraphBuilder::PotentiallyAddSuspendCheck(int32_t target_offset, uint32_t dex_offset) {
+ if (target_offset <= 0) {
+ // Unconditionally add a suspend check to backward branches. We can remove
+ // them after we recognize loops in the graph.
+ current_block_->AddInstruction(new (arena_) HSuspendCheck(dex_offset));
+ }
+}
+
+bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32_t dex_offset) {
if (current_block_ == nullptr) {
return true; // Dead code
}
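
The sign test works because dex branch offsets are relative to the branching instruction: a non-positive target_offset jumps to the same or an earlier dex_pc, i.e. a potential loop back edge, which is exactly where a thread must be able to observe a suspend request. A trivial stand-alone check:

    #include <cassert>
    #include <cstdint>

    bool IsBackwardBranch(int32_t target_offset) {
      return target_offset <= 0;
    }

    int main() {
      assert(IsBackwardBranch(-6));   // back edge: add HSuspendCheck
      assert(!IsBackwardBranch(4));   // forward branch: no check needed
      return 0;
    }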
@@ -580,7 +637,9 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, int32_
case Instruction::GOTO:
case Instruction::GOTO_16:
case Instruction::GOTO_32: {
- HBasicBlock* target = FindBlockStartingAt(instruction.GetTargetOffset() + dex_offset);
+ int32_t offset = instruction.GetTargetOffset();
+ PotentiallyAddSuspendCheck(offset, dex_offset);
+ HBasicBlock* target = FindBlockStartingAt(offset + dex_offset);
DCHECK(target != nullptr);
current_block_->AddInstruction(new (arena_) HGoto());
current_block_->AddSuccessor(target);
@@ -604,7 +663,8 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, int32_
}
case Instruction::INVOKE_STATIC:
- case Instruction::INVOKE_DIRECT: {
+ case Instruction::INVOKE_DIRECT:
+ case Instruction::INVOKE_VIRTUAL: {
uint32_t method_idx = instruction.VRegB_35c();
uint32_t number_of_vreg_arguments = instruction.VRegA_35c();
uint32_t args[5];
@@ -616,7 +676,8 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, int32_
}
case Instruction::INVOKE_STATIC_RANGE:
- case Instruction::INVOKE_DIRECT_RANGE: {
+ case Instruction::INVOKE_DIRECT_RANGE:
+ case Instruction::INVOKE_VIRTUAL_RANGE: {
uint32_t method_idx = instruction.VRegB_3rc();
uint32_t number_of_vreg_arguments = instruction.VRegA_3rc();
uint32_t register_index = instruction.VRegC();
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 170c42761a..e143786be7 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -54,7 +54,7 @@ class HGraphBuilder : public ValueObject {
// Analyzes the dex instruction and adds HInstruction to the graph
// to execute that instruction. Returns whether the instruction can
// be handled.
- bool AnalyzeDexInstruction(const Instruction& instruction, int32_t dex_offset);
+ bool AnalyzeDexInstruction(const Instruction& instruction, uint32_t dex_offset);
// Finds all instructions that start a new block, and populates branch_targets_ with
// the newly created blocks.
@@ -70,6 +70,7 @@ class HGraphBuilder : public ValueObject {
HLocal* GetLocalAt(int register_index) const;
void UpdateLocal(int register_index, HInstruction* instruction) const;
HInstruction* LoadLocal(int register_index, Primitive::Type type) const;
+ void PotentiallyAddSuspendCheck(int32_t target_offset, uint32_t dex_offset);
// Temporarily returns whether the compiler supports the parameters
// of the method.
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index e72e39ba71..ad622798a6 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -20,6 +20,7 @@
#include "gc/accounting/card_table.h"
#include "mirror/array.h"
#include "mirror/art_method.h"
+#include "mirror/class.h"
#include "thread.h"
#include "utils/assembler.h"
#include "utils/arm/assembler_arm.h"
@@ -90,6 +91,29 @@ class StackOverflowCheckSlowPathARM : public SlowPathCode {
DISALLOW_COPY_AND_ASSIGN(StackOverflowCheckSlowPathARM);
};
+class SuspendCheckSlowPathARM : public SlowPathCode {
+ public:
+ explicit SuspendCheckSlowPathARM(HSuspendCheck* instruction)
+ : instruction_(instruction) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ __ Bind(GetEntryLabel());
+ int32_t offset = QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pTestSuspend).Int32Value();
+ __ ldr(LR, Address(TR, offset));
+ __ blx(LR);
+ codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+ __ b(GetReturnLabel());
+ }
+
+ Label* GetReturnLabel() { return &return_label_; }
+
+ private:
+ HSuspendCheck* const instruction_;
+ Label return_label_;
+
+ DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM);
+};
+
class BoundsCheckSlowPathARM : public SlowPathCode {
public:
explicit BoundsCheckSlowPathARM(HBoundsCheck* instruction,
@@ -795,6 +819,47 @@ void InstructionCodeGeneratorARM::VisitReturn(HReturn* ret) {
}
void LocationsBuilderARM::VisitInvokeStatic(HInvokeStatic* invoke) {
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorARM::LoadCurrentMethod(Register reg) {
+ __ ldr(reg, Address(SP, kCurrentMethodStackOffset));
+}
+
+void InstructionCodeGeneratorARM::VisitInvokeStatic(HInvokeStatic* invoke) {
+ Register temp = invoke->GetLocations()->GetTemp(0).AsArm().AsCoreRegister();
+ uint32_t heap_reference_size = sizeof(mirror::HeapReference<mirror::Object>);
+ size_t index_in_cache = mirror::Array::DataOffset(heap_reference_size).Int32Value() +
+ invoke->GetIndexInDexCache() * kArmWordSize;
+
+ // TODO: Implement all kinds of calls:
+ // 1) boot -> boot
+ // 2) app -> boot
+ // 3) app -> app
+ //
+ // Currently we implement the app -> app logic, which looks up in the resolve cache.
+
+ // temp = method;
+ LoadCurrentMethod(temp);
+ // temp = temp->dex_cache_resolved_methods_;
+ __ ldr(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
+ // temp = temp[index_in_cache]
+ __ ldr(temp, Address(temp, index_in_cache));
+ // LR = temp[offset_of_quick_compiled_code]
+ __ ldr(LR, Address(temp,
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
+ // LR()
+ __ blx(LR);
+
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ DCHECK(!codegen_->IsLeafMethod());
+}
+
+void LocationsBuilderARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+ HandleInvoke(invoke);
+}
+
+void LocationsBuilderARM::HandleInvoke(HInvoke* invoke) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
locations->AddTemp(ArmCoreLocation(R0));
@@ -829,37 +894,30 @@ void LocationsBuilderARM::VisitInvokeStatic(HInvokeStatic* invoke) {
}
}
-void InstructionCodeGeneratorARM::LoadCurrentMethod(Register reg) {
- __ ldr(reg, Address(SP, kCurrentMethodStackOffset));
-}
-void InstructionCodeGeneratorARM::VisitInvokeStatic(HInvokeStatic* invoke) {
+void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
Register temp = invoke->GetLocations()->GetTemp(0).AsArm().AsCoreRegister();
- uint32_t heap_reference_size = sizeof(mirror::HeapReference<mirror::Object>);
- size_t index_in_cache = mirror::Array::DataOffset(heap_reference_size).Int32Value() +
- invoke->GetIndexInDexCache() * kArmWordSize;
-
- // TODO: Implement all kinds of calls:
- // 1) boot -> boot
- // 2) app -> boot
- // 3) app -> app
- //
- // Currently we implement the app -> app logic, which looks up in the resolve cache.
-
- // temp = method;
- LoadCurrentMethod(temp);
- // temp = temp->dex_cache_resolved_methods_;
- __ ldr(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
- // temp = temp[index_in_cache]
- __ ldr(temp, Address(temp, index_in_cache));
- // LR = temp[offset_of_quick_compiled_code]
- __ ldr(LR, Address(temp,
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
- // LR()
+ uint32_t method_offset = mirror::Class::EmbeddedVTableOffset().Uint32Value() +
+ invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
+ LocationSummary* locations = invoke->GetLocations();
+ Location receiver = locations->InAt(0);
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ // temp = object->GetClass();
+ if (receiver.IsStackSlot()) {
+ __ ldr(temp, Address(SP, receiver.GetStackIndex()));
+ __ ldr(temp, Address(temp, class_offset));
+ } else {
+ __ ldr(temp, Address(receiver.AsArm().AsCoreRegister(), class_offset));
+ }
+ // temp = temp->GetMethodAt(method_offset);
+ uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value();
+ __ ldr(temp, Address(temp, method_offset));
+ // LR = temp->GetEntryPoint();
+ __ ldr(LR, Address(temp, entry_point));
+ // LR();
__ blx(LR);
-
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
DCHECK(!codegen_->IsLeafMethod());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
void LocationsBuilderARM::VisitAdd(HAdd* add) {
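
The virtual dispatch sequence above loads the target ArtMethod straight out of the receiver class's embedded vtable; the offset is pure arithmetic. A minimal model (both constants are hypothetical; ART derives them from mirror::Class):

    #include <cstdint>
    #include <iostream>

    int main() {
      const uint32_t kEmbeddedVTableOffset = 80;  // hypothetical header size
      const uint32_t kVTableEntrySize = 4;        // hypothetical entry size
      uint32_t vtable_index = 3;
      uint32_t method_offset =
          kEmbeddedVTableOffset + vtable_index * kVTableEntrySize;
      // receiver->klass_ + method_offset holds the ArtMethod*; its quick
      // entry point is then loaded and branched to.
      std::cout << "method_offset = " << method_offset << "\n";  // 92
      return 0;
    }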
@@ -1494,6 +1552,21 @@ void InstructionCodeGeneratorARM::VisitParallelMove(HParallelMove* instruction)
codegen_->GetMoveResolver()->EmitNativeCode(instruction);
}
+void LocationsBuilderARM::VisitSuspendCheck(HSuspendCheck* instruction) {
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+}
+
+void InstructionCodeGeneratorARM::VisitSuspendCheck(HSuspendCheck* instruction) {
+ SuspendCheckSlowPathARM* slow_path =
+ new (GetGraph()->GetArena()) SuspendCheckSlowPathARM(instruction);
+ codegen_->AddSlowPath(slow_path);
+
+ __ AddConstant(R4, R4, -1);
+ __ cmp(R4, ShifterOperand(0));
+ __ b(slow_path->GetEntryLabel(), LE);
+ __ Bind(slow_path->GetReturnLabel());
+}
+
ArmAssembler* ParallelMoveResolverARM::GetAssembler() const {
return codegen_->GetAssembler();
}
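
On ARM the check piggybacks on a countdown register (R4 is Quick's rSUSPEND, which the runtime resets after each test-suspend call): decrement, compare against zero, take the slow path at zero or below. The same logic in plain C++, with the slow path stubbed out:

    #include <cstdint>
    #include <iostream>

    static void TestSuspendSlowPath() { std::cout << "suspend check\n"; }

    void SuspendCheck(int32_t* counter) {
      *counter -= 1;            // AddConstant(R4, R4, -1)
      if (*counter <= 0) {      // cmp R4, #0 ; b slow_path, LE
        TestSuspendSlowPath();  // calls pTestSuspend, which resets R4
      }
    }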
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 660294b147..2480960f32 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -93,6 +93,8 @@ class LocationsBuilderARM : public HGraphVisitor {
#undef DECLARE_VISIT_INSTRUCTION
+ void HandleInvoke(HInvoke* invoke);
+
private:
CodeGeneratorARM* const codegen_;
InvokeDexCallingConventionVisitor parameter_visitor_;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 6602d3fb45..3383cb2117 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -20,6 +20,7 @@
#include "gc/accounting/card_table.h"
#include "mirror/array.h"
#include "mirror/art_method.h"
+#include "mirror/class.h"
#include "thread.h"
#include "utils/assembler.h"
#include "utils/stack_checks.h"
@@ -114,6 +115,27 @@ class BoundsCheckSlowPathX86 : public SlowPathCode {
DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathX86);
};
+class SuspendCheckSlowPathX86 : public SlowPathCode {
+ public:
+ explicit SuspendCheckSlowPathX86(HSuspendCheck* instruction)
+ : instruction_(instruction) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ __ Bind(GetEntryLabel());
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pTestSuspend)));
+ codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+ __ jmp(GetReturnLabel());
+ }
+
+ Label* GetReturnLabel() { return &return_label_; }
+
+ private:
+ HSuspendCheck* const instruction_;
+ Label return_label_;
+
+ DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathX86);
+};
+
#undef __
#define __ reinterpret_cast<X86Assembler*>(GetAssembler())->
@@ -742,6 +764,40 @@ void InstructionCodeGeneratorX86::VisitReturn(HReturn* ret) {
}
void LocationsBuilderX86::VisitInvokeStatic(HInvokeStatic* invoke) {
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorX86::VisitInvokeStatic(HInvokeStatic* invoke) {
+ Register temp = invoke->GetLocations()->GetTemp(0).AsX86().AsCpuRegister();
+ uint32_t heap_reference_size = sizeof(mirror::HeapReference<mirror::Object>);
+ size_t index_in_cache = mirror::Array::DataOffset(heap_reference_size).Int32Value() +
+ invoke->GetIndexInDexCache() * kX86WordSize;
+
+ // TODO: Implement all kinds of calls:
+ // 1) boot -> boot
+ // 2) app -> boot
+ // 3) app -> app
+ //
+ // Currently we implement the app -> app logic, which looks up in the resolve cache.
+
+ // temp = method;
+ LoadCurrentMethod(temp);
+ // temp = temp->dex_cache_resolved_methods_;
+ __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
+ // temp = temp[index_in_cache]
+ __ movl(temp, Address(temp, index_in_cache));
+ // (temp + offset_of_quick_compiled_code)()
+ __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
+
+ DCHECK(!codegen_->IsLeafMethod());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+void LocationsBuilderX86::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+ HandleInvoke(invoke);
+}
+
+void LocationsBuilderX86::HandleInvoke(HInvoke* invoke) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
locations->AddTemp(X86CpuLocation(EAX));
@@ -778,26 +834,23 @@ void LocationsBuilderX86::VisitInvokeStatic(HInvokeStatic* invoke) {
invoke->SetLocations(locations);
}
-void InstructionCodeGeneratorX86::VisitInvokeStatic(HInvokeStatic* invoke) {
+void InstructionCodeGeneratorX86::VisitInvokeVirtual(HInvokeVirtual* invoke) {
Register temp = invoke->GetLocations()->GetTemp(0).AsX86().AsCpuRegister();
- uint32_t heap_reference_size = sizeof(mirror::HeapReference<mirror::Object>);
- size_t index_in_cache = mirror::Array::DataOffset(heap_reference_size).Int32Value() +
- invoke->GetIndexInDexCache() * kX86WordSize;
-
- // TODO: Implement all kinds of calls:
- // 1) boot -> boot
- // 2) app -> boot
- // 3) app -> app
- //
- // Currently we implement the app -> app logic, which looks up in the resolve cache.
-
- // temp = method;
- LoadCurrentMethod(temp);
- // temp = temp->dex_cache_resolved_methods_;
- __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
- // temp = temp[index_in_cache]
- __ movl(temp, Address(temp, index_in_cache));
- // (temp + offset_of_quick_compiled_code)()
+ uint32_t method_offset = mirror::Class::EmbeddedVTableOffset().Uint32Value() +
+ invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
+ LocationSummary* locations = invoke->GetLocations();
+ Location receiver = locations->InAt(0);
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ // temp = object->GetClass();
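+ // The receiver may have been spilled by the register allocator; reload it from
+ // its stack slot before dereferencing the class pointer.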
+ if (receiver.IsStackSlot()) {
+ __ movl(temp, Address(ESP, receiver.GetStackIndex()));
+ __ movl(temp, Address(temp, class_offset));
+ } else {
+ __ movl(temp, Address(receiver.AsX86().AsCpuRegister(), class_offset));
+ }
+ // temp = temp->GetMethodAt(method_offset);
+ __ movl(temp, Address(temp, method_offset));
+ // call temp->GetEntryPoint();
__ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
DCHECK(!codegen_->IsLeafMethod());
@@ -1483,6 +1536,20 @@ void InstructionCodeGeneratorX86::VisitParallelMove(HParallelMove* instruction)
codegen_->GetMoveResolver()->EmitNativeCode(instruction);
}
+void LocationsBuilderX86::VisitSuspendCheck(HSuspendCheck* instruction) {
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+}
+
+void InstructionCodeGeneratorX86::VisitSuspendCheck(HSuspendCheck* instruction) {
+ SuspendCheckSlowPathX86* slow_path =
+ new (GetGraph()->GetArena()) SuspendCheckSlowPathX86(instruction);
+ codegen_->AddSlowPath(slow_path);
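+ // On x86 the current Thread* is addressed through the fs segment; any non-zero
+ // thread flag (suspend or checkpoint request) diverts to the slow path.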
+ __ fs()->cmpl(Address::Absolute(
+ Thread::ThreadFlagsOffset<kX86WordSize>().Int32Value()), Immediate(0));
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetReturnLabel());
+}
+
X86Assembler* ParallelMoveResolverX86::GetAssembler() const {
return codegen_->GetAssembler();
}
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 7c502049d8..f1be0ad5b7 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -94,6 +94,8 @@ class LocationsBuilderX86 : public HGraphVisitor {
#undef DECLARE_VISIT_INSTRUCTION
+ void HandleInvoke(HInvoke* invoke);
+
private:
CodeGeneratorX86* const codegen_;
InvokeDexCallingConventionVisitor parameter_visitor_;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index b2d81e35dd..ca03af8e9f 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -20,6 +20,7 @@
#include "gc/accounting/card_table.h"
#include "mirror/array.h"
#include "mirror/art_method.h"
+#include "mirror/class.h"
#include "mirror/object_reference.h"
#include "thread.h"
#include "utils/assembler.h"
@@ -95,6 +96,27 @@ class StackOverflowCheckSlowPathX86_64 : public SlowPathCode {
DISALLOW_COPY_AND_ASSIGN(StackOverflowCheckSlowPathX86_64);
};
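+// x86-64 version of the suspend-check slow path; the current Thread is reached
+// through the gs segment rather than fs.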
+class SuspendCheckSlowPathX86_64 : public SlowPathCode {
+ public:
+ explicit SuspendCheckSlowPathX86_64(HSuspendCheck* instruction)
+ : instruction_(instruction) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ __ Bind(GetEntryLabel());
+ __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pTestSuspend), true));
+ codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+ __ jmp(GetReturnLabel());
+ }
+
+ Label* GetReturnLabel() { return &return_label_; }
+
+ private:
+ HSuspendCheck* const instruction_;
+ Label return_label_;
+
+ DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathX86_64);
+};
+
class BoundsCheckSlowPathX86_64 : public SlowPathCode {
public:
explicit BoundsCheckSlowPathX86_64(HBoundsCheck* instruction,
@@ -688,12 +710,46 @@ Location InvokeDexCallingConventionVisitor::GetNextLocation(Primitive::Type type
}
void LocationsBuilderX86_64::VisitInvokeStatic(HInvokeStatic* invoke) {
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorX86_64::VisitInvokeStatic(HInvokeStatic* invoke) {
+ CpuRegister temp = invoke->GetLocations()->GetTemp(0).AsX86_64().AsCpuRegister();
+ uint32_t heap_reference_size = sizeof(mirror::HeapReference<mirror::Object>);
+ size_t index_in_cache = mirror::Array::DataOffset(heap_reference_size).SizeValue() +
+ invoke->GetIndexInDexCache() * heap_reference_size;
+
+ // TODO: Implement all kinds of calls:
+ // 1) boot -> boot
+ // 2) app -> boot
+ // 3) app -> app
+ //
+ // Currently we implement the app -> app logic, which looks up in the resolve cache.
+
+ // temp = method;
+ LoadCurrentMethod(temp);
+ // temp = temp->dex_cache_resolved_methods_;
+ __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
+ // temp = temp[index_in_cache]
+ __ movl(temp, Address(temp, index_in_cache));
+ // (temp + offset_of_quick_compiled_code)()
+ __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().SizeValue()));
+
+ DCHECK(!codegen_->IsLeafMethod());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+void LocationsBuilderX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+ HandleInvoke(invoke);
+}
+
+void LocationsBuilderX86_64::HandleInvoke(HInvoke* invoke) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
locations->AddTemp(X86_64CpuLocation(RDI));
InvokeDexCallingConventionVisitor calling_convention_visitor;
for (size_t i = 0; i < invoke->InputCount(); ++i) {
HInstruction* input = invoke->InputAt(i);
locations->SetInAt(i, calling_convention_visitor.GetNextLocation(input->GetType()));
}
@@ -719,26 +775,23 @@ void LocationsBuilderX86_64::VisitInvokeStatic(HInvokeStatic* invoke) {
}
}
-void InstructionCodeGeneratorX86_64::VisitInvokeStatic(HInvokeStatic* invoke) {
+void InstructionCodeGeneratorX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
CpuRegister temp = invoke->GetLocations()->GetTemp(0).AsX86_64().AsCpuRegister();
- uint32_t heap_reference_size = sizeof(mirror::HeapReference<mirror::Object>);
- size_t index_in_cache = mirror::Array::DataOffset(heap_reference_size).SizeValue() +
- invoke->GetIndexInDexCache() * heap_reference_size;
-
- // TODO: Implement all kinds of calls:
- // 1) boot -> boot
- // 2) app -> boot
- // 3) app -> app
- //
- // Currently we implement the app -> app logic, which looks up in the resolve cache.
-
- // temp = method;
- LoadCurrentMethod(temp);
- // temp = temp->dex_cache_resolved_methods_;
- __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
- // temp = temp[index_in_cache]
- __ movl(temp, Address(temp, index_in_cache));
- // (temp + offset_of_quick_compiled_code)()
+ size_t method_offset = mirror::Class::EmbeddedVTableOffset().SizeValue() +
+ invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
+ LocationSummary* locations = invoke->GetLocations();
+ Location receiver = locations->InAt(0);
+ size_t class_offset = mirror::Object::ClassOffset().SizeValue();
+ // temp = object->GetClass();
+ if (receiver.IsStackSlot()) {
+ __ movq(temp, Address(CpuRegister(RSP), receiver.GetStackIndex()));
+ __ movq(temp, Address(temp, class_offset));
+ } else {
+ __ movq(temp, Address(receiver.AsX86_64().AsCpuRegister(), class_offset));
+ }
+ // temp = temp->GetMethodAt(method_offset);
+ __ movl(temp, Address(temp, method_offset));
+ // call temp->GetEntryPoint();
__ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().SizeValue()));
DCHECK(!codegen_->IsLeafMethod());
@@ -1329,6 +1382,20 @@ void InstructionCodeGeneratorX86_64::VisitParallelMove(HParallelMove* instructio
codegen_->GetMoveResolver()->EmitNativeCode(instruction);
}
+void LocationsBuilderX86_64::VisitSuspendCheck(HSuspendCheck* instruction) {
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+}
+
+void InstructionCodeGeneratorX86_64::VisitSuspendCheck(HSuspendCheck* instruction) {
+ SuspendCheckSlowPathX86_64* slow_path =
+ new (GetGraph()->GetArena()) SuspendCheckSlowPathX86_64(instruction);
+ codegen_->AddSlowPath(slow_path);
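+ // The boolean argument disables RIP-relative addressing so the offset is
+ // resolved relative to the gs segment, i.e. the current Thread.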
+ __ gs()->cmpl(Address::Absolute(
+ Thread::ThreadFlagsOffset<kX86_64WordSize>().Int32Value(), true), Immediate(0));
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetReturnLabel());
+}
+
X86_64Assembler* ParallelMoveResolverX86_64::GetAssembler() const {
return codegen_->GetAssembler();
}
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 44552ea465..78b60fe93c 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -91,6 +91,8 @@ class LocationsBuilderX86_64 : public HGraphVisitor {
#undef DECLARE_VISIT_INSTRUCTION
+ void HandleInvoke(HInvoke* invoke);
+
private:
CodeGeneratorX86_64* const codegen_;
InvokeDexCallingConventionVisitor parameter_visitor_;
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index b9712e148c..7161eed9f9 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -72,6 +72,8 @@ static void TestCode(const uint16_t* data, bool has_result = false, int32_t expe
HGraphBuilder builder(&arena);
const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
HGraph* graph = builder.BuildGraph(*item);
ASSERT_NE(graph, nullptr);
+ // Remove suspend checks; they cannot be executed in this context.
+ RemoveSuspendChecks(graph);
InternalCodeAllocator allocator;
diff --git a/compiler/optimizing/live_ranges_test.cc b/compiler/optimizing/live_ranges_test.cc
index 21e634de04..a81a30e457 100644
--- a/compiler/optimizing/live_ranges_test.cc
+++ b/compiler/optimizing/live_ranges_test.cc
@@ -32,6 +32,9 @@ static HGraph* BuildGraph(const uint16_t* data, ArenaAllocator* allocator) {
HGraphBuilder builder(allocator);
const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
HGraph* graph = builder.BuildGraph(*item);
+ // The suspend check implementation may change in the future, and this test
+ // relies on the exact ordering of instructions.
+ RemoveSuspendChecks(graph);
graph->BuildDominatorTree();
graph->TransformToSSA();
graph->FindNaturalLoops();
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 9018fee0a5..d6dfeaede8 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -422,6 +422,7 @@ class HBasicBlock : public ArenaObject {
M(If) \
M(IntConstant) \
M(InvokeStatic) \
+ M(InvokeVirtual) \
M(LoadLocal) \
M(Local) \
M(LongConstant) \
@@ -443,6 +444,7 @@ class HBasicBlock : public ArenaObject {
M(BoundsCheck) \
M(NullCheck) \
M(Temporary) \
+ M(SuspendCheck) \
#define FOR_EACH_INSTRUCTION(M) \
FOR_EACH_CONCRETE_INSTRUCTION(M) \
@@ -1271,6 +1273,26 @@ class HInvokeStatic : public HInvoke {
DISALLOW_COPY_AND_ASSIGN(HInvokeStatic);
};
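+// Invocation of a virtual method, dispatched through the vtable embedded in the
+// receiver's class, at the given vtable_index.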
+class HInvokeVirtual : public HInvoke {
+ public:
+ HInvokeVirtual(ArenaAllocator* arena,
+ uint32_t number_of_arguments,
+ Primitive::Type return_type,
+ uint32_t dex_pc,
+ uint32_t vtable_index)
+ : HInvoke(arena, number_of_arguments, return_type, dex_pc),
+ vtable_index_(vtable_index) {}
+
+ uint32_t GetVTableIndex() const { return vtable_index_; }
+
+ DECLARE_INSTRUCTION(InvokeVirtual);
+
+ private:
+ const uint32_t vtable_index_;
+
+ DISALLOW_COPY_AND_ASSIGN(HInvokeVirtual);
+};
+
class HNewInstance : public HExpression<0> {
public:
HNewInstance(uint32_t dex_pc, uint16_t type_index)
@@ -1593,6 +1615,25 @@ class HTemporary : public HTemplateInstruction<0> {
DISALLOW_COPY_AND_ASSIGN(HTemporary);
};
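+// Checks whether the current thread has a pending suspend request. The builder
+// inserts these at method entry and on backward branches so loops remain
+// interruptible for GC and debugging.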
+class HSuspendCheck : public HTemplateInstruction<0> {
+ public:
+ explicit HSuspendCheck(uint32_t dex_pc)
+ : HTemplateInstruction(SideEffects::ChangesSomething()), dex_pc_(dex_pc) {}
+
+ virtual bool NeedsEnvironment() const {
+ return true;
+ }
+
+ uint32_t GetDexPc() const { return dex_pc_; }
+
+ DECLARE_INSTRUCTION(SuspendCheck);
+
+ private:
+ const uint32_t dex_pc_;
+
+ DISALLOW_COPY_AND_ASSIGN(HSuspendCheck);
+};
+
class MoveOperands : public ArenaObject {
public:
MoveOperands(Location source, Location destination)
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index 36a6a21d01..c409529727 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -48,6 +48,19 @@ LiveInterval* BuildInterval(const size_t ranges[][2],
return interval;
}
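+// Test helper that strips all HSuspendCheck instructions from a graph, for tests
+// whose expectations depend on instruction numbering or ordering.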
+inline void RemoveSuspendChecks(HGraph* graph) {
+ for (size_t i = 0, e = graph->GetBlocks().Size(); i < e; ++i) {
+ for (HInstructionIterator it(graph->GetBlocks().Get(i)->GetInstructions());
+ !it.Done();
+ it.Advance()) {
+ HInstruction* current = it.Current();
+ if (current->IsSuspendCheck()) {
+ current->GetBlock()->RemoveInstruction(current);
+ }
+ }
+ }
+}
+
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_OPTIMIZING_UNIT_TEST_H_
diff --git a/compiler/optimizing/pretty_printer_test.cc b/compiler/optimizing/pretty_printer_test.cc
index 7e604e99b4..da6b294d71 100644
--- a/compiler/optimizing/pretty_printer_test.cc
+++ b/compiler/optimizing/pretty_printer_test.cc
@@ -45,7 +45,8 @@ TEST(PrettyPrinterTest, ReturnVoid) {
const char* expected =
"BasicBlock 0, succ: 1\n"
- " 2: Goto 1\n"
+ " 2: SuspendCheck\n"
+ " 3: Goto 1\n"
"BasicBlock 1, pred: 0, succ: 2\n"
" 0: ReturnVoid\n"
"BasicBlock 2, pred: 1\n"
@@ -57,7 +58,8 @@ TEST(PrettyPrinterTest, ReturnVoid) {
TEST(PrettyPrinterTest, CFG1) {
const char* expected =
"BasicBlock 0, succ: 1\n"
- " 3: Goto 1\n"
+ " 3: SuspendCheck\n"
+ " 4: Goto 1\n"
"BasicBlock 1, pred: 0, succ: 2\n"
" 0: Goto 2\n"
"BasicBlock 2, pred: 1, succ: 3\n"
@@ -76,7 +78,8 @@ TEST(PrettyPrinterTest, CFG1) {
TEST(PrettyPrinterTest, CFG2) {
const char* expected =
"BasicBlock 0, succ: 1\n"
- " 4: Goto 1\n"
+ " 4: SuspendCheck\n"
+ " 5: Goto 1\n"
"BasicBlock 1, pred: 0, succ: 2\n"
" 0: Goto 2\n"
"BasicBlock 2, pred: 1, succ: 3\n"
@@ -97,15 +100,17 @@ TEST(PrettyPrinterTest, CFG2) {
TEST(PrettyPrinterTest, CFG3) {
const char* expected =
"BasicBlock 0, succ: 1\n"
- " 4: Goto 1\n"
+ " 5: SuspendCheck\n"
+ " 6: Goto 1\n"
"BasicBlock 1, pred: 0, succ: 3\n"
" 0: Goto 3\n"
"BasicBlock 2, pred: 3, succ: 4\n"
" 1: ReturnVoid\n"
"BasicBlock 3, pred: 1, succ: 2\n"
- " 2: Goto 2\n"
+ " 2: SuspendCheck\n"
+ " 3: Goto 2\n"
"BasicBlock 4, pred: 2\n"
- " 3: Exit\n";
+ " 4: Exit\n";
const uint16_t data1[] = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO | 0x200,
@@ -132,11 +137,13 @@ TEST(PrettyPrinterTest, CFG3) {
TEST(PrettyPrinterTest, CFG4) {
const char* expected =
"BasicBlock 0, succ: 1\n"
- " 2: Goto 1\n"
+ " 3: SuspendCheck\n"
+ " 4: Goto 1\n"
"BasicBlock 1, pred: 0, 1, succ: 1\n"
- " 0: Goto 1\n"
+ " 0: SuspendCheck\n"
+ " 1: Goto 1\n"
"BasicBlock 2\n"
- " 1: Exit\n";
+ " 2: Exit\n";
const uint16_t data1[] = ZERO_REGISTER_CODE_ITEM(
Instruction::NOP,
@@ -153,13 +160,15 @@ TEST(PrettyPrinterTest, CFG4) {
TEST(PrettyPrinterTest, CFG5) {
const char* expected =
"BasicBlock 0, succ: 1\n"
- " 3: Goto 1\n"
+ " 4: SuspendCheck\n"
+ " 5: Goto 1\n"
"BasicBlock 1, pred: 0, 2, succ: 3\n"
" 0: ReturnVoid\n"
"BasicBlock 2, succ: 1\n"
- " 1: Goto 1\n"
+ " 1: SuspendCheck\n"
+ " 2: Goto 1\n"
"BasicBlock 3, pred: 1\n"
- " 2: Exit\n";
+ " 3: Exit\n";
const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
Instruction::RETURN_VOID,
@@ -174,7 +183,8 @@ TEST(PrettyPrinterTest, CFG6) {
"BasicBlock 0, succ: 1\n"
" 0: Local [4, 3, 2]\n"
" 1: IntConstant [2]\n"
- " 10: Goto 1\n"
+ " 10: SuspendCheck\n"
+ " 11: Goto 1\n"
"BasicBlock 1, pred: 0, succ: 3, 2\n"
" 2: StoreLocal(0, 1)\n"
" 3: LoadLocal(0) [5]\n"
@@ -202,7 +212,8 @@ TEST(PrettyPrinterTest, CFG7) {
"BasicBlock 0, succ: 1\n"
" 0: Local [4, 3, 2]\n"
" 1: IntConstant [2]\n"
- " 10: Goto 1\n"
+ " 11: SuspendCheck\n"
+ " 12: Goto 1\n"
"BasicBlock 1, pred: 0, succ: 3, 2\n"
" 2: StoreLocal(0, 1)\n"
" 3: LoadLocal(0) [5]\n"
@@ -212,9 +223,10 @@ TEST(PrettyPrinterTest, CFG7) {
"BasicBlock 2, pred: 1, 3, succ: 3\n"
" 7: Goto 3\n"
"BasicBlock 3, pred: 1, 2, succ: 2\n"
- " 8: Goto 2\n"
+ " 8: SuspendCheck\n"
+ " 9: Goto 2\n"
"BasicBlock 4\n"
- " 9: Exit\n";
+ " 10: Exit\n";
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
@@ -230,7 +242,8 @@ TEST(PrettyPrinterTest, IntConstant) {
"BasicBlock 0, succ: 1\n"
" 0: Local [2]\n"
" 1: IntConstant [2]\n"
- " 5: Goto 1\n"
+ " 5: SuspendCheck\n"
+ " 6: Goto 1\n"
"BasicBlock 1, pred: 0, succ: 2\n"
" 2: StoreLocal(0, 1)\n"
" 3: ReturnVoid\n"
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index b451ef4c4b..786261121b 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -440,7 +440,8 @@ bool RegisterAllocator::TryAllocateFreeReg(LiveInterval* current) {
DCHECK(inactive->HasRegister());
size_t next_intersection = inactive->FirstIntersectionWith(current);
if (next_intersection != kNoLifetime) {
- free_until[inactive->GetRegister()] = next_intersection;
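+ // Several inactive intervals may share this register; keep the earliest
+ // intersection, since the register is only free until then.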
+ free_until[inactive->GetRegister()] =
+ std::min(free_until[inactive->GetRegister()], next_intersection);
}
}
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
index f737491026..7d397e3649 100644
--- a/compiler/optimizing/register_allocator.h
+++ b/compiler/optimizing/register_allocator.h
@@ -21,6 +21,8 @@
#include "primitive.h"
#include "utils/growable_array.h"
+#include "gtest/gtest.h"
+
namespace art {
class CodeGenerator;
@@ -177,6 +179,8 @@ class RegisterAllocator {
// Slots reserved for out arguments.
size_t reserved_out_slots_;
+ FRIEND_TEST(RegisterAllocatorTest, FreeUntil);
+
DISALLOW_COPY_AND_ASSIGN(RegisterAllocator);
};
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index dcae46b4bd..3e3b6b12a2 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -392,4 +392,59 @@ TEST(RegisterAllocatorTest, DeadPhi) {
ASSERT_TRUE(register_allocator.Validate(false));
}
+/**
+ * Test that the TryAllocateFreeReg method works in the presence of inactive intervals
+ * that share the same register. It should split the interval it is currently
+ * allocating for at the earliest position where any of the inactive intervals next intersects it.
+ */
+TEST(RegisterAllocatorTest, FreeUntil) {
+ const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ Instruction::CONST_4 | 0 | 0,
+ Instruction::RETURN);
+
+ ArenaPool pool;
+ ArenaAllocator allocator(&pool);
+ HGraph* graph = BuildSSAGraph(data, &allocator);
+ SsaDeadPhiElimination(graph).Run();
+ x86::CodeGeneratorX86 codegen(graph);
+ SsaLivenessAnalysis liveness(*graph, &codegen);
+ liveness.Analyze();
+ RegisterAllocator register_allocator(&allocator, &codegen, liveness);
+
+ // Add an artificial range to cover the temps that will be put in the unhandled list.
+ LiveInterval* unhandled = graph->GetEntryBlock()->GetFirstInstruction()->GetLiveInterval();
+ unhandled->AddLoopRange(0, 60);
+
+ // Add three temps holding the same register, and starting at different positions.
+ // Put the one that should be picked in the middle of the inactive list to ensure
+ // we do not depend on an order.
+ LiveInterval* interval = LiveInterval::MakeTempInterval(&allocator, nullptr, Primitive::kPrimInt);
+ interval->SetRegister(0);
+ interval->AddRange(40, 50);
+ register_allocator.inactive_.Add(interval);
+
+ interval = LiveInterval::MakeTempInterval(&allocator, nullptr, Primitive::kPrimInt);
+ interval->SetRegister(0);
+ interval->AddRange(20, 30);
+ register_allocator.inactive_.Add(interval);
+
+ interval = LiveInterval::MakeTempInterval(&allocator, nullptr, Primitive::kPrimInt);
+ interval->SetRegister(0);
+ interval->AddRange(60, 70);
+ register_allocator.inactive_.Add(interval);
+
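+ // Pretend only one register is available so TryAllocateFreeReg must weigh all
+ // three inactive intervals against the unhandled interval.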
+ register_allocator.number_of_registers_ = 1;
+ register_allocator.registers_array_ = allocator.AllocArray<size_t>(1);
+ register_allocator.processing_core_registers_ = true;
+ register_allocator.unhandled_ = &register_allocator.unhandled_core_intervals_;
+
+ register_allocator.TryAllocateFreeReg(unhandled);
+
+ // Check that we have split the interval.
+ ASSERT_EQ(1u, register_allocator.unhandled_->Size());
+ // Check that we now need to find a new register where the next interval
+ // that uses the register starts.
+ ASSERT_EQ(20u, register_allocator.unhandled_->Get(0)->GetStart());
+}
+
} // namespace art
diff --git a/compiler/optimizing/ssa_test.cc b/compiler/optimizing/ssa_test.cc
index 088a5c4240..99fd9ebacb 100644
--- a/compiler/optimizing/ssa_test.cc
+++ b/compiler/optimizing/ssa_test.cc
@@ -83,6 +83,9 @@ static void TestCode(const uint16_t* data, const char* expected) {
HGraph* graph = builder.BuildGraph(*item);
ASSERT_NE(graph, nullptr);
+ // The suspend check implementation may change in the future, and this test
+ // relies on the exact ordering of instructions.
+ RemoveSuspendChecks(graph);
graph->BuildDominatorTree();
graph->TransformToSSA();
ReNumberInstructions(graph);
diff --git a/compiler/optimizing/suspend_check_test.cc b/compiler/optimizing/suspend_check_test.cc
new file mode 100644
index 0000000000..2e48ee8e7e
--- /dev/null
+++ b/compiler/optimizing/suspend_check_test.cc
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "builder.h"
+#include "dex_instruction.h"
+#include "nodes.h"
+#include "optimizing_unit_test.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+/**
+ * Check that the HGraphBuilder adds suspend checks to backward branches.
+ */
+
+static void TestCode(const uint16_t* data) {
+ ArenaPool pool;
+ ArenaAllocator allocator(&pool);
+ HGraphBuilder builder(&allocator);
+ const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
+ HGraph* graph = builder.BuildGraph(*item);
+ ASSERT_NE(graph, nullptr);
+
+ HBasicBlock* first_block = graph->GetEntryBlock()->GetSuccessors().Get(0);
+ HInstruction* first_instruction = first_block->GetFirstInstruction();
+ // Account for some tests having a store local as first instruction.
+ ASSERT_TRUE(first_instruction->IsSuspendCheck()
+ || first_instruction->GetNext()->IsSuspendCheck());
+}
+
+TEST(SuspendCheckTest, CFG1) {
+ const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
+ Instruction::NOP,
+ Instruction::GOTO | 0xFF00);
+
+ TestCode(data);
+}
+
+TEST(SuspendCheckTest, CFG2) {
+ const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
+ Instruction::GOTO_32, 0, 0);
+
+ TestCode(data);
+}
+
+TEST(SuspendCheckTest, CFG3) {
+ const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ Instruction::CONST_4 | 0 | 0,
+ Instruction::IF_EQ, 0xFFFF,
+ Instruction::RETURN_VOID);
+
+ TestCode(data);
+}
+
+TEST(SuspendCheckTest, CFG4) {
+ const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ Instruction::CONST_4 | 0 | 0,
+ Instruction::IF_NE, 0xFFFF,
+ Instruction::RETURN_VOID);
+
+ TestCode(data);
+}
+
+TEST(SuspendCheckTest, CFG5) {
+ const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ Instruction::CONST_4 | 0 | 0,
+ Instruction::IF_EQZ, 0xFFFF,
+ Instruction::RETURN_VOID);
+
+ TestCode(data);
+}
+
+TEST(SuspendCheckTest, CFG6) {
+ const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ Instruction::CONST_4 | 0 | 0,
+ Instruction::IF_NEZ, 0xFFFF,
+ Instruction::RETURN_VOID);
+
+ TestCode(data);
+}
+} // namespace art