Diffstat (limited to 'compiler')
-rw-r--r--  compiler/driver/compiler_driver.cc  134
-rw-r--r--  compiler/driver/compiler_driver.h  10
-rw-r--r--  compiler/oat_test.cc  2
-rw-r--r--  compiler/optimizing/builder.h  6
-rw-r--r--  compiler/optimizing/code_generator.cc  6
-rw-r--r--  compiler/optimizing/code_generator_arm.cc  16
-rw-r--r--  compiler/optimizing/code_generator_arm.h  2
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc  19
-rw-r--r--  compiler/optimizing/code_generator_arm64.h  2
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc  16
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.h  2
-rw-r--r--  compiler/optimizing/code_generator_mips.cc  9
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc  11
-rw-r--r--  compiler/optimizing/code_generator_x86.cc  18
-rw-r--r--  compiler/optimizing/code_generator_x86.h  4
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc  18
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h  4
-rw-r--r--  compiler/optimizing/dex_cache_array_fixups_arm.cc  5
-rw-r--r--  compiler/optimizing/dex_cache_array_fixups_mips.cc  5
-rw-r--r--  compiler/optimizing/inliner.cc  26
-rw-r--r--  compiler/optimizing/inliner.h  2
-rw-r--r--  compiler/optimizing/instruction_builder.cc  142
-rw-r--r--  compiler/optimizing/instruction_builder.h  13
-rw-r--r--  compiler/optimizing/intrinsics.cc  2
-rw-r--r--  compiler/optimizing/load_store_elimination.cc  2
-rw-r--r--  compiler/optimizing/nodes.cc  43
-rw-r--r--  compiler/optimizing/nodes.h  83
-rw-r--r--  compiler/optimizing/nodes_test.cc  8
-rw-r--r--  compiler/optimizing/prepare_for_register_allocation.cc  14
-rw-r--r--  compiler/optimizing/reference_type_propagation.cc  29
-rw-r--r--  compiler/optimizing/sharpening.cc  32
-rw-r--r--  compiler/optimizing/sharpening.h  2
-rw-r--r--  compiler/optimizing/stack_map_stream.cc  68
-rw-r--r--  compiler/optimizing/stack_map_stream.h  8
-rw-r--r--  compiler/optimizing/stack_map_test.cc  53
-rw-r--r--  compiler/utils/assembler_thumb_test_expected.cc.inc  2
-rw-r--r--  compiler/utils/x86/assembler_x86.cc  145
-rw-r--r--  compiler/utils/x86/assembler_x86.h  23
-rw-r--r--  compiler/utils/x86/assembler_x86_test.cc  92
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.cc  162
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.h  22
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64_test.cc  72
42 files changed, 927 insertions, 407 deletions
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 29502666ad..faf8b41be1 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -1060,13 +1060,13 @@ class ResolveCatchBlockExceptionsClassVisitor : public ClassVisitor {
virtual bool operator()(ObjPtr<mirror::Class> c) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
const auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
for (auto& m : c->GetMethods(pointer_size)) {
- ResolveExceptionsForMethod(&m, pointer_size);
+ ResolveExceptionsForMethod(&m);
}
return true;
}
private:
- void ResolveExceptionsForMethod(ArtMethod* method_handle, PointerSize pointer_size)
+ void ResolveExceptionsForMethod(ArtMethod* method_handle)
REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile::CodeItem* code_item = method_handle->GetCodeItem();
if (code_item == nullptr) {
@@ -1088,8 +1088,7 @@ class ResolveCatchBlockExceptionsClassVisitor : public ClassVisitor {
dex::TypeIndex encoded_catch_handler_handlers_type_idx =
dex::TypeIndex(DecodeUnsignedLeb128(&encoded_catch_handler_list));
// Add to set of types to resolve if not already in the dex cache resolved types
- if (!method_handle->IsResolvedTypeIdx(encoded_catch_handler_handlers_type_idx,
- pointer_size)) {
+ if (!method_handle->IsResolvedTypeIdx(encoded_catch_handler_handlers_type_idx)) {
exceptions_to_resolve_.emplace(encoded_catch_handler_handlers_type_idx,
method_handle->GetDexFile());
}
@@ -1950,66 +1949,82 @@ static void PopulateVerifiedMethods(const DexFile& dex_file,
DCHECK(!it.HasNext());
}
-void CompilerDriver::Verify(jobject jclass_loader,
- const std::vector<const DexFile*>& dex_files,
- TimingLogger* timings) {
+bool CompilerDriver::FastVerify(jobject jclass_loader,
+ const std::vector<const DexFile*>& dex_files,
+ TimingLogger* timings) {
verifier::VerifierDeps* verifier_deps =
Runtime::Current()->GetCompilerCallbacks()->GetVerifierDeps();
// If there is an existing `VerifierDeps`, try to use it for fast verification.
- if (verifier_deps != nullptr) {
- TimingLogger::ScopedTiming t("Fast Verify", timings);
- ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<2> hs(soa.Self());
- Handle<mirror::ClassLoader> class_loader(
- hs.NewHandle(soa.Decode<mirror::ClassLoader>(jclass_loader)));
- MutableHandle<mirror::Class> cls(hs.NewHandle<mirror::Class>(nullptr));
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- if (verifier_deps->ValidateDependencies(class_loader, soa.Self())) {
- // We successfully validated the dependencies, now update class status
- // of verified classes. Note that the dependencies also record which classes
- // could not be fully verified; we could try again, but that would hurt verification
- // time. So instead we assume these classes still need to be verified at
- // runtime.
- for (const DexFile* dex_file : dex_files) {
- // Fetch the list of unverified classes and turn it into a set for faster
- // lookups.
- const std::vector<dex::TypeIndex>& unverified_classes =
- verifier_deps->GetUnverifiedClasses(*dex_file);
- std::set<dex::TypeIndex> set(unverified_classes.begin(), unverified_classes.end());
- for (uint32_t i = 0; i < dex_file->NumClassDefs(); ++i) {
- const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
- if (set.find(class_def.class_idx_) == set.end()) {
- if (!GetCompilerOptions().IsAnyMethodCompilationEnabled()) {
- // Just update the compiled_classes_ map. The compiler doesn't need to resolve
- // the type.
- compiled_classes_.Overwrite(
- ClassReference(dex_file, i), new CompiledClass(mirror::Class::kStatusVerified));
- } else {
- // Resolve the type, so later compilation stages know they don't need to verify
- // the class.
- const char* descriptor = dex_file->GetClassDescriptor(class_def);
- cls.Assign(class_linker->FindClass(soa.Self(), descriptor, class_loader));
- if (cls.Get() != nullptr) {
- ObjectLock<mirror::Class> lock(soa.Self(), cls);
- mirror::Class::SetStatus(cls, mirror::Class::kStatusVerified, soa.Self());
- } else {
- DCHECK(soa.Self()->IsExceptionPending());
- soa.Self()->ClearException();
- }
- // Create `VerifiedMethod`s for each method; the compiler expects one for
- // quickening or compiling.
- // Note that this means:
- // - We're only going to compile methods that did verify.
- // - Quickening will not do checkcast elision.
- // TODO(ngeoffray): Reconsider this once we refactor compiler filters.
- PopulateVerifiedMethods(*dex_file, i, verification_results_);
+ if (verifier_deps == nullptr) {
+ return false;
+ }
+ TimingLogger::ScopedTiming t("Fast Verify", timings);
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader>(jclass_loader)));
+ MutableHandle<mirror::Class> cls(hs.NewHandle<mirror::Class>(nullptr));
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ if (!verifier_deps->ValidateDependencies(class_loader, soa.Self())) {
+ return false;
+ }
+
+ // We successfully validated the dependencies, now update class status
+ // of verified classes. Note that the dependencies also record which classes
+ // could not be fully verified; we could try again, but that would hurt verification
+ // time. So instead we assume these classes still need to be verified at
+ // runtime.
+ for (const DexFile* dex_file : dex_files) {
+ // Fetch the list of unverified classes and turn it into a set for faster
+ // lookups.
+ const std::vector<dex::TypeIndex>& unverified_classes =
+ verifier_deps->GetUnverifiedClasses(*dex_file);
+ std::set<dex::TypeIndex> set(unverified_classes.begin(), unverified_classes.end());
+ for (uint32_t i = 0; i < dex_file->NumClassDefs(); ++i) {
+ const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
+ if (set.find(class_def.class_idx_) == set.end()) {
+ if (!GetCompilerOptions().IsAnyMethodCompilationEnabled()) {
+ // Just update the compiled_classes_ map. The compiler doesn't need to resolve
+ // the type.
+ compiled_classes_.Overwrite(
+ ClassReference(dex_file, i), new CompiledClass(mirror::Class::kStatusVerified));
+ } else {
+ // Resolve the type, so later compilation stages know they don't need to verify
+ // the class.
+ const char* descriptor = dex_file->GetClassDescriptor(class_def);
+ cls.Assign(class_linker->FindClass(soa.Self(), descriptor, class_loader));
+ if (cls.Get() != nullptr) {
+ // Check that the class is resolved with the current dex file. We might get
+ // a boot image class, or a class in a different dex file for multidex, and
+ // we should not update the status in that case.
+ if (&cls->GetDexFile() == dex_file) {
+ ObjectLock<mirror::Class> lock(soa.Self(), cls);
+ mirror::Class::SetStatus(cls, mirror::Class::kStatusVerified, soa.Self());
}
+ } else {
+ DCHECK(soa.Self()->IsExceptionPending());
+ soa.Self()->ClearException();
}
+ // Create `VerifiedMethod`s for each method; the compiler expects one for
+ // quickening or compiling.
+ // Note that this means:
+ // - We're only going to compile methods that did verify.
+ // - Quickening will not do checkcast elision.
+ // TODO(ngeoffray): Reconsider this once we refactor compiler filters.
+ PopulateVerifiedMethods(*dex_file, i, verification_results_);
}
}
- return;
}
}
+ return true;
+}
+
+void CompilerDriver::Verify(jobject jclass_loader,
+ const std::vector<const DexFile*>& dex_files,
+ TimingLogger* timings) {
+ if (FastVerify(jclass_loader, dex_files, timings)) {
+ return;
+ }
// If there is no existing `verifier_deps` (because of non-existing vdex), or
// the existing `verifier_deps` is not valid anymore, create a new one for
@@ -2017,7 +2032,7 @@ void CompilerDriver::Verify(jobject jclass_loader,
// Then dex2oat can update the vdex file with these new dependencies.
if (!GetCompilerOptions().IsBootImage()) {
// Create the main VerifierDeps, and set it to this thread.
- verifier_deps = new verifier::VerifierDeps(dex_files);
+ verifier::VerifierDeps* verifier_deps = new verifier::VerifierDeps(dex_files);
Runtime::Current()->GetCompilerCallbacks()->SetVerifierDeps(verifier_deps);
Thread::Current()->SetVerifierDeps(verifier_deps);
// Create per-thread VerifierDeps to avoid contention on the main one.
@@ -2026,6 +2041,7 @@ void CompilerDriver::Verify(jobject jclass_loader,
worker->GetThread()->SetVerifierDeps(new verifier::VerifierDeps(dex_files));
}
}
+
// Note: verification should not be pulling in classes anymore when compiling the boot image,
// as all should have been resolved before. As such, doing this in parallel should still
// be deterministic.
@@ -2041,6 +2057,7 @@ void CompilerDriver::Verify(jobject jclass_loader,
if (!GetCompilerOptions().IsBootImage()) {
// Merge all VerifierDeps into the main one.
+ verifier::VerifierDeps* verifier_deps = Thread::Current()->GetVerifierDeps();
for (ThreadPoolWorker* worker : parallel_thread_pool_->GetWorkers()) {
verifier::VerifierDeps* thread_deps = worker->GetThread()->GetVerifierDeps();
worker->GetThread()->SetVerifierDeps(nullptr);
@@ -2061,7 +2078,10 @@ class VerifyClassVisitor : public CompilationVisitor {
ScopedObjectAccess soa(Thread::Current());
const DexFile& dex_file = *manager_->GetDexFile();
if (!manager_->GetCompiler()->ShouldVerifyClassBasedOnProfile(dex_file, class_def_index)) {
- // Skip verification since the class is not in the profile.
+ // Skip verification since the class is not in the profile, and let the VerifierDeps know
+ // that the class will need to be verified at runtime.
+ verifier::VerifierDeps::MaybeRecordVerificationStatus(
+ dex_file, dex::TypeIndex(class_def_index), verifier::MethodVerifier::kSoftFailure);
return;
}
const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
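
The `FastVerify` path above copies each dex file's unverified-class list into a `std::set` before scanning class defs, trading one O(n log n) build for an O(log n) membership test per class. A minimal standalone sketch of that lookup pattern (`TypeIndex` is a hypothetical stand-in for `dex::TypeIndex`):

    #include <cstdint>
    #include <set>
    #include <vector>

    using TypeIndex = uint16_t;  // hypothetical stand-in for dex::TypeIndex

    // Mirrors the FastVerify lookup: a class was fast-verified iff its type
    // index is absent from the unverified set.
    bool WasFastVerified(const std::set<TypeIndex>& unverified, TypeIndex idx) {
      return unverified.find(idx) == unverified.end();
    }

    int main() {
      std::vector<TypeIndex> unverified_classes = {3, 7};  // from VerifierDeps
      std::set<TypeIndex> set(unverified_classes.begin(), unverified_classes.end());
      return WasFastVerified(set, 5) ? 0 : 1;
    }
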
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 2e3b7c8eb5..6bfdd4da9c 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -433,12 +433,18 @@ class CompilerDriver {
TimingLogger* timings)
REQUIRES(!Locks::mutator_lock_);
+ // Do fast verification through VerifierDeps if possible. Return whether
+ // verification was successful.
// NO_THREAD_SAFETY_ANALYSIS as the method accesses a guarded value in a
// single-threaded way.
+ bool FastVerify(jobject class_loader,
+ const std::vector<const DexFile*>& dex_files,
+ TimingLogger* timings)
+ NO_THREAD_SAFETY_ANALYSIS;
+
void Verify(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
- TimingLogger* timings)
- NO_THREAD_SAFETY_ANALYSIS;
+ TimingLogger* timings);
void VerifyDexFile(jobject class_loader,
const DexFile& dex_file,
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 86d92ff0b5..c69ed3198b 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -487,7 +487,7 @@ TEST_F(OatTest, OatHeaderSizeCheck) {
EXPECT_EQ(72U, sizeof(OatHeader));
EXPECT_EQ(4U, sizeof(OatMethodOffsets));
EXPECT_EQ(20U, sizeof(OatQuickMethodHeader));
- EXPECT_EQ(164 * static_cast<size_t>(GetInstructionSetPointerSize(kRuntimeISA)),
+ EXPECT_EQ(159 * static_cast<size_t>(GetInstructionSetPointerSize(kRuntimeISA)),
sizeof(QuickEntryPoints));
}
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index f896f1199e..8cf4089eba 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -63,7 +63,8 @@ class HGraphBuilder : public ValueObject {
driver,
interpreter_metadata,
compiler_stats,
- dex_cache) {}
+ dex_cache,
+ handles) {}
// Only for unit testing.
HGraphBuilder(HGraph* graph,
@@ -90,7 +91,8 @@ class HGraphBuilder : public ValueObject {
/* compiler_driver */ nullptr,
/* interpreter_metadata */ nullptr,
/* compiler_stats */ nullptr,
- null_dex_cache_) {}
+ null_dex_cache_,
+ handles) {}
GraphAnalysisResult BuildGraph();
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 6c680c8dc6..70c2738010 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -936,10 +936,10 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo
if (environment->GetParent() != nullptr) {
// We emit the parent environment first.
EmitEnvironment(environment->GetParent(), slow_path);
- stack_map_stream_.BeginInlineInfoEntry(environment->GetMethodIdx(),
+ stack_map_stream_.BeginInlineInfoEntry(environment->GetMethod(),
environment->GetDexPc(),
- environment->GetInvokeType(),
- environment->Size());
+ environment->Size(),
+ &graph_->GetDexFile());
}
// Walk over the environment, and record the location of dex registers.
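
The `EmitEnvironment` hunk above now hands the inlined method itself (plus the graph's dex file) to `BeginInlineInfoEntry`, and it emits parent environments before opening the entry for the current frame, so inline infos land outermost-first in the stream. A hedged standalone model of that recursion (the types below are simplified stand-ins, not ART's):

    #include <cstdint>
    #include <vector>

    struct Method {};                      // stand-in for ArtMethod

    struct Environment {                   // stand-in for HEnvironment
      const Environment* parent;
      const Method* method;
      uint32_t dex_pc;
    };

    // Parents first: by the time this frame's inline entry is opened, every
    // enclosing frame has already been written out.
    void EmitEnvironment(const Environment* env, std::vector<const Method*>* stream) {
      if (env == nullptr) return;
      if (env->parent != nullptr) {
        EmitEnvironment(env->parent, stream);
        stream->push_back(env->method);    // models BeginInlineInfoEntry(...)
      }
      // ... record the dex register locations for this frame ...
    }
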
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index ef4bd1e59d..07b174698a 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -5770,7 +5770,9 @@ void LocationsBuilderARM::VisitLoadClass(HLoadClass* cls) {
locations->SetOut(Location::RequiresRegister());
}
-void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
codegen_->GenerateLoadClassRuntimeCall(cls);
@@ -5821,8 +5823,9 @@ void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
}
case HLoadClass::LoadKind::kBootImageAddress: {
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
- DCHECK_NE(cls->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
+ DCHECK_NE(address, 0u);
__ LoadLiteral(out, codegen_->DeduplicateBootImageAddressLiteral(address));
break;
}
@@ -5842,7 +5845,7 @@ void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
case HLoadClass::LoadKind::kJitTableAddress: {
__ LoadLiteral(out, codegen_->DeduplicateJitClassLiteral(cls->GetDexFile(),
cls->GetTypeIndex(),
- cls->GetAddress()));
+ cls->GetClass()));
// /* GcRoot<mirror::Class> */ out = *out
GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption);
break;
@@ -7348,8 +7351,9 @@ Literal* CodeGeneratorARM::DeduplicateJitStringLiteral(const DexFile& dex_file,
Literal* CodeGeneratorARM::DeduplicateJitClassLiteral(const DexFile& dex_file,
dex::TypeIndex type_index,
- uint64_t address) {
- jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index), address);
+ Handle<mirror::Class> handle) {
+ jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index),
+ reinterpret_cast64<uint64_t>(handle.GetReference()));
return jit_class_patches_.GetOrCreate(
TypeReference(&dex_file, type_index),
[this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
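
`DeduplicateJitClassLiteral` above records the address of the handle's reference slot (`reinterpret_cast64` of `handle.GetReference()`) in `jit_class_roots_`, and reuses a single literal per (dex file, type index) key through `GetOrCreate`. A standalone model of that get-or-create map idiom, with hypothetical stand-in types:

    #include <cstdint>
    #include <map>
    #include <utility>

    struct Literal { uint32_t placeholder = 0u; };           // stand-in for the assembler's Literal
    using TypeReference = std::pair<const void*, uint16_t>;  // (dex file, type index)

    // One literal per key: created by the factory on first request, and the
    // same slot is handed back afterwards so later loads patch the same word.
    template <typename Factory>
    Literal* GetOrCreate(std::map<TypeReference, Literal>* patches,
                         const TypeReference& key,
                         Factory&& factory) {
      auto it = patches->find(key);
      if (it == patches->end()) {
        it = patches->emplace(key, factory()).first;
      }
      return &it->second;
    }
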
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index bd237e96a5..52d18575ff 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -496,7 +496,7 @@ class CodeGeneratorARM : public CodeGenerator {
Handle<mirror::String> handle);
Literal* DeduplicateJitClassLiteral(const DexFile& dex_file,
dex::TypeIndex type_index,
- uint64_t address);
+ Handle<mirror::Class> handle);
void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index a9617e1212..b094e54f8a 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -4013,7 +4013,7 @@ void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invok
break;
case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative: {
// Add ADRP with its PC-relative DexCache access patch.
- const DexFile& dex_file = invoke->GetDexFile();
+ const DexFile& dex_file = invoke->GetDexFileForPcRelativeDexCache();
uint32_t element_offset = invoke->GetDexCacheArrayOffset();
vixl::aarch64::Label* adrp_label = NewPcRelativeDexCacheArrayPatch(dex_file, element_offset);
EmitAdrpPlaceholder(adrp_label, XRegisterFrom(temp));
@@ -4181,8 +4181,9 @@ vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateJitStringLitera
}
vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateJitClassLiteral(
- const DexFile& dex_file, dex::TypeIndex type_index, uint64_t address) {
- jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index), address);
+ const DexFile& dex_file, dex::TypeIndex type_index, Handle<mirror::Class> handle) {
+ jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index),
+ reinterpret_cast64<uint64_t>(handle.GetReference()));
return jit_class_patches_.GetOrCreate(
TypeReference(&dex_file, type_index),
[this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); });
@@ -4377,7 +4378,9 @@ void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) {
locations->SetOut(Location::RequiresRegister());
}
-void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) {
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
codegen_->GenerateLoadClassRuntimeCall(cls);
@@ -4426,8 +4429,10 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) {
}
case HLoadClass::LoadKind::kBootImageAddress: {
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
- DCHECK(cls->GetAddress() != 0u && IsUint<32>(cls->GetAddress()));
- __ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(cls->GetAddress()));
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
+ DCHECK_NE(address, 0u);
+ __ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(address));
break;
}
case HLoadClass::LoadKind::kBssEntry: {
@@ -4452,7 +4457,7 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) {
case HLoadClass::LoadKind::kJitTableAddress: {
__ Ldr(out, codegen_->DeduplicateJitClassLiteral(cls->GetDexFile(),
cls->GetTypeIndex(),
- cls->GetAddress()));
+ cls->GetClass()));
GenerateGcRootFieldLoad(cls,
out_loc,
out.X(),
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index c7a06145e4..a9dca92980 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -579,7 +579,7 @@ class CodeGeneratorARM64 : public CodeGenerator {
Handle<mirror::String> handle);
vixl::aarch64::Literal<uint32_t>* DeduplicateJitClassLiteral(const DexFile& dex_file,
dex::TypeIndex string_index,
- uint64_t address);
+ Handle<mirror::Class> handle);
void EmitAdrpPlaceholder(vixl::aarch64::Label* fixup_label, vixl::aarch64::Register reg);
void EmitAddPlaceholder(vixl::aarch64::Label* fixup_label,
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index f496d29b59..ecabc58c4d 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -5848,7 +5848,9 @@ void LocationsBuilderARMVIXL::VisitLoadClass(HLoadClass* cls) {
locations->SetOut(Location::RequiresRegister());
}
-void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) {
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
codegen_->GenerateLoadClassRuntimeCall(cls);
@@ -5894,8 +5896,9 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) {
}
case HLoadClass::LoadKind::kBootImageAddress: {
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
- DCHECK_NE(cls->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
+ DCHECK_NE(address, 0u);
__ Ldr(out, codegen_->DeduplicateBootImageAddressLiteral(address));
break;
}
@@ -5910,7 +5913,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) {
case HLoadClass::LoadKind::kJitTableAddress: {
__ Ldr(out, codegen_->DeduplicateJitClassLiteral(cls->GetDexFile(),
cls->GetTypeIndex(),
- cls->GetAddress()));
+ cls->GetClass()));
// /* GcRoot<mirror::Class> */ out = *out
GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption);
break;
@@ -7468,8 +7471,9 @@ VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateJitStringLiteral(
VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateJitClassLiteral(const DexFile& dex_file,
dex::TypeIndex type_index,
- uint64_t address) {
- jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index), address);
+ Handle<mirror::Class> handle) {
+ jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index),
+ reinterpret_cast64<uint64_t>(handle.GetReference()));
return jit_class_patches_.GetOrCreate(
TypeReference(&dex_file, type_index),
[this]() {
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 0f0a9540ba..be653535ea 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -579,7 +579,7 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
Handle<mirror::String> handle);
VIXLUInt32Literal* DeduplicateJitClassLiteral(const DexFile& dex_file,
dex::TypeIndex type_index,
- uint64_t address);
+ Handle<mirror::Class> handle);
void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 1f4ff279e8..24234e18c1 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -5490,7 +5490,9 @@ void LocationsBuilderMIPS::VisitLoadClass(HLoadClass* cls) {
locations->SetOut(Location::RequiresRegister());
}
-void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) {
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
codegen_->GenerateLoadClassRuntimeCall(cls);
@@ -5548,8 +5550,9 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) {
}
case HLoadClass::LoadKind::kBootImageAddress: {
DCHECK(!kEmitCompilerReadBarrier);
- DCHECK_NE(cls->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
+ DCHECK_NE(address, 0u);
__ LoadLiteral(out,
base_or_current_method_reg,
codegen_->DeduplicateBootImageAddressLiteral(address));
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index a350de7a51..fc8fb7acb2 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -3395,7 +3395,7 @@ void CodeGeneratorMIPS64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invo
case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative: {
uint32_t offset = invoke->GetDexCacheArrayOffset();
CodeGeneratorMIPS64::PcRelativePatchInfo* info =
- NewPcRelativeDexCacheArrayPatch(invoke->GetDexFile(), offset);
+ NewPcRelativeDexCacheArrayPatch(invoke->GetDexFileForPcRelativeDexCache(), offset);
EmitPcRelativeAddressPlaceholderHigh(info, AT);
__ Ld(temp.AsRegister<GpuRegister>(), AT, /* placeholder */ 0x5678);
break;
@@ -3524,7 +3524,9 @@ void LocationsBuilderMIPS64::VisitLoadClass(HLoadClass* cls) {
locations->SetOut(Location::RequiresRegister());
}
-void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) {
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
codegen_->GenerateLoadClassRuntimeCall(cls);
@@ -3569,8 +3571,9 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) {
}
case HLoadClass::LoadKind::kBootImageAddress: {
DCHECK(!kEmitCompilerReadBarrier);
- DCHECK_NE(cls->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
+ DCHECK_NE(address, 0u);
__ LoadLiteral(out,
kLoadUnsignedWord,
codegen_->DeduplicateBootImageAddressLiteral(address));
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index a850d38dca..cc727d2068 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -4521,7 +4521,7 @@ Location CodeGeneratorX86::GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticO
__ movl(temp.AsRegister<Register>(), Address(base_reg, kDummy32BitOffset));
// Bind a new fixup label at the end of the "movl" insn.
uint32_t offset = invoke->GetDexCacheArrayOffset();
- __ Bind(NewPcRelativeDexCacheArrayPatch(invoke->GetDexFile(), offset));
+ __ Bind(NewPcRelativeDexCacheArrayPatch(invoke->GetDexFileForPcRelativeDexCache(), offset));
break;
}
case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
@@ -6055,15 +6055,18 @@ void LocationsBuilderX86::VisitLoadClass(HLoadClass* cls) {
Label* CodeGeneratorX86::NewJitRootClassPatch(const DexFile& dex_file,
dex::TypeIndex dex_index,
- uint64_t address) {
- jit_class_roots_.Overwrite(TypeReference(&dex_file, dex_index), address);
+ Handle<mirror::Class> handle) {
+ jit_class_roots_.Overwrite(TypeReference(&dex_file, dex_index),
+ reinterpret_cast64<uint64_t>(handle.GetReference()));
// Add a patch entry and return the label.
jit_class_patches_.emplace_back(dex_file, dex_index.index_);
PatchInfo<Label>* info = &jit_class_patches_.back();
return &info->label;
}
-void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) {
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
codegen_->GenerateLoadClassRuntimeCall(cls);
@@ -6110,8 +6113,9 @@ void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) {
}
case HLoadClass::LoadKind::kBootImageAddress: {
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
- DCHECK_NE(cls->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
+ DCHECK_NE(address, 0u);
__ movl(out, Immediate(address));
codegen_->RecordSimplePatch();
break;
@@ -6127,7 +6131,7 @@ void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) {
case HLoadClass::LoadKind::kJitTableAddress: {
Address address = Address::Absolute(CodeGeneratorX86::kDummy32BitOffset);
Label* fixup_label = codegen_->NewJitRootClassPatch(
- cls->GetDexFile(), cls->GetTypeIndex(), cls->GetAddress());
+ cls->GetDexFile(), cls->GetTypeIndex(), cls->GetClass());
// /* GcRoot<mirror::Class> */ out = *address
GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, kCompilerReadBarrierOption);
break;
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index b86d080aa6..9eb97658da 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -419,7 +419,9 @@ class CodeGeneratorX86 : public CodeGenerator {
Label* NewJitRootStringPatch(const DexFile& dex_file,
dex::StringIndex dex_index,
Handle<mirror::String> handle);
- Label* NewJitRootClassPatch(const DexFile& dex_file, dex::TypeIndex dex_index, uint64_t address);
+ Label* NewJitRootClassPatch(const DexFile& dex_file,
+ dex::TypeIndex dex_index,
+ Handle<mirror::Class> handle);
void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 2691af8245..9adedab130 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -991,7 +991,7 @@ Location CodeGeneratorX86_64::GenerateCalleeMethodStaticOrDirectCall(HInvokeStat
Address::Absolute(kDummy32BitOffset, /* no_rip */ false));
// Bind a new fixup label at the end of the "movl" insn.
uint32_t offset = invoke->GetDexCacheArrayOffset();
- __ Bind(NewPcRelativeDexCacheArrayPatch(invoke->GetDexFile(), offset));
+ __ Bind(NewPcRelativeDexCacheArrayPatch(invoke->GetDexFileForPcRelativeDexCache(), offset));
break;
}
case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
@@ -5482,15 +5482,18 @@ void LocationsBuilderX86_64::VisitLoadClass(HLoadClass* cls) {
Label* CodeGeneratorX86_64::NewJitRootClassPatch(const DexFile& dex_file,
dex::TypeIndex dex_index,
- uint64_t address) {
- jit_class_roots_.Overwrite(TypeReference(&dex_file, dex_index), address);
+ Handle<mirror::Class> handle) {
+ jit_class_roots_.Overwrite(
+ TypeReference(&dex_file, dex_index), reinterpret_cast64<uint64_t>(handle.GetReference()));
// Add a patch entry and return the label.
jit_class_patches_.emplace_back(dex_file, dex_index.index_);
PatchInfo<Label>* info = &jit_class_patches_.back();
return &info->label;
}
-void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) {
+// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
+// move.
+void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
if (load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) {
codegen_->GenerateLoadClassRuntimeCall(cls);
@@ -5528,8 +5531,9 @@ void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) {
break;
case HLoadClass::LoadKind::kBootImageAddress: {
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
- DCHECK_NE(cls->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
+ uint32_t address = dchecked_integral_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
+ DCHECK_NE(address, 0u);
__ movl(out, Immediate(address)); // Zero-extended.
codegen_->RecordSimplePatch();
break;
@@ -5547,7 +5551,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) {
Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
/* no_rip */ true);
Label* fixup_label =
- codegen_->NewJitRootClassPatch(cls->GetDexFile(), cls->GetTypeIndex(), cls->GetAddress());
+ codegen_->NewJitRootClassPatch(cls->GetDexFile(), cls->GetTypeIndex(), cls->GetClass());
// /* GcRoot<mirror::Class> */ out = *address
GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, kCompilerReadBarrierOption);
break;
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 8b3ab4c438..3438b8159f 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -416,7 +416,9 @@ class CodeGeneratorX86_64 : public CodeGenerator {
Label* NewJitRootStringPatch(const DexFile& dex_file,
dex::StringIndex dex_index,
Handle<mirror::String> handle);
- Label* NewJitRootClassPatch(const DexFile& dex_file, dex::TypeIndex dex_index, uint64_t address);
+ Label* NewJitRootClassPatch(const DexFile& dex_file,
+ dex::TypeIndex dex_index,
+ Handle<mirror::Class> handle);
void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE;
diff --git a/compiler/optimizing/dex_cache_array_fixups_arm.cc b/compiler/optimizing/dex_cache_array_fixups_arm.cc
index a3140d00cb..9ddcd563ca 100644
--- a/compiler/optimizing/dex_cache_array_fixups_arm.cc
+++ b/compiler/optimizing/dex_cache_array_fixups_arm.cc
@@ -64,9 +64,10 @@ class DexCacheArrayFixupsVisitor : public HGraphVisitor {
// we need to add the dex cache arrays base as the special input.
if (invoke->HasPcRelativeDexCache() &&
!IsCallFreeIntrinsic<IntrinsicLocationsBuilderARMType>(invoke, codegen_)) {
- HArmDexCacheArraysBase* base = GetOrCreateDexCacheArrayBase(invoke->GetDexFile());
+ HArmDexCacheArraysBase* base =
+ GetOrCreateDexCacheArrayBase(invoke->GetDexFileForPcRelativeDexCache());
// Update the element offset in base.
- DexCacheArraysLayout layout(kArmPointerSize, &invoke->GetDexFile());
+ DexCacheArraysLayout layout(kArmPointerSize, &invoke->GetDexFileForPcRelativeDexCache());
base->UpdateElementOffset(layout.MethodOffset(invoke->GetDexMethodIndex()));
// Add the special argument base to the method.
DCHECK(!invoke->HasCurrentMethodInput());
diff --git a/compiler/optimizing/dex_cache_array_fixups_mips.cc b/compiler/optimizing/dex_cache_array_fixups_mips.cc
index 9a34f544ad..04a4294c48 100644
--- a/compiler/optimizing/dex_cache_array_fixups_mips.cc
+++ b/compiler/optimizing/dex_cache_array_fixups_mips.cc
@@ -59,9 +59,10 @@ class DexCacheArrayFixupsVisitor : public HGraphVisitor {
if (invoke->HasPcRelativeDexCache() &&
!IsCallFreeIntrinsic<IntrinsicLocationsBuilderMIPS>(invoke, codegen_)) {
// Initialize base for target method dex file if needed.
- HMipsDexCacheArraysBase* base = GetOrCreateDexCacheArrayBase(invoke->GetDexFile());
+ HMipsDexCacheArraysBase* base =
+ GetOrCreateDexCacheArrayBase(invoke->GetDexFileForPcRelativeDexCache());
// Update the element offset in base.
- DexCacheArraysLayout layout(kMipsPointerSize, &invoke->GetDexFile());
+ DexCacheArraysLayout layout(kMipsPointerSize, &invoke->GetDexFileForPcRelativeDexCache());
base->UpdateElementOffset(layout.MethodOffset(invoke->GetDexMethodIndex()));
// Add the special argument base to the method.
DCHECK(!invoke->HasCurrentMethodInput());
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index d7da46bbe7..78a4251e3a 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -474,10 +474,10 @@ bool HInliner::TryInlineMonomorphicCall(HInvoke* invoke_instruction,
HInstruction* receiver = invoke_instruction->InputAt(0);
HInstruction* cursor = invoke_instruction->GetPrevious();
HBasicBlock* bb_cursor = invoke_instruction->GetBlock();
- Handle<mirror::Class> handle = handles_->NewHandle(GetMonomorphicType(classes));
+ Handle<mirror::Class> monomorphic_type = handles_->NewHandle(GetMonomorphicType(classes));
if (!TryInlineAndReplace(invoke_instruction,
resolved_method,
- ReferenceTypeInfo::Create(handle, /* is_exact */ true),
+ ReferenceTypeInfo::Create(monomorphic_type, /* is_exact */ true),
/* do_rtp */ false,
/* cha_devirtualize */ false)) {
return false;
@@ -488,7 +488,7 @@ bool HInliner::TryInlineMonomorphicCall(HInvoke* invoke_instruction,
cursor,
bb_cursor,
class_index,
- GetMonomorphicType(classes),
+ monomorphic_type,
invoke_instruction,
/* with_deoptimization */ true);
@@ -533,11 +533,9 @@ HInstruction* HInliner::AddTypeGuard(HInstruction* receiver,
HInstruction* cursor,
HBasicBlock* bb_cursor,
dex::TypeIndex class_index,
- mirror::Class* klass,
+ Handle<mirror::Class> klass,
HInstruction* invoke_instruction,
bool with_deoptimization) {
- ScopedAssertNoThreadSuspension sants("Adding compiler type guard");
-
ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
HInstanceFieldGet* receiver_class = BuildGetReceiverClass(
class_linker, receiver, invoke_instruction->GetDexPc());
@@ -548,19 +546,20 @@ HInstruction* HInliner::AddTypeGuard(HInstruction* receiver,
}
const DexFile& caller_dex_file = *caller_compilation_unit_.GetDexFile();
- bool is_referrer = (klass == outermost_graph_->GetArtMethod()->GetDeclaringClass());
+ bool is_referrer = (klass.Get() == outermost_graph_->GetArtMethod()->GetDeclaringClass());
// Note that we will just compare the classes, so we don't need Java semantics access checks.
// Note that the type index and the dex file are relative to the method this type guard is
// inlined into.
HLoadClass* load_class = new (graph_->GetArena()) HLoadClass(graph_->GetCurrentMethod(),
class_index,
caller_dex_file,
+ klass,
is_referrer,
invoke_instruction->GetDexPc(),
/* needs_access_check */ false);
bb_cursor->InsertInstructionAfter(load_class, receiver_class);
// Sharpen after adding the instruction, as the sharpening may remove inputs.
- HSharpening::SharpenClass(load_class, klass, handles_, codegen_, compiler_driver_);
+ HSharpening::SharpenClass(load_class, codegen_, compiler_driver_);
// TODO: Extend reference type propagation to understand the guard.
HNotEqual* compare = new (graph_->GetArena()) HNotEqual(load_class, receiver_class);
@@ -637,7 +636,7 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction,
cursor,
bb_cursor,
class_index,
- handle.Get(),
+ handle,
invoke_instruction,
deoptimize);
if (deoptimize) {
@@ -1539,8 +1538,6 @@ bool HInliner::ArgumentTypesMoreSpecific(HInvoke* invoke_instruction, ArtMethod*
}
}
- PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
-
// Iterate over the list of parameter types and test whether any of the
// actual inputs has a more specific reference type than the type declared in
// the signature.
@@ -1552,9 +1549,9 @@ bool HInliner::ArgumentTypesMoreSpecific(HInvoke* invoke_instruction, ArtMethod*
++param_idx, ++input_idx) {
HInstruction* input = invoke_instruction->InputAt(input_idx);
if (input->GetType() == Primitive::kPrimNot) {
- mirror::Class* param_cls = resolved_method->GetDexCacheResolvedType(
+ mirror::Class* param_cls = resolved_method->GetClassFromTypeIndex(
param_list->GetTypeItem(param_idx).type_idx_,
- pointer_size);
+ /* resolve */ false);
if (IsReferenceTypeRefinement(GetClassRTI(param_cls),
/* declared_can_be_null */ true,
input)) {
@@ -1603,8 +1600,7 @@ void HInliner::FixUpReturnReferenceType(ArtMethod* resolved_method,
// TODO: we could be more precise by merging the phi inputs but that requires
// some functionality from the reference type propagation.
DCHECK(return_replacement->IsPhi());
- PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
- mirror::Class* cls = resolved_method->GetReturnType(false /* resolve */, pointer_size);
+ mirror::Class* cls = resolved_method->GetReturnType(false /* resolve */);
return_replacement->SetReferenceTypeInfo(GetClassRTI(cls));
}
}
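
`AddTypeGuard` switching from `mirror::Class*` to `Handle<mirror::Class>` is what allows the `ScopedAssertNoThreadSuspension` above to disappear: a handle is a root slot the GC visits, so the class stays reachable and re-readable across suspension points. A toy illustration of that indirection (this is not ART's `Handle`, just the shape of the idea):

    // Toy handle: Get() re-reads a slot the GC keeps up to date, so the
    // holder never carries a stale raw pointer across a suspension point.
    template <typename T>
    class ToyHandle {
     public:
      explicit ToyHandle(T** slot) : slot_(slot) {}
      T* Get() const { return *slot_; }
     private:
      T** slot_;  // lives in a handle scope visited during GC pauses
    };
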
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 4c0b990f26..11aacab802 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -170,7 +170,7 @@ class HInliner : public HOptimization {
HInstruction* cursor,
HBasicBlock* bb_cursor,
dex::TypeIndex class_index,
- mirror::Class* klass,
+ Handle<mirror::Class> klass,
HInstruction* invoke_instruction,
bool with_deoptimization)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index 3cfabddf3a..8ed0e7fa06 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -208,10 +208,8 @@ void HInstructionBuilder::InitializeInstruction(HInstruction* instruction) {
HEnvironment* environment = new (arena_) HEnvironment(
arena_,
current_locals_->size(),
- graph_->GetDexFile(),
- graph_->GetMethodIdx(),
+ graph_->GetArtMethod(),
instruction->GetDexPc(),
- graph_->GetInvokeType(),
instruction);
environment->CopyFrom(*current_locals_);
instruction->SetRawEnvironment(environment);
@@ -936,48 +934,40 @@ bool HInstructionBuilder::BuildInvokePolymorphic(const Instruction& instruction
bool HInstructionBuilder::BuildNewInstance(dex::TypeIndex type_index, uint32_t dex_pc) {
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<1> hs(soa.Self());
Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
- Handle<mirror::Class> resolved_class(hs.NewHandle(dex_cache->GetResolvedType(type_index)));
- const DexFile& outer_dex_file = *outer_compilation_unit_->GetDexFile();
Handle<mirror::DexCache> outer_dex_cache = outer_compilation_unit_->GetDexCache();
- bool finalizable;
- bool needs_access_check = NeedsAccessCheck(type_index, dex_cache, &finalizable);
-
- // Only the access check entrypoint handles the finalizable class case. If we
- // need access checks, then we haven't resolved the method and the class may
- // again be finalizable.
- QuickEntrypointEnum entrypoint = (finalizable || needs_access_check)
- ? kQuickAllocObjectWithChecks
- : kQuickAllocObjectInitialized;
-
if (outer_dex_cache.Get() != dex_cache.Get()) {
// We currently do not support inlining allocations across dex files.
return false;
}
- HLoadClass* load_class = new (arena_) HLoadClass(
- graph_->GetCurrentMethod(),
- type_index,
- outer_dex_file,
- IsOutermostCompilingClass(type_index),
- dex_pc,
- needs_access_check);
+ HLoadClass* load_class = BuildLoadClass(type_index, dex_pc, /* check_access */ true);
- AppendInstruction(load_class);
HInstruction* cls = load_class;
- if (!IsInitialized(resolved_class)) {
+ Handle<mirror::Class> klass = load_class->GetClass();
+
+ if (!IsInitialized(klass)) {
cls = new (arena_) HClinitCheck(load_class, dex_pc);
AppendInstruction(cls);
}
+ // Only the access check entrypoint handles the finalizable class case. If we
+ // need access checks, then we haven't resolved the method and the class may
+ // again be finalizable.
+ QuickEntrypointEnum entrypoint = kQuickAllocObjectInitialized;
+ if (load_class->NeedsAccessCheck() || klass->IsFinalizable() || !klass->IsInstantiable()) {
+ entrypoint = kQuickAllocObjectWithChecks;
+ }
+
+ // Consider classes we haven't resolved as potentially finalizable.
+ bool finalizable = (klass.Get() == nullptr) || klass->IsFinalizable();
+
AppendInstruction(new (arena_) HNewInstance(
cls,
dex_pc,
type_index,
*dex_compilation_unit_->GetDexFile(),
- needs_access_check,
finalizable,
entrypoint));
return true;
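
The `BuildNewInstance` hunk above now derives the allocation entrypoint from the resolved class's properties rather than a pre-computed flag. A hedged standalone condensation of that decision (`Klass` is a stand-in for `mirror::Class`; the enum values match the hunk):

    enum QuickEntrypointEnum {
      kQuickAllocObjectInitialized,
      kQuickAllocObjectWithChecks,
    };

    struct Klass {
      bool finalizable = false;
      bool instantiable = true;
      bool IsFinalizable() const { return finalizable; }
      bool IsInstantiable() const { return instantiable; }
    };

    // Only the with-checks entrypoint copes with access checks, finalizable
    // classes, and uninstantiable classes; an unresolved class implies an
    // access check, so it takes the checked path too.
    QuickEntrypointEnum SelectAllocEntrypoint(const Klass* klass, bool needs_access_check) {
      if (needs_access_check || klass == nullptr ||
          klass->IsFinalizable() || !klass->IsInstantiable()) {
        return kQuickAllocObjectWithChecks;
      }
      return kQuickAllocObjectInitialized;
    }
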
@@ -1018,7 +1008,6 @@ HClinitCheck* HInstructionBuilder::ProcessClinitCheckForInvoke(
ArtMethod* resolved_method,
uint32_t method_idx,
HInvokeStaticOrDirect::ClinitCheckRequirement* clinit_check_requirement) {
- const DexFile& outer_dex_file = *outer_compilation_unit_->GetDexFile();
Thread* self = Thread::Current();
StackHandleScope<2> hs(self);
Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
@@ -1046,15 +1035,9 @@ HClinitCheck* HInstructionBuilder::ProcessClinitCheckForInvoke(
*clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kNone;
} else if (storage_index.IsValid()) {
*clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kExplicit;
- HLoadClass* load_class = new (arena_) HLoadClass(
- graph_->GetCurrentMethod(),
- storage_index,
- outer_dex_file,
- is_outer_class,
- dex_pc,
- /*needs_access_check*/ false);
- AppendInstruction(load_class);
- clinit_check = new (arena_) HClinitCheck(load_class, dex_pc);
+ HLoadClass* cls = BuildLoadClass(
+ storage_index, dex_pc, /* check_access */ false, /* outer */ true);
+ clinit_check = new (arena_) HClinitCheck(cls, dex_pc);
AppendInstruction(clinit_check);
}
return clinit_check;
@@ -1376,7 +1359,6 @@ bool HInstructionBuilder::BuildStaticFieldAccess(const Instruction& instruction,
}
Primitive::Type field_type = resolved_field->GetTypeAsPrimitiveType();
- const DexFile& outer_dex_file = *outer_compilation_unit_->GetDexFile();
Handle<mirror::DexCache> outer_dex_cache = outer_compilation_unit_->GetDexCache();
Handle<mirror::Class> outer_class(hs.NewHandle(GetOutermostCompilingClass()));
@@ -1404,16 +1386,10 @@ bool HInstructionBuilder::BuildStaticFieldAccess(const Instruction& instruction,
}
}
- HLoadClass* constant = new (arena_) HLoadClass(graph_->GetCurrentMethod(),
- storage_index,
- outer_dex_file,
- is_outer_class,
- dex_pc,
- /*needs_access_check*/ false);
- AppendInstruction(constant);
+ HLoadClass* constant = BuildLoadClass(
+ storage_index, dex_pc, /* check_access */ false, /* outer */ true);
HInstruction* cls = constant;
-
Handle<mirror::Class> klass(hs.NewHandle(resolved_field->GetDeclaringClass()));
if (!IsInitialized(klass)) {
cls = new (arena_) HClinitCheck(constant, dex_pc);
@@ -1660,33 +1636,53 @@ static TypeCheckKind ComputeTypeCheckKind(Handle<mirror::Class> cls)
}
}
-void HInstructionBuilder::BuildTypeCheck(const Instruction& instruction,
- uint8_t destination,
- uint8_t reference,
- dex::TypeIndex type_index,
- uint32_t dex_pc) {
+HLoadClass* HInstructionBuilder::BuildLoadClass(dex::TypeIndex type_index,
+ uint32_t dex_pc,
+ bool check_access,
+ bool outer) {
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<1> hs(soa.Self());
- const DexFile& dex_file = *dex_compilation_unit_->GetDexFile();
- Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
- Handle<mirror::Class> resolved_class(hs.NewHandle(dex_cache->GetResolvedType(type_index)));
-
- bool can_access = compiler_driver_->CanAccessTypeWithoutChecks(
- dex_compilation_unit_->GetDexMethodIndex(),
- dex_cache,
- type_index);
+ const DexCompilationUnit* compilation_unit =
+ outer ? outer_compilation_unit_ : dex_compilation_unit_;
+ const DexFile& dex_file = *compilation_unit->GetDexFile();
+ Handle<mirror::DexCache> dex_cache = compilation_unit->GetDexCache();
+ bool is_accessible = false;
+ Handle<mirror::Class> klass = handles_->NewHandle(dex_cache->GetResolvedType(type_index));
+ if (!check_access) {
+ is_accessible = true;
+ } else if (klass.Get() != nullptr) {
+ if (klass->IsPublic()) {
+ is_accessible = true;
+ } else {
+ mirror::Class* compiling_class = GetCompilingClass();
+ if (compiling_class != nullptr && compiling_class->CanAccess(klass.Get())) {
+ is_accessible = true;
+ }
+ }
+ }
- HInstruction* object = LoadLocal(reference, Primitive::kPrimNot);
- HLoadClass* cls = new (arena_) HLoadClass(
+ HLoadClass* load_class = new (arena_) HLoadClass(
graph_->GetCurrentMethod(),
type_index,
dex_file,
- IsOutermostCompilingClass(type_index),
+ klass,
+ klass.Get() != nullptr && (klass.Get() == GetOutermostCompilingClass()),
dex_pc,
- !can_access);
- AppendInstruction(cls);
+ !is_accessible);
- TypeCheckKind check_kind = ComputeTypeCheckKind(resolved_class);
+ AppendInstruction(load_class);
+ return load_class;
+}
+
+void HInstructionBuilder::BuildTypeCheck(const Instruction& instruction,
+ uint8_t destination,
+ uint8_t reference,
+ dex::TypeIndex type_index,
+ uint32_t dex_pc) {
+ HInstruction* object = LoadLocal(reference, Primitive::kPrimNot);
+ HLoadClass* cls = BuildLoadClass(type_index, dex_pc, /* check_access */ true);
+
+ ScopedObjectAccess soa(Thread::Current());
+ TypeCheckKind check_kind = ComputeTypeCheckKind(cls->GetClass());
if (instruction.Opcode() == Instruction::INSTANCE_OF) {
AppendInstruction(new (arena_) HInstanceOf(object, cls, check_kind, dex_pc));
UpdateLocal(destination, current_block_->GetLastInstruction());
@@ -2690,21 +2686,7 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction,
case Instruction::CONST_CLASS: {
dex::TypeIndex type_index(instruction.VRegB_21c());
- // `CanAccessTypeWithoutChecks` will tell whether the method being
- // built is trying to access its own class, so that the generated
- // code can optimize for this case. However, the optimization does not
- // work for inlining, so we use `IsOutermostCompilingClass` instead.
- ScopedObjectAccess soa(Thread::Current());
- Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
- bool can_access = compiler_driver_->CanAccessTypeWithoutChecks(
- dex_compilation_unit_->GetDexMethodIndex(), dex_cache, type_index);
- AppendInstruction(new (arena_) HLoadClass(
- graph_->GetCurrentMethod(),
- type_index,
- *dex_file_,
- IsOutermostCompilingClass(type_index),
- dex_pc,
- !can_access));
+ BuildLoadClass(type_index, dex_pc, /* check_access */ true);
UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction());
break;
}
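
The new `BuildLoadClass` centralizes the accessibility test that call sites previously obtained from `CanAccessTypeWithoutChecks`. A minimal standalone restatement of that logic (`Klass` stands in for `mirror::Class`; `CanAccess` is stubbed):

    struct Klass {
      bool is_public = false;
      bool IsPublic() const { return is_public; }
      // Stub for the real package/visibility rules.
      bool CanAccess(const Klass* /*other*/) const { return false; }
    };

    // Mirrors BuildLoadClass: trust the caller when check_access is false,
    // stay conservative for unresolved types, otherwise apply visibility.
    bool IsAccessible(const Klass* klass, const Klass* compiling_class, bool check_access) {
      if (!check_access) return true;
      if (klass == nullptr) return false;  // unresolved: keep the runtime check
      if (klass->IsPublic()) return true;
      return compiling_class != nullptr && compiling_class->CanAccess(klass);
    }
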
diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h
index aef0b94c1f..5efe95094c 100644
--- a/compiler/optimizing/instruction_builder.h
+++ b/compiler/optimizing/instruction_builder.h
@@ -46,9 +46,11 @@ class HInstructionBuilder : public ValueObject {
CompilerDriver* driver,
const uint8_t* interpreter_metadata,
OptimizingCompilerStats* compiler_stats,
- Handle<mirror::DexCache> dex_cache)
+ Handle<mirror::DexCache> dex_cache,
+ VariableSizedHandleScope* handles)
: arena_(graph->GetArena()),
graph_(graph),
+ handles_(handles),
dex_file_(dex_file),
code_item_(code_item),
return_type_(return_type),
@@ -223,6 +225,14 @@ class HInstructionBuilder : public ValueObject {
// Builds an instruction sequence for a switch statement.
void BuildSwitch(const Instruction& instruction, uint32_t dex_pc);
+ // Builds a `HLoadClass` loading the given `type_index`. If `outer` is true,
+ // this method will use the outer class's dex file to lookup the type at
+ // `type_index`.
+ HLoadClass* BuildLoadClass(dex::TypeIndex type_index,
+ uint32_t dex_pc,
+ bool check_access,
+ bool outer = false);
+
// Returns the outer-most compiling method's class.
mirror::Class* GetOutermostCompilingClass() const;
@@ -282,6 +292,7 @@ class HInstructionBuilder : public ValueObject {
ArenaAllocator* const arena_;
HGraph* const graph_;
+ VariableSizedHandleScope* handles_;
// The dex file where the method being compiled is, and the bytecode data.
const DexFile* const dex_file_;
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index fc6ff7b197..17d683f357 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -145,7 +145,7 @@ void IntrinsicsRecognizer::Run() {
if (!CheckInvokeType(intrinsic, invoke)) {
LOG(WARNING) << "Found an intrinsic with unexpected invoke type: "
<< intrinsic << " for "
- << invoke->GetDexFile().PrettyMethod(invoke->GetDexMethodIndex())
+ << art_method->PrettyMethod()
<< invoke->DebugName();
} else {
invoke->SetIntrinsic(intrinsic,
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 4f30b11753..2d3c00fb97 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -979,7 +979,7 @@ class LSEVisitor : public HGraphVisitor {
}
if (ref_info->IsSingletonAndRemovable() &&
!new_instance->IsFinalizable() &&
- !new_instance->NeedsAccessCheck()) {
+ !new_instance->NeedsChecks()) {
singleton_new_instances_.push_back(new_instance);
}
ArenaVector<HInstruction*>& heap_values =
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 0af0d19849..d15145e673 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -2391,6 +2391,14 @@ bool HInvoke::NeedsEnvironment() const {
return !opt.GetDoesNotNeedEnvironment();
}
+const DexFile& HInvokeStaticOrDirect::GetDexFileForPcRelativeDexCache() const {
+ ArtMethod* caller = GetEnvironment()->GetMethod();
+ ScopedObjectAccess soa(Thread::Current());
+ // `caller` is null for a top-level graph representing a method whose declaring
+ // class was not resolved.
+ return caller == nullptr ? GetBlock()->GetGraph()->GetDexFile() : *caller->GetDexFile();
+}
+
bool HInvokeStaticOrDirect::NeedsDexCacheOfDeclaringClass() const {
if (GetMethodLoadKind() != MethodLoadKind::kDexCacheViaMethod) {
return false;
@@ -2434,17 +2442,6 @@ std::ostream& operator<<(std::ostream& os, HInvokeStaticOrDirect::ClinitCheckReq
}
}
-// Helper for InstructionDataEquals to fetch the mirror Class out
-// from a kJitTableAddress LoadClass kind.
-// NO_THREAD_SAFETY_ANALYSIS because even though we're accessing
-// mirrors, they are stored in a variable size handle scope which is always
-// visited during a pause. Also, the only caller of this helper
-// only uses the mirror for pointer comparison.
-static inline mirror::Class* AsMirrorInternal(uint64_t address)
- NO_THREAD_SAFETY_ANALYSIS {
- return reinterpret_cast<StackReference<mirror::Class>*>(address)->AsMirrorPtr();
-}
-
bool HLoadClass::InstructionDataEquals(const HInstruction* other) const {
const HLoadClass* other_load_class = other->AsLoadClass();
// TODO: To allow GVN for HLoadClass from different dex files, we should compare the type
@@ -2455,9 +2452,10 @@ bool HLoadClass::InstructionDataEquals(const HInstruction* other) const {
}
switch (GetLoadKind()) {
case LoadKind::kBootImageAddress:
- return GetAddress() == other_load_class->GetAddress();
- case LoadKind::kJitTableAddress:
- return AsMirrorInternal(GetAddress()) == AsMirrorInternal(other_load_class->GetAddress());
+ case LoadKind::kJitTableAddress: {
+ ScopedObjectAccess soa(Thread::Current());
+ return GetClass().Get() == other_load_class->GetClass().Get();
+ }
default:
DCHECK(HasTypeReference(GetLoadKind()));
return IsSameDexFile(GetDexFile(), other_load_class->GetDexFile());
@@ -2502,17 +2500,6 @@ std::ostream& operator<<(std::ostream& os, HLoadClass::LoadKind rhs) {
}
}
-// Helper for InstructionDataEquals to fetch the mirror String out
-// from a kJitTableAddress LoadString kind.
-// NO_THREAD_SAFETY_ANALYSIS because even though we're accessing
-// mirrors, they are stored in a variable size handle scope which is always
-// visited during a pause. Also, the only caller of this helper
-// only uses the mirror for pointer comparison.
-static inline mirror::String* AsMirrorInternal(Handle<mirror::String> handle)
- NO_THREAD_SAFETY_ANALYSIS {
- return handle.Get();
-}
-
bool HLoadString::InstructionDataEquals(const HInstruction* other) const {
const HLoadString* other_load_string = other->AsLoadString();
// TODO: To allow GVN for HLoadString from different dex files, we should compare the strings
@@ -2523,8 +2510,10 @@ bool HLoadString::InstructionDataEquals(const HInstruction* other) const {
}
switch (GetLoadKind()) {
case LoadKind::kBootImageAddress:
- case LoadKind::kJitTableAddress:
- return AsMirrorInternal(GetString()) == AsMirrorInternal(other_load_string->GetString());
+ case LoadKind::kJitTableAddress: {
+ ScopedObjectAccess soa(Thread::Current());
+ return GetString().Get() == other_load_string->GetString().Get();
+ }
default:
return IsSameDexFile(GetDexFile(), other_load_string->GetDexFile());
}
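
Both the kBootImageAddress and kJitTableAddress equality checks above now compare the mirror objects through the typed Handle<> stored on the instruction, under a ScopedObjectAccess, replacing the raw uint64_t plumbing and its NO_THREAD_SAFETY_ANALYSIS helpers. A minimal standalone sketch of the indirection involved (not ART code; Handle below is a simplified stand-in for Handle<mirror::Class>):

#include <cassert>

template <typename T>
struct Handle {                    // stand-in: an indirection slot the GC may rewrite
  T** slot_;
  T* Get() const { return *slot_; }
};

int main() {
  int object = 0;                  // stand-in for a mirror::Class
  int* slot_a = &object;           // two distinct handle-scope slots ...
  int* slot_b = &object;           // ... both rooting the same object
  Handle<int> a{&slot_a};
  Handle<int> b{&slot_b};
  assert(&slot_a != &slot_b);      // slot identity differs ...
  assert(a.Get() == b.Get());      // ... but the Get()-based comparison matches
  return 0;
}

Two loads may own distinct handle-scope slots yet denote the same class, so equality has to go through Get() rather than any slot address.
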
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 3e7914c8c5..53b0fdde75 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1724,28 +1724,22 @@ class HEnvironment : public ArenaObject<kArenaAllocEnvironment> {
public:
HEnvironment(ArenaAllocator* arena,
size_t number_of_vregs,
- const DexFile& dex_file,
- uint32_t method_idx,
+ ArtMethod* method,
uint32_t dex_pc,
- InvokeType invoke_type,
HInstruction* holder)
: vregs_(number_of_vregs, arena->Adapter(kArenaAllocEnvironmentVRegs)),
locations_(number_of_vregs, arena->Adapter(kArenaAllocEnvironmentLocations)),
parent_(nullptr),
- dex_file_(dex_file),
- method_idx_(method_idx),
+ method_(method),
dex_pc_(dex_pc),
- invoke_type_(invoke_type),
holder_(holder) {
}
HEnvironment(ArenaAllocator* arena, const HEnvironment& to_copy, HInstruction* holder)
: HEnvironment(arena,
to_copy.Size(),
- to_copy.GetDexFile(),
- to_copy.GetMethodIdx(),
+ to_copy.GetMethod(),
to_copy.GetDexPc(),
- to_copy.GetInvokeType(),
holder) {}
void SetAndCopyParentChain(ArenaAllocator* allocator, HEnvironment* parent) {
@@ -1794,16 +1788,8 @@ class HEnvironment : public ArenaObject<kArenaAllocEnvironment> {
return dex_pc_;
}
- uint32_t GetMethodIdx() const {
- return method_idx_;
- }
-
- InvokeType GetInvokeType() const {
- return invoke_type_;
- }
-
- const DexFile& GetDexFile() const {
- return dex_file_;
+ ArtMethod* GetMethod() const {
+ return method_;
}
HInstruction* GetHolder() const {
@@ -1819,10 +1805,8 @@ class HEnvironment : public ArenaObject<kArenaAllocEnvironment> {
ArenaVector<HUserRecord<HEnvironment*>> vregs_;
ArenaVector<Location> locations_;
HEnvironment* parent_;
- const DexFile& dex_file_;
- const uint32_t method_idx_;
+ ArtMethod* method_;
const uint32_t dex_pc_;
- const InvokeType invoke_type_;
// The instruction that holds this environment.
HInstruction* const holder_;
@@ -3784,14 +3768,12 @@ class HNewInstance FINAL : public HExpression<1> {
uint32_t dex_pc,
dex::TypeIndex type_index,
const DexFile& dex_file,
- bool needs_access_check,
bool finalizable,
QuickEntrypointEnum entrypoint)
: HExpression(Primitive::kPrimNot, SideEffects::CanTriggerGC(), dex_pc),
type_index_(type_index),
dex_file_(dex_file),
entrypoint_(entrypoint) {
- SetPackedFlag<kFlagNeedsAccessCheck>(needs_access_check);
SetPackedFlag<kFlagFinalizable>(finalizable);
SetRawInputAt(0, cls);
}
@@ -3805,8 +3787,9 @@ class HNewInstance FINAL : public HExpression<1> {
// Can throw errors when out-of-memory or if it's not instantiable/accessible.
bool CanThrow() const OVERRIDE { return true; }
- // Needs to call into runtime to make sure it's instantiable/accessible.
- bool NeedsAccessCheck() const { return GetPackedFlag<kFlagNeedsAccessCheck>(); }
+ bool NeedsChecks() const {
+ return entrypoint_ == kQuickAllocObjectWithChecks;
+ }
bool IsFinalizable() const { return GetPackedFlag<kFlagFinalizable>(); }
@@ -3823,8 +3806,7 @@ class HNewInstance FINAL : public HExpression<1> {
DECLARE_INSTRUCTION(NewInstance);
private:
- static constexpr size_t kFlagNeedsAccessCheck = kNumberOfExpressionPackedBits;
- static constexpr size_t kFlagFinalizable = kFlagNeedsAccessCheck + 1;
+ static constexpr size_t kFlagFinalizable = kNumberOfExpressionPackedBits;
static constexpr size_t kNumberOfNewInstancePackedBits = kFlagFinalizable + 1;
static_assert(kNumberOfNewInstancePackedBits <= kMaxNumberOfPackedBits,
"Too many packed fields.");
@@ -3870,7 +3852,6 @@ class HInvoke : public HVariableInputSizeInstruction {
Primitive::Type GetType() const OVERRIDE { return GetPackedField<ReturnTypeField>(); }
uint32_t GetDexMethodIndex() const { return dex_method_index_; }
- const DexFile& GetDexFile() const { return GetEnvironment()->GetDexFile(); }
InvokeType GetInvokeType() const {
return GetPackedField<InvokeTypeField>();
@@ -4190,6 +4171,8 @@ class HInvokeStaticOrDirect FINAL : public HInvoke {
return dispatch_info_.method_load_data;
}
+ const DexFile& GetDexFileForPcRelativeDexCache() const;
+
ClinitCheckRequirement GetClinitCheckRequirement() const {
return GetPackedField<ClinitCheckRequirementField>();
}
@@ -5449,10 +5432,10 @@ class HBoundsCheck FINAL : public HExpression<2> {
HBoundsCheck(HInstruction* index,
HInstruction* length,
uint32_t dex_pc,
- uint32_t string_char_at_method_index = DexFile::kDexNoIndex)
- : HExpression(index->GetType(), SideEffects::CanTriggerGC(), dex_pc),
- string_char_at_method_index_(string_char_at_method_index) {
+ bool string_char_at = false)
+ : HExpression(index->GetType(), SideEffects::CanTriggerGC(), dex_pc) {
DCHECK_EQ(Primitive::kPrimInt, Primitive::PrimitiveKind(index->GetType()));
+ SetPackedFlag<kFlagIsStringCharAt>(string_char_at);
SetRawInputAt(0, index);
SetRawInputAt(1, length);
}
@@ -5466,22 +5449,14 @@ class HBoundsCheck FINAL : public HExpression<2> {
bool CanThrow() const OVERRIDE { return true; }
- bool IsStringCharAt() const { return GetStringCharAtMethodIndex() != DexFile::kDexNoIndex; }
- uint32_t GetStringCharAtMethodIndex() const { return string_char_at_method_index_; }
+ bool IsStringCharAt() const { return GetPackedFlag<kFlagIsStringCharAt>(); }
HInstruction* GetIndex() const { return InputAt(0); }
DECLARE_INSTRUCTION(BoundsCheck);
private:
- // We treat a String as an array, creating the HBoundsCheck from String.charAt()
- // intrinsic in the instruction simplifier. We want to include the String.charAt()
- // in the stack trace if we actually throw the StringIndexOutOfBoundsException,
- // so we need to create an HEnvironment which will be translated to an InlineInfo
- // indicating the extra stack frame. Since we add this HEnvironment quite late,
- // in the PrepareForRegisterAllocation pass, we need to remember the method index
- // from the invoke as we don't want to look again at the dex bytecode.
- uint32_t string_char_at_method_index_; // DexFile::kDexNoIndex if regular array.
+ static constexpr size_t kFlagIsStringCharAt = kNumberOfExpressionPackedBits;
DISALLOW_COPY_AND_ASSIGN(HBoundsCheck);
};
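
The HBoundsCheck hunks above shrink a full uint32_t member to one bit in the instruction's shared packed field, now that the method index is no longer needed (the String.charAt() ArtMethod is fetched from WellKnownClasses instead; see prepare_for_register_allocation.cc below). A minimal standalone sketch of the packed-flag pattern (not ART code; bit layout and names are illustrative):

#include <cassert>
#include <cstddef>
#include <cstdint>

class Instruction {
 protected:
  static constexpr size_t kNumberOfExpressionPackedBits = 3;  // illustrative base
  template <size_t kFlag>
  void SetPackedFlag(bool value) {
    packed_fields_ =
        (packed_fields_ & ~(1u << kFlag)) | (static_cast<uint32_t>(value) << kFlag);
  }
  template <size_t kFlag>
  bool GetPackedFlag() const { return ((packed_fields_ >> kFlag) & 1u) != 0u; }
 private:
  uint32_t packed_fields_ = 0u;
};

class BoundsCheck : public Instruction {
 public:
  explicit BoundsCheck(bool string_char_at) {
    SetPackedFlag<kFlagIsStringCharAt>(string_char_at);  // one bit, not a uint32_t
  }
  bool IsStringCharAt() const { return GetPackedFlag<kFlagIsStringCharAt>(); }
 private:
  static constexpr size_t kFlagIsStringCharAt = kNumberOfExpressionPackedBits;
};

int main() {
  assert(BoundsCheck(true).IsStringCharAt());
  assert(!BoundsCheck(false).IsStringCharAt());
  return 0;
}
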
@@ -5567,6 +5542,7 @@ class HLoadClass FINAL : public HInstruction {
HLoadClass(HCurrentMethod* current_method,
dex::TypeIndex type_index,
const DexFile& dex_file,
+ Handle<mirror::Class> klass,
bool is_referrers_class,
uint32_t dex_pc,
bool needs_access_check)
@@ -5574,7 +5550,7 @@ class HLoadClass FINAL : public HInstruction {
special_input_(HUserRecord<HInstruction*>(current_method)),
type_index_(type_index),
dex_file_(dex_file),
- address_(0u),
+ klass_(klass),
loaded_class_rti_(ReferenceTypeInfo::CreateInvalid()) {
// Referrers class should not need access check. We never inline unverified
// methods so we can't possibly end up in this situation.
@@ -5587,10 +5563,7 @@ class HLoadClass FINAL : public HInstruction {
SetPackedFlag<kFlagGenerateClInitCheck>(false);
}
- void SetLoadKindWithAddress(LoadKind load_kind, uint64_t address) {
- DCHECK(HasAddress(load_kind));
- DCHECK_NE(address, 0u);
- address_ = address;
+ void SetLoadKind(LoadKind load_kind) {
SetLoadKindInternal(load_kind);
}
@@ -5657,11 +5630,6 @@ class HLoadClass FINAL : public HInstruction {
dex::TypeIndex GetTypeIndex() const { return type_index_; }
const DexFile& GetDexFile() const { return dex_file_; }
- uint64_t GetAddress() const {
- DCHECK(HasAddress(GetLoadKind()));
- return address_;
- }
-
bool NeedsDexCacheOfDeclaringClass() const OVERRIDE {
return GetLoadKind() == LoadKind::kDexCacheViaMethod;
}
@@ -5691,6 +5659,10 @@ class HLoadClass FINAL : public HInstruction {
return Primitive::kPrimNot;
}
+ Handle<mirror::Class> GetClass() const {
+ return klass_;
+ }
+
DECLARE_INSTRUCTION(LoadClass);
private:
@@ -5714,11 +5686,6 @@ class HLoadClass FINAL : public HInstruction {
load_kind == LoadKind::kDexCacheViaMethod;
}
- static bool HasAddress(LoadKind load_kind) {
- return load_kind == LoadKind::kBootImageAddress ||
- load_kind == LoadKind::kJitTableAddress;
- }
-
void SetLoadKindInternal(LoadKind load_kind);
// The special input is the HCurrentMethod for kDexCacheViaMethod or kReferrersClass.
@@ -5729,7 +5696,7 @@ class HLoadClass FINAL : public HInstruction {
const dex::TypeIndex type_index_;
const DexFile& dex_file_;
- uint64_t address_; // Up to 64-bit, needed for kJitTableAddress on 64-bit targets.
+ Handle<mirror::Class> klass_;
ReferenceTypeInfo loaded_class_rti_;
diff --git a/compiler/optimizing/nodes_test.cc b/compiler/optimizing/nodes_test.cc
index 5d9a6528ca..7686ba851b 100644
--- a/compiler/optimizing/nodes_test.cc
+++ b/compiler/optimizing/nodes_test.cc
@@ -52,7 +52,7 @@ TEST(Node, RemoveInstruction) {
exit_block->AddInstruction(new (&allocator) HExit());
HEnvironment* environment = new (&allocator) HEnvironment(
- &allocator, 1, graph->GetDexFile(), graph->GetMethodIdx(), 0, kStatic, null_check);
+ &allocator, 1, graph->GetArtMethod(), 0, null_check);
null_check->SetRawEnvironment(environment);
environment->SetRawEnvAt(0, parameter);
parameter->AddEnvUseAt(null_check->GetEnvironment(), 0);
@@ -137,7 +137,7 @@ TEST(Node, ParentEnvironment) {
ASSERT_TRUE(parameter1->GetUses().HasExactlyOneElement());
HEnvironment* environment = new (&allocator) HEnvironment(
- &allocator, 1, graph->GetDexFile(), graph->GetMethodIdx(), 0, kStatic, with_environment);
+ &allocator, 1, graph->GetArtMethod(), 0, with_environment);
ArenaVector<HInstruction*> array(allocator.Adapter());
array.push_back(parameter1);
@@ -148,13 +148,13 @@ TEST(Node, ParentEnvironment) {
ASSERT_TRUE(parameter1->GetEnvUses().HasExactlyOneElement());
HEnvironment* parent1 = new (&allocator) HEnvironment(
- &allocator, 1, graph->GetDexFile(), graph->GetMethodIdx(), 0, kStatic, nullptr);
+ &allocator, 1, graph->GetArtMethod(), 0, nullptr);
parent1->CopyFrom(array);
ASSERT_EQ(parameter1->GetEnvUses().SizeSlow(), 2u);
HEnvironment* parent2 = new (&allocator) HEnvironment(
- &allocator, 1, graph->GetDexFile(), graph->GetMethodIdx(), 0, kStatic, nullptr);
+ &allocator, 1, graph->GetArtMethod(), 0, nullptr);
parent2->CopyFrom(array);
parent1->SetAndCopyParentChain(&allocator, parent2);
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index db7c1fbb06..efbaf6c221 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -16,6 +16,9 @@
#include "prepare_for_register_allocation.h"
+#include "jni_internal.h"
+#include "well_known_classes.h"
+
namespace art {
void PrepareForRegisterAllocation::Run() {
@@ -42,16 +45,12 @@ void PrepareForRegisterAllocation::VisitBoundsCheck(HBoundsCheck* check) {
if (check->IsStringCharAt()) {
// Add a fake environment for String.charAt() inline info as we want
// the exception to appear as being thrown from there.
- const DexFile& dex_file = check->GetEnvironment()->GetDexFile();
- DCHECK_STREQ(dex_file.PrettyMethod(check->GetStringCharAtMethodIndex()).c_str(),
- "char java.lang.String.charAt(int)");
+ ArtMethod* char_at_method = jni::DecodeArtMethod(WellKnownClasses::java_lang_String_charAt);
ArenaAllocator* arena = GetGraph()->GetArena();
HEnvironment* environment = new (arena) HEnvironment(arena,
/* number_of_vregs */ 0u,
- dex_file,
- check->GetStringCharAtMethodIndex(),
+ char_at_method,
/* dex_pc */ DexFile::kDexNoIndex,
- kVirtual,
check);
check->InsertRawEnvironment(environment);
}
@@ -199,8 +198,7 @@ bool PrepareForRegisterAllocation::CanMoveClinitCheck(HInstruction* input,
return false;
}
if (user_environment->GetDexPc() != input_environment->GetDexPc() ||
- user_environment->GetMethodIdx() != input_environment->GetMethodIdx() ||
- !IsSameDexFile(user_environment->GetDexFile(), input_environment->GetDexFile())) {
+ user_environment->GetMethod() != input_environment->GetMethod()) {
return false;
}
user_environment = user_environment->GetParent();
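
With HEnvironment carrying an ArtMethod*, the "same inlining frame" test above collapses from a (dex pc, method index, dex file) triple into a dex pc plus a single pointer comparison, since the runtime keeps exactly one ArtMethod per resolved method. A standalone sketch (not ART code; stand-in types):

#include <cassert>
#include <cstdint>

struct ArtMethod {};  // stand-in: identity is the pointer itself

struct Environment {
  ArtMethod* method;
  uint32_t dex_pc;
};

bool SameFrame(const Environment& a, const Environment& b) {
  // Before: dex_pc, method_idx, and IsSameDexFile(dex_file_a, dex_file_b).
  return a.dex_pc == b.dex_pc && a.method == b.method;
}

int main() {
  ArtMethod m;
  assert(SameFrame(Environment{&m, 7u}, Environment{&m, 7u}));
  assert(!SameFrame(Environment{&m, 7u}, Environment{&m, 8u}));
  return 0;
}
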
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index f8a4469712..8854a2b08b 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -295,13 +295,13 @@ static void BoundTypeForClassCheck(HInstruction* check) {
}
if (check->IsIf()) {
- HBasicBlock* trueBlock = check->IsEqual()
+ HBasicBlock* trueBlock = compare->IsEqual()
? check->AsIf()->IfTrueSuccessor()
: check->AsIf()->IfFalseSuccessor();
BoundTypeIn(receiver, trueBlock, /* start_instruction */ nullptr, class_rti);
} else {
DCHECK(check->IsDeoptimize());
- if (check->IsEqual()) {
+ if (compare->IsEqual()) {
BoundTypeIn(receiver, check->GetBlock(), check, class_rti);
}
}
@@ -499,18 +499,19 @@ void ReferenceTypePropagation::RTPVisitor::SetClassAsTypeInfo(HInstruction* inst
if (instr->IsInvokeStaticOrDirect() && instr->AsInvokeStaticOrDirect()->IsStringInit()) {
// Calls to String.<init> are replaced with a StringFactory.
if (kIsDebugBuild) {
- HInvoke* invoke = instr->AsInvoke();
+ HInvokeStaticOrDirect* invoke = instr->AsInvokeStaticOrDirect();
ClassLinker* cl = Runtime::Current()->GetClassLinker();
Thread* self = Thread::Current();
StackHandleScope<2> hs(self);
+ const DexFile& dex_file = *invoke->GetTargetMethod().dex_file;
Handle<mirror::DexCache> dex_cache(
- hs.NewHandle(FindDexCacheWithHint(self, invoke->GetDexFile(), hint_dex_cache_)));
+ hs.NewHandle(FindDexCacheWithHint(self, dex_file, hint_dex_cache_)));
// Use a null loader. We should probably use the compiling method's class loader,
// but then we would need to pass it to RTPVisitor just for this debug check. Since
// the method is from the String class, the null loader is good enough.
Handle<mirror::ClassLoader> loader;
ArtMethod* method = cl->ResolveMethod<ClassLinker::kNoICCECheckForCache>(
- invoke->GetDexFile(), invoke->GetDexMethodIndex(), dex_cache, loader, nullptr, kDirect);
+ dex_file, invoke->GetDexMethodIndex(), dex_cache, loader, nullptr, kDirect);
DCHECK(method != nullptr);
mirror::Class* declaring_class = method->GetDeclaringClass();
DCHECK(declaring_class != nullptr);
@@ -619,14 +620,10 @@ void ReferenceTypePropagation::RTPVisitor::VisitUnresolvedStaticFieldGet(
void ReferenceTypePropagation::RTPVisitor::VisitLoadClass(HLoadClass* instr) {
ScopedObjectAccess soa(Thread::Current());
- // Get type from dex cache assuming it was populated by the verifier.
- mirror::Class* resolved_class = GetClassFromDexCache(soa.Self(),
- instr->GetDexFile(),
- instr->GetTypeIndex(),
- hint_dex_cache_);
- if (IsAdmissible(resolved_class)) {
+ Handle<mirror::Class> resolved_class = instr->GetClass();
+ if (IsAdmissible(resolved_class.Get())) {
instr->SetLoadedClassRTI(ReferenceTypeInfo::Create(
- handle_cache_->NewHandle(resolved_class), /* is_exact */ true));
+ resolved_class, /* is_exact */ true));
}
instr->SetReferenceTypeInfo(
ReferenceTypeInfo::Create(handle_cache_->GetClassClassHandle(), /* is_exact */ true));
@@ -843,12 +840,8 @@ void ReferenceTypePropagation::RTPVisitor::VisitInvoke(HInvoke* instr) {
}
ScopedObjectAccess soa(Thread::Current());
- ClassLinker* cl = Runtime::Current()->GetClassLinker();
- mirror::DexCache* dex_cache =
- FindDexCacheWithHint(soa.Self(), instr->GetDexFile(), hint_dex_cache_);
- PointerSize pointer_size = cl->GetImagePointerSize();
- ArtMethod* method = dex_cache->GetResolvedMethod(instr->GetDexMethodIndex(), pointer_size);
- mirror::Class* klass = (method == nullptr) ? nullptr : method->GetReturnType(false, pointer_size);
+ ArtMethod* method = instr->GetResolvedMethod();
+ mirror::Class* klass = (method == nullptr) ? nullptr : method->GetReturnType(/* resolve */ false);
SetClassAsTypeInfo(instr, klass, /* is_exact */ false);
}
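
Both reference_type_propagation.cc hunks share one theme: read resolution results already cached on the HIR node -- the Handle on HLoadClass and the resolved ArtMethod* on HInvoke -- instead of re-querying a dex cache with a hint. A standalone sketch of the invoke case (not ART code; simplified stand-ins):

#include <cassert>

struct Class {};

struct ArtMethod {
  Class* return_type;
  Class* GetReturnType() const { return return_type; }  // no dex-cache walk
};

struct Invoke {
  ArtMethod* resolved_method;  // cached on the node at graph-build time
  ArtMethod* GetResolvedMethod() const { return resolved_method; }
};

int main() {
  Class string_class;
  ArtMethod to_string{&string_class};
  Invoke invoke{&to_string};
  ArtMethod* method = invoke.GetResolvedMethod();
  Class* klass = (method == nullptr) ? nullptr : method->GetReturnType();
  assert(klass == &string_class);
  return 0;
}
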
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index eabda2675e..c5294107ae 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -133,24 +133,13 @@ void HSharpening::ProcessInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
void HSharpening::ProcessLoadClass(HLoadClass* load_class) {
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<1> hs(soa.Self());
- Runtime* runtime = Runtime::Current();
- ClassLinker* class_linker = runtime->GetClassLinker();
- const DexFile& dex_file = load_class->GetDexFile();
- dex::TypeIndex type_index = load_class->GetTypeIndex();
- Handle<mirror::DexCache> dex_cache = IsSameDexFile(dex_file, *compilation_unit_.GetDexFile())
- ? compilation_unit_.GetDexCache()
- : hs.NewHandle(class_linker->FindDexCache(soa.Self(), dex_file));
- mirror::Class* cls = dex_cache->GetResolvedType(type_index);
- SharpenClass(load_class, cls, handles_, codegen_, compiler_driver_);
+ SharpenClass(load_class, codegen_, compiler_driver_);
}
void HSharpening::SharpenClass(HLoadClass* load_class,
- mirror::Class* klass,
- VariableSizedHandleScope* handles,
CodeGenerator* codegen,
CompilerDriver* compiler_driver) {
- ScopedAssertNoThreadSuspension sants("Sharpening class in compiler");
+ Handle<mirror::Class> klass = load_class->GetClass();
DCHECK(load_class->GetLoadKind() == HLoadClass::LoadKind::kDexCacheViaMethod ||
load_class->GetLoadKind() == HLoadClass::LoadKind::kReferrersClass)
<< load_class->GetLoadKind();
@@ -174,7 +163,6 @@ void HSharpening::SharpenClass(HLoadClass* load_class,
bool is_in_boot_image = false;
HLoadClass::LoadKind desired_load_kind = static_cast<HLoadClass::LoadKind>(-1);
- uint64_t address = 0u; // Class or dex cache element address.
Runtime* runtime = Runtime::Current();
if (codegen->GetCompilerOptions().IsBootImage()) {
// Compiling boot image. Check if the class is a boot image class.
@@ -182,7 +170,7 @@ void HSharpening::SharpenClass(HLoadClass* load_class,
if (!compiler_driver->GetSupportBootImageFixup()) {
// compiler_driver_test. Do not sharpen.
desired_load_kind = HLoadClass::LoadKind::kDexCacheViaMethod;
- } else if ((klass != nullptr) && compiler_driver->IsImageClass(
+ } else if ((klass.Get() != nullptr) && compiler_driver->IsImageClass(
dex_file.StringDataByIdx(dex_file.GetTypeId(type_index).descriptor_idx_))) {
is_in_boot_image = true;
desired_load_kind = codegen->GetCompilerOptions().GetCompilePic()
@@ -194,20 +182,16 @@ void HSharpening::SharpenClass(HLoadClass* load_class,
desired_load_kind = HLoadClass::LoadKind::kBssEntry;
}
} else {
- is_in_boot_image = (klass != nullptr) && runtime->GetHeap()->ObjectIsInBootImageSpace(klass);
+ is_in_boot_image = (klass.Get() != nullptr) &&
+ runtime->GetHeap()->ObjectIsInBootImageSpace(klass.Get());
if (runtime->UseJitCompilation()) {
// TODO: Make sure we don't set the "compile PIC" flag for JIT as that's bogus.
// DCHECK(!codegen_->GetCompilerOptions().GetCompilePic());
if (is_in_boot_image) {
// TODO: Use direct pointers for all non-moving spaces, not just boot image. Bug: 29530787
desired_load_kind = HLoadClass::LoadKind::kBootImageAddress;
- address = reinterpret_cast64<uint64_t>(klass);
- } else if (klass != nullptr) {
+ } else if (klass.Get() != nullptr) {
desired_load_kind = HLoadClass::LoadKind::kJitTableAddress;
- // We store in the address field the location of the stack reference maintained
- // by the handle. We do this now so that the code generation does not need to figure
- // out which class loader to use.
- address = reinterpret_cast<uint64_t>(handles->NewHandle(klass).GetReference());
} else {
// Class not loaded yet. This happens when the dex code requesting
// this `HLoadClass` hasn't been executed in the interpreter.
@@ -218,7 +202,6 @@ void HSharpening::SharpenClass(HLoadClass* load_class,
} else if (is_in_boot_image && !codegen->GetCompilerOptions().GetCompilePic()) {
// AOT app compilation. Check if the class is in the boot image.
desired_load_kind = HLoadClass::LoadKind::kBootImageAddress;
- address = reinterpret_cast64<uint64_t>(klass);
} else {
// Not JIT and either the klass is not in boot image or we are compiling in PIC mode.
desired_load_kind = HLoadClass::LoadKind::kBssEntry;
@@ -240,8 +223,7 @@ void HSharpening::SharpenClass(HLoadClass* load_class,
break;
case HLoadClass::LoadKind::kBootImageAddress:
case HLoadClass::LoadKind::kJitTableAddress:
- DCHECK_NE(address, 0u);
- load_class->SetLoadKindWithAddress(load_kind, address);
+ load_class->SetLoadKind(load_kind);
break;
default:
LOG(FATAL) << "Unexpected load kind: " << load_kind;
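
The decision tree in SharpenClass is otherwise unchanged; only the address bookkeeping disappears, since the Handle on the HLoadClass already roots the class. A standalone sketch of the JIT/app-AOT portion of the selection visible in this hunk (not ART code; parameter names are illustrative):

#include <cassert>

enum class LoadKind { kDexCacheViaMethod, kBootImageAddress, kBssEntry, kJitTableAddress };

LoadKind ChooseForJitOrAppAot(bool jit, bool in_boot_image, bool klass_resolved, bool pic) {
  if (jit) {
    if (in_boot_image) return LoadKind::kBootImageAddress;   // direct pointer
    if (klass_resolved) return LoadKind::kJitTableAddress;   // rooted in the JIT table
    return LoadKind::kDexCacheViaMethod;                     // class not loaded yet
  }
  if (in_boot_image && !pic) return LoadKind::kBootImageAddress;
  return LoadKind::kBssEntry;  // PIC, or class not in the boot image
}

int main() {
  assert(ChooseForJitOrAppAot(true, false, true, false) == LoadKind::kJitTableAddress);
  assert(ChooseForJitOrAppAot(false, true, true, false) == LoadKind::kBootImageAddress);
  assert(ChooseForJitOrAppAot(false, false, true, true) == LoadKind::kBssEntry);
  return 0;
}
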
diff --git a/compiler/optimizing/sharpening.h b/compiler/optimizing/sharpening.h
index ae5ccb33ab..ae3d83ef2c 100644
--- a/compiler/optimizing/sharpening.h
+++ b/compiler/optimizing/sharpening.h
@@ -49,8 +49,6 @@ class HSharpening : public HOptimization {
// Used internally but also by the inliner.
static void SharpenClass(HLoadClass* load_class,
- mirror::Class* klass,
- VariableSizedHandleScope* handles,
CodeGenerator* codegen,
CompilerDriver* compiler_driver)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index fc8af6462a..6087e36507 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -13,8 +13,13 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
#include "stack_map_stream.h"
+#include "art_method.h"
+#include "runtime.h"
+#include "scoped_thread_state_change-inl.h"
+
namespace art {
void StackMapStream::BeginStackMapEntry(uint32_t dex_pc,
@@ -98,15 +103,27 @@ void StackMapStream::AddDexRegisterEntry(DexRegisterLocation::Kind kind, int32_t
current_dex_register_++;
}
-void StackMapStream::BeginInlineInfoEntry(uint32_t method_index,
+static bool EncodeArtMethodInInlineInfo(ArtMethod* method ATTRIBUTE_UNUSED) {
+ // Note: the runtime is null only for unit testing.
+ return Runtime::Current() == nullptr || !Runtime::Current()->IsAotCompiler();
+}
+
+void StackMapStream::BeginInlineInfoEntry(ArtMethod* method,
uint32_t dex_pc,
- InvokeType invoke_type,
- uint32_t num_dex_registers) {
+ uint32_t num_dex_registers,
+ const DexFile* outer_dex_file) {
DCHECK(!in_inline_frame_);
in_inline_frame_ = true;
- current_inline_info_.method_index = method_index;
+ if (EncodeArtMethodInInlineInfo(method)) {
+ current_inline_info_.method = method;
+ } else {
+ if (dex_pc != static_cast<uint32_t>(-1) && kIsDebugBuild) {
+ ScopedObjectAccess soa(Thread::Current());
+ DCHECK(IsSameDexFile(*outer_dex_file, *method->GetDexFile()));
+ }
+ current_inline_info_.method_index = method->GetDexMethodIndexUnchecked();
+ }
current_inline_info_.dex_pc = dex_pc;
- current_inline_info_.invoke_type = invoke_type;
current_inline_info_.num_dex_registers = num_dex_registers;
current_inline_info_.dex_register_locations_start_index = dex_register_locations_.size();
if (num_dex_registers != 0) {
@@ -229,25 +246,32 @@ size_t StackMapStream::ComputeDexRegisterMapsSize() const {
void StackMapStream::ComputeInlineInfoEncoding() {
uint32_t method_index_max = 0;
uint32_t dex_pc_max = DexFile::kDexNoIndex;
- uint32_t invoke_type_max = 0;
+ uint32_t extra_data_max = 0;
uint32_t inline_info_index = 0;
for (const StackMapEntry& entry : stack_maps_) {
for (size_t j = 0; j < entry.inlining_depth; ++j) {
InlineInfoEntry inline_entry = inline_infos_[inline_info_index++];
- method_index_max = std::max(method_index_max, inline_entry.method_index);
+ if (inline_entry.method == nullptr) {
+ method_index_max = std::max(method_index_max, inline_entry.method_index);
+ extra_data_max = std::max(extra_data_max, 1u);
+ } else {
+ method_index_max = std::max(
+ method_index_max, High32Bits(reinterpret_cast<uintptr_t>(inline_entry.method)));
+ extra_data_max = std::max(
+ extra_data_max, Low32Bits(reinterpret_cast<uintptr_t>(inline_entry.method)));
+ }
if (inline_entry.dex_pc != DexFile::kDexNoIndex &&
(dex_pc_max == DexFile::kDexNoIndex || dex_pc_max < inline_entry.dex_pc)) {
dex_pc_max = inline_entry.dex_pc;
}
- invoke_type_max = std::max(invoke_type_max, static_cast<uint32_t>(inline_entry.invoke_type));
}
}
DCHECK_EQ(inline_info_index, inline_infos_.size());
inline_info_encoding_.SetFromSizes(method_index_max,
dex_pc_max,
- invoke_type_max,
+ extra_data_max,
dex_register_maps_size_);
}
@@ -354,9 +378,20 @@ void StackMapStream::FillIn(MemoryRegion region) {
DCHECK_LE(entry.inline_infos_start_index + entry.inlining_depth, inline_infos_.size());
for (size_t depth = 0; depth < entry.inlining_depth; ++depth) {
InlineInfoEntry inline_entry = inline_infos_[depth + entry.inline_infos_start_index];
- inline_info.SetMethodIndexAtDepth(inline_info_encoding_, depth, inline_entry.method_index);
+ if (inline_entry.method != nullptr) {
+ inline_info.SetMethodIndexAtDepth(
+ inline_info_encoding_,
+ depth,
+ High32Bits(reinterpret_cast<uintptr_t>(inline_entry.method)));
+ inline_info.SetExtraDataAtDepth(
+ inline_info_encoding_,
+ depth,
+ Low32Bits(reinterpret_cast<uintptr_t>(inline_entry.method)));
+ } else {
+ inline_info.SetMethodIndexAtDepth(inline_info_encoding_, depth, inline_entry.method_index);
+ inline_info.SetExtraDataAtDepth(inline_info_encoding_, depth, 1);
+ }
inline_info.SetDexPcAtDepth(inline_info_encoding_, depth, inline_entry.dex_pc);
- inline_info.SetInvokeTypeAtDepth(inline_info_encoding_, depth, inline_entry.invoke_type);
if (inline_entry.num_dex_registers == 0) {
// No dex map available.
inline_info.SetDexRegisterMapOffsetAtDepth(inline_info_encoding_,
@@ -544,10 +579,13 @@ void StackMapStream::CheckCodeInfo(MemoryRegion region) const {
InlineInfoEntry inline_entry = inline_infos_[inline_info_index];
DCHECK_EQ(inline_info.GetDexPcAtDepth(encoding.inline_info_encoding, d),
inline_entry.dex_pc);
- DCHECK_EQ(inline_info.GetMethodIndexAtDepth(encoding.inline_info_encoding, d),
- inline_entry.method_index);
- DCHECK_EQ(inline_info.GetInvokeTypeAtDepth(encoding.inline_info_encoding, d),
- inline_entry.invoke_type);
+ if (inline_info.EncodesArtMethodAtDepth(encoding.inline_info_encoding, d)) {
+ DCHECK_EQ(inline_info.GetArtMethodAtDepth(encoding.inline_info_encoding, d),
+ inline_entry.method);
+ } else {
+ DCHECK_EQ(inline_info.GetMethodIndexAtDepth(encoding.inline_info_encoding, d),
+ inline_entry.method_index);
+ }
CheckDexRegisterMap(code_info,
code_info.GetDexRegisterMapAtDepth(
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 53a9795d52..d6f42b373c 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -109,8 +109,8 @@ class StackMapStream : public ValueObject {
struct InlineInfoEntry {
uint32_t dex_pc; // DexFile::kDexNoIndex for intrinsified native methods.
+ ArtMethod* method;
uint32_t method_index;
- InvokeType invoke_type;
uint32_t num_dex_registers;
BitVector* live_dex_registers_mask;
size_t dex_register_locations_start_index;
@@ -126,10 +126,10 @@ class StackMapStream : public ValueObject {
void AddDexRegisterEntry(DexRegisterLocation::Kind kind, int32_t value);
- void BeginInlineInfoEntry(uint32_t method_index,
+ void BeginInlineInfoEntry(ArtMethod* method,
uint32_t dex_pc,
- InvokeType invoke_type,
- uint32_t num_dex_registers);
+ uint32_t num_dex_registers,
+ const DexFile* outer_dex_file = nullptr);
void EndInlineInfoEntry();
size_t GetNumberOfStackMaps() const {
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 967fd96561..22810ea4f7 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -16,6 +16,7 @@
#include "stack_map.h"
+#include "art_method.h"
#include "base/arena_bit_vector.h"
#include "stack_map_stream.h"
@@ -128,6 +129,7 @@ TEST(StackMapTest, Test2) {
ArenaPool pool;
ArenaAllocator arena(&pool);
StackMapStream stream(&arena);
+ ArtMethod art_method;
ArenaBitVector sp_mask1(&arena, 0, true);
sp_mask1.SetBit(2);
@@ -137,9 +139,9 @@ TEST(StackMapTest, Test2) {
stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask1, number_of_dex_registers, 2);
stream.AddDexRegisterEntry(Kind::kInStack, 0); // Short location.
stream.AddDexRegisterEntry(Kind::kConstant, -2); // Large location.
- stream.BeginInlineInfoEntry(82, 3, kDirect, number_of_dex_registers_in_inline_info);
+ stream.BeginInlineInfoEntry(&art_method, 3, number_of_dex_registers_in_inline_info);
stream.EndInlineInfoEntry();
- stream.BeginInlineInfoEntry(42, 2, kStatic, number_of_dex_registers_in_inline_info);
+ stream.BeginInlineInfoEntry(&art_method, 2, number_of_dex_registers_in_inline_info);
stream.EndInlineInfoEntry();
stream.EndStackMapEntry();
@@ -238,12 +240,10 @@ TEST(StackMapTest, Test2) {
ASSERT_TRUE(stack_map.HasInlineInfo(encoding.stack_map_encoding));
InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
ASSERT_EQ(2u, inline_info.GetDepth(encoding.inline_info_encoding));
- ASSERT_EQ(82u, inline_info.GetMethodIndexAtDepth(encoding.inline_info_encoding, 0));
- ASSERT_EQ(42u, inline_info.GetMethodIndexAtDepth(encoding.inline_info_encoding, 1));
ASSERT_EQ(3u, inline_info.GetDexPcAtDepth(encoding.inline_info_encoding, 0));
ASSERT_EQ(2u, inline_info.GetDexPcAtDepth(encoding.inline_info_encoding, 1));
- ASSERT_EQ(kDirect, inline_info.GetInvokeTypeAtDepth(encoding.inline_info_encoding, 0));
- ASSERT_EQ(kStatic, inline_info.GetInvokeTypeAtDepth(encoding.inline_info_encoding, 1));
+ ASSERT_TRUE(inline_info.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 0));
+ ASSERT_TRUE(inline_info.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 1));
}
// Second stack map.
@@ -662,6 +662,7 @@ TEST(StackMapTest, InlineTest) {
ArenaPool pool;
ArenaAllocator arena(&pool);
StackMapStream stream(&arena);
+ ArtMethod art_method;
ArenaBitVector sp_mask1(&arena, 0, true);
sp_mask1.SetBit(2);
@@ -672,10 +673,10 @@ TEST(StackMapTest, InlineTest) {
stream.AddDexRegisterEntry(Kind::kInStack, 0);
stream.AddDexRegisterEntry(Kind::kConstant, 4);
- stream.BeginInlineInfoEntry(42, 2, kStatic, 1);
+ stream.BeginInlineInfoEntry(&art_method, 2, 1);
stream.AddDexRegisterEntry(Kind::kInStack, 8);
stream.EndInlineInfoEntry();
- stream.BeginInlineInfoEntry(82, 3, kStatic, 3);
+ stream.BeginInlineInfoEntry(&art_method, 3, 3);
stream.AddDexRegisterEntry(Kind::kInStack, 16);
stream.AddDexRegisterEntry(Kind::kConstant, 20);
stream.AddDexRegisterEntry(Kind::kInRegister, 15);
@@ -688,15 +689,15 @@ TEST(StackMapTest, InlineTest) {
stream.AddDexRegisterEntry(Kind::kInStack, 56);
stream.AddDexRegisterEntry(Kind::kConstant, 0);
- stream.BeginInlineInfoEntry(42, 2, kDirect, 1);
+ stream.BeginInlineInfoEntry(&art_method, 2, 1);
stream.AddDexRegisterEntry(Kind::kInStack, 12);
stream.EndInlineInfoEntry();
- stream.BeginInlineInfoEntry(82, 3, kStatic, 3);
+ stream.BeginInlineInfoEntry(&art_method, 3, 3);
stream.AddDexRegisterEntry(Kind::kInStack, 80);
stream.AddDexRegisterEntry(Kind::kConstant, 10);
stream.AddDexRegisterEntry(Kind::kInRegister, 5);
stream.EndInlineInfoEntry();
- stream.BeginInlineInfoEntry(52, 5, kVirtual, 0);
+ stream.BeginInlineInfoEntry(&art_method, 5, 0);
stream.EndInlineInfoEntry();
stream.EndStackMapEntry();
@@ -712,12 +713,12 @@ TEST(StackMapTest, InlineTest) {
stream.AddDexRegisterEntry(Kind::kInStack, 56);
stream.AddDexRegisterEntry(Kind::kConstant, 0);
- stream.BeginInlineInfoEntry(42, 2, kVirtual, 0);
+ stream.BeginInlineInfoEntry(&art_method, 2, 0);
stream.EndInlineInfoEntry();
- stream.BeginInlineInfoEntry(52, 5, kInterface, 1);
+ stream.BeginInlineInfoEntry(&art_method, 5, 1);
stream.AddDexRegisterEntry(Kind::kInRegister, 2);
stream.EndInlineInfoEntry();
- stream.BeginInlineInfoEntry(52, 10, kStatic, 2);
+ stream.BeginInlineInfoEntry(&art_method, 10, 2);
stream.AddDexRegisterEntry(Kind::kNone, 0);
stream.AddDexRegisterEntry(Kind::kInRegister, 3);
stream.EndInlineInfoEntry();
@@ -743,11 +744,9 @@ TEST(StackMapTest, InlineTest) {
InlineInfo if0 = ci.GetInlineInfoOf(sm0, encoding);
ASSERT_EQ(2u, if0.GetDepth(encoding.inline_info_encoding));
ASSERT_EQ(2u, if0.GetDexPcAtDepth(encoding.inline_info_encoding, 0));
- ASSERT_EQ(42u, if0.GetMethodIndexAtDepth(encoding.inline_info_encoding, 0));
- ASSERT_EQ(kStatic, if0.GetInvokeTypeAtDepth(encoding.inline_info_encoding, 0));
+ ASSERT_TRUE(if0.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 0));
ASSERT_EQ(3u, if0.GetDexPcAtDepth(encoding.inline_info_encoding, 1));
- ASSERT_EQ(82u, if0.GetMethodIndexAtDepth(encoding.inline_info_encoding, 1));
- ASSERT_EQ(kStatic, if0.GetInvokeTypeAtDepth(encoding.inline_info_encoding, 1));
+ ASSERT_TRUE(if0.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 1));
DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(0, if0, encoding, 1);
ASSERT_EQ(8, dex_registers1.GetStackOffsetInBytes(0, 1, ci, encoding));
@@ -769,14 +768,11 @@ TEST(StackMapTest, InlineTest) {
InlineInfo if1 = ci.GetInlineInfoOf(sm1, encoding);
ASSERT_EQ(3u, if1.GetDepth(encoding.inline_info_encoding));
ASSERT_EQ(2u, if1.GetDexPcAtDepth(encoding.inline_info_encoding, 0));
- ASSERT_EQ(42u, if1.GetMethodIndexAtDepth(encoding.inline_info_encoding, 0));
- ASSERT_EQ(kDirect, if1.GetInvokeTypeAtDepth(encoding.inline_info_encoding, 0));
+ ASSERT_TRUE(if1.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 0));
ASSERT_EQ(3u, if1.GetDexPcAtDepth(encoding.inline_info_encoding, 1));
- ASSERT_EQ(82u, if1.GetMethodIndexAtDepth(encoding.inline_info_encoding, 1));
- ASSERT_EQ(kStatic, if1.GetInvokeTypeAtDepth(encoding.inline_info_encoding, 1));
+ ASSERT_TRUE(if1.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 1));
ASSERT_EQ(5u, if1.GetDexPcAtDepth(encoding.inline_info_encoding, 2));
- ASSERT_EQ(52u, if1.GetMethodIndexAtDepth(encoding.inline_info_encoding, 2));
- ASSERT_EQ(kVirtual, if1.GetInvokeTypeAtDepth(encoding.inline_info_encoding, 2));
+ ASSERT_TRUE(if1.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 2));
DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(0, if1, encoding, 1);
ASSERT_EQ(12, dex_registers1.GetStackOffsetInBytes(0, 1, ci, encoding));
@@ -810,14 +806,11 @@ TEST(StackMapTest, InlineTest) {
InlineInfo if2 = ci.GetInlineInfoOf(sm3, encoding);
ASSERT_EQ(3u, if2.GetDepth(encoding.inline_info_encoding));
ASSERT_EQ(2u, if2.GetDexPcAtDepth(encoding.inline_info_encoding, 0));
- ASSERT_EQ(42u, if2.GetMethodIndexAtDepth(encoding.inline_info_encoding, 0));
- ASSERT_EQ(kVirtual, if2.GetInvokeTypeAtDepth(encoding.inline_info_encoding, 0));
+ ASSERT_TRUE(if2.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 0));
ASSERT_EQ(5u, if2.GetDexPcAtDepth(encoding.inline_info_encoding, 1));
- ASSERT_EQ(52u, if2.GetMethodIndexAtDepth(encoding.inline_info_encoding, 1));
- ASSERT_EQ(kInterface, if2.GetInvokeTypeAtDepth(encoding.inline_info_encoding, 1));
+ ASSERT_TRUE(if2.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 1));
ASSERT_EQ(10u, if2.GetDexPcAtDepth(encoding.inline_info_encoding, 2));
- ASSERT_EQ(52u, if2.GetMethodIndexAtDepth(encoding.inline_info_encoding, 2));
- ASSERT_EQ(kStatic, if2.GetInvokeTypeAtDepth(encoding.inline_info_encoding, 2));
+ ASSERT_TRUE(if2.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 2));
ASSERT_FALSE(if2.HasDexRegisterMapAtDepth(encoding.inline_info_encoding, 0));
diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc
index ab4f9e944c..a3fce02970 100644
--- a/compiler/utils/assembler_thumb_test_expected.cc.inc
+++ b/compiler/utils/assembler_thumb_test_expected.cc.inc
@@ -5610,7 +5610,7 @@ const char* const VixlJniHelpersResults[] = {
" 214: ecbd 8a10 vpop {s16-s31}\n",
" 218: e8bd 8de0 ldmia.w sp!, {r5, r6, r7, r8, sl, fp, pc}\n",
" 21c: 4660 mov r0, ip\n",
- " 21e: f8d9 c2b0 ldr.w ip, [r9, #688] ; 0x2b0\n",
+ " 21e: f8d9 c2ac ldr.w ip, [r9, #684] ; 0x2ac\n",
" 222: 47e0 blx ip\n",
nullptr
};
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index cd30872986..d3b15ac8cf 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -350,6 +350,38 @@ void X86Assembler::movaps(XmmRegister dst, XmmRegister src) {
}
+void X86Assembler::movaps(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x28);
+ EmitOperand(dst, src);
+}
+
+
+void X86Assembler::movups(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x10);
+ EmitOperand(dst, src);
+}
+
+
+void X86Assembler::movaps(const Address& dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x29);
+ EmitOperand(src, dst);
+}
+
+
+void X86Assembler::movups(const Address& dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x11);
+ EmitOperand(src, dst);
+}
+
+
void X86Assembler::movss(XmmRegister dst, const Address& src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xF3);
@@ -467,6 +499,83 @@ void X86Assembler::divss(XmmRegister dst, const Address& src) {
}
+void X86Assembler::addps(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x58);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::subps(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x5C);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::mulps(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x59);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::divps(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x5E);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::movapd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x28);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::movapd(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x28);
+ EmitOperand(dst, src);
+}
+
+
+void X86Assembler::movupd(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x10);
+ EmitOperand(dst, src);
+}
+
+
+void X86Assembler::movapd(const Address& dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x29);
+ EmitOperand(src, dst);
+}
+
+
+void X86Assembler::movupd(const Address& dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x11);
+ EmitOperand(src, dst);
+}
+
+
void X86Assembler::flds(const Address& src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xD9);
@@ -638,6 +747,42 @@ void X86Assembler::divsd(XmmRegister dst, const Address& src) {
}
+void X86Assembler::addpd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x58);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::subpd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x5C);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::mulpd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x59);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::divpd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x5E);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
void X86Assembler::cvtsi2ss(XmmRegister dst, Register src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xF3);
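
The new packed emitters follow the usual SSE pattern: one opcode byte names the operation and a mandatory 0x66 prefix switches from packed-single to packed-double, so addps/addpd are 0F 58 / 66 0F 58, with 0x5C for sub, 0x59 for mul, and 0x5E for div. A standalone sketch of the byte layout for the register-register forms (not ART code):

#include <cassert>
#include <cstdint>
#include <vector>

// opcode: 0x58 add, 0x5C sub, 0x59 mul, 0x5E div (same table for ps and pd).
std::vector<uint8_t> EncodePackedArith(uint8_t opcode, bool double_precision,
                                       uint8_t dst, uint8_t src) {
  std::vector<uint8_t> out;
  if (double_precision) out.push_back(0x66);  // mandatory prefix: pd instead of ps
  out.push_back(0x0F);                        // two-byte opcode escape
  out.push_back(opcode);
  out.push_back(static_cast<uint8_t>(0xC0 | (dst << 3) | src));  // ModRM, reg-reg
  return out;
}

int main() {
  // addps %xmm1, %xmm0 (AT&T) -> 0F 58 C1
  assert((EncodePackedArith(0x58, false, 0, 1) ==
          std::vector<uint8_t>{0x0F, 0x58, 0xC1}));
  // divpd %xmm1, %xmm0 -> 66 0F 5E C1
  assert((EncodePackedArith(0x5E, true, 0, 1) ==
          std::vector<uint8_t>{0x66, 0x0F, 0x5E, 0xC1}));
  return 0;
}
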
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 114986b3e7..a93616c3e5 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -371,7 +371,12 @@ class X86Assembler FINAL : public Assembler {
void setb(Condition condition, Register dst);
- void movaps(XmmRegister dst, XmmRegister src);
+ void movaps(XmmRegister dst, XmmRegister src); // move
+ void movaps(XmmRegister dst, const Address& src); // load aligned
+ void movups(XmmRegister dst, const Address& src); // load unaligned
+ void movaps(const Address& dst, XmmRegister src); // store aligned
+ void movups(const Address& dst, XmmRegister src); // store unaligned
+
void movss(XmmRegister dst, const Address& src);
void movss(const Address& dst, XmmRegister src);
void movss(XmmRegister dst, XmmRegister src);
@@ -388,6 +393,17 @@ class X86Assembler FINAL : public Assembler {
void divss(XmmRegister dst, XmmRegister src);
void divss(XmmRegister dst, const Address& src);
+ void addps(XmmRegister dst, XmmRegister src); // no addr variant (for now)
+ void subps(XmmRegister dst, XmmRegister src);
+ void mulps(XmmRegister dst, XmmRegister src);
+ void divps(XmmRegister dst, XmmRegister src);
+
+ void movapd(XmmRegister dst, XmmRegister src); // move
+ void movapd(XmmRegister dst, const Address& src); // load aligned
+ void movupd(XmmRegister dst, const Address& src); // load unaligned
+ void movapd(const Address& dst, XmmRegister src); // store aligned
+ void movupd(const Address& dst, XmmRegister src); // store unaligned
+
void movsd(XmmRegister dst, const Address& src);
void movsd(const Address& dst, XmmRegister src);
void movsd(XmmRegister dst, XmmRegister src);
@@ -409,6 +425,11 @@ class X86Assembler FINAL : public Assembler {
void divsd(XmmRegister dst, XmmRegister src);
void divsd(XmmRegister dst, const Address& src);
+ void addpd(XmmRegister dst, XmmRegister src); // no addr variant (for now)
+ void subpd(XmmRegister dst, XmmRegister src);
+ void mulpd(XmmRegister dst, XmmRegister src);
+ void divpd(XmmRegister dst, XmmRegister src);
+
void cvtsi2ss(XmmRegister dst, Register src);
void cvtsi2sd(XmmRegister dst, Register src);
diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc
index 9bae6c20bd..4d60a12cb9 100644
--- a/compiler/utils/x86/assembler_x86_test.cc
+++ b/compiler/utils/x86/assembler_x86_test.cc
@@ -423,6 +423,98 @@ TEST_F(AssemblerX86Test, TestlAddressImmediate) {
DriverStr(expected, "TestlAddressImmediate");
}
+TEST_F(AssemblerX86Test, Movaps) {
+ DriverStr(RepeatFF(&x86::X86Assembler::movaps, "movaps %{reg2}, %{reg1}"), "movaps");
+}
+
+TEST_F(AssemblerX86Test, MovapsAddr) {
+ GetAssembler()->movaps(x86::XmmRegister(x86::XMM0), x86::Address(x86::Register(x86::ESP), 4));
+ GetAssembler()->movaps(x86::Address(x86::Register(x86::ESP), 2), x86::XmmRegister(x86::XMM1));
+ const char* expected =
+ "movaps 0x4(%ESP), %xmm0\n"
+ "movaps %xmm1, 0x2(%ESP)\n";
+ DriverStr(expected, "movaps_address");
+}
+
+TEST_F(AssemblerX86Test, MovupsAddr) {
+ GetAssembler()->movups(x86::XmmRegister(x86::XMM0), x86::Address(x86::Register(x86::ESP), 4));
+ GetAssembler()->movups(x86::Address(x86::Register(x86::ESP), 2), x86::XmmRegister(x86::XMM1));
+ const char* expected =
+ "movups 0x4(%ESP), %xmm0\n"
+ "movups %xmm1, 0x2(%ESP)\n";
+ DriverStr(expected, "movups_address");
+}
+
+TEST_F(AssemblerX86Test, Movapd) {
+ DriverStr(RepeatFF(&x86::X86Assembler::movapd, "movapd %{reg2}, %{reg1}"), "movapd");
+}
+
+TEST_F(AssemblerX86Test, MovapdAddr) {
+ GetAssembler()->movapd(x86::XmmRegister(x86::XMM0), x86::Address(x86::Register(x86::ESP), 4));
+ GetAssembler()->movapd(x86::Address(x86::Register(x86::ESP), 2), x86::XmmRegister(x86::XMM1));
+ const char* expected =
+ "movapd 0x4(%ESP), %xmm0\n"
+ "movapd %xmm1, 0x2(%ESP)\n";
+ DriverStr(expected, "movapd_address");
+}
+
+TEST_F(AssemblerX86Test, MovupdAddr) {
+ GetAssembler()->movupd(x86::XmmRegister(x86::XMM0), x86::Address(x86::Register(x86::ESP), 4));
+ GetAssembler()->movupd(x86::Address(x86::Register(x86::ESP), 2), x86::XmmRegister(x86::XMM1));
+ const char* expected =
+ "movupd 0x4(%ESP), %xmm0\n"
+ "movupd %xmm1, 0x2(%ESP)\n";
+ DriverStr(expected, "movupd_address");
+}
+
+TEST_F(AssemblerX86Test, AddPS) {
+ GetAssembler()->addps(x86::XmmRegister(x86::XMM0), x86::XmmRegister(x86::XMM1));
+ const char* expected = "addps %xmm1, %xmm0\n";
+ DriverStr(expected, "addps");
+}
+
+TEST_F(AssemblerX86Test, AddPD) {
+ GetAssembler()->addpd(x86::XmmRegister(x86::XMM0), x86::XmmRegister(x86::XMM1));
+ const char* expected = "addpd %xmm1, %xmm0\n";
+ DriverStr(expected, "addpd");
+}
+
+TEST_F(AssemblerX86Test, SubPS) {
+ GetAssembler()->subps(x86::XmmRegister(x86::XMM0), x86::XmmRegister(x86::XMM1));
+ const char* expected = "subps %xmm1, %xmm0\n";
+ DriverStr(expected, "subps");
+}
+
+TEST_F(AssemblerX86Test, SubPD) {
+ GetAssembler()->subpd(x86::XmmRegister(x86::XMM0), x86::XmmRegister(x86::XMM1));
+ const char* expected = "subpd %xmm1, %xmm0\n";
+ DriverStr(expected, "subpd");
+}
+
+TEST_F(AssemblerX86Test, MulPS) {
+ GetAssembler()->mulps(x86::XmmRegister(x86::XMM0), x86::XmmRegister(x86::XMM1));
+ const char* expected = "mulps %xmm1, %xmm0\n";
+ DriverStr(expected, "mulps");
+}
+
+TEST_F(AssemblerX86Test, MulPD) {
+ GetAssembler()->mulpd(x86::XmmRegister(x86::XMM0), x86::XmmRegister(x86::XMM1));
+ const char* expected = "mulpd %xmm1, %xmm0\n";
+ DriverStr(expected, "mulpd");
+}
+
+TEST_F(AssemblerX86Test, DivPS) {
+ GetAssembler()->divps(x86::XmmRegister(x86::XMM0), x86::XmmRegister(x86::XMM1));
+ const char* expected = "divps %xmm1, %xmm0\n";
+ DriverStr(expected, "divps");
+}
+
+TEST_F(AssemblerX86Test, DivPD) {
+ GetAssembler()->divpd(x86::XmmRegister(x86::XMM0), x86::XmmRegister(x86::XMM1));
+ const char* expected = "divpd %xmm1, %xmm0\n";
+ DriverStr(expected, "divpd");
+}
+
/////////////////
// Near labels //
/////////////////
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index e9a0607290..2366b68f11 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -386,6 +386,42 @@ void X86_64Assembler::movaps(XmmRegister dst, XmmRegister src) {
}
+void X86_64Assembler::movaps(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0x28);
+ EmitOperand(dst.LowBits(), src);
+}
+
+
+void X86_64Assembler::movups(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0x10);
+ EmitOperand(dst.LowBits(), src);
+}
+
+
+void X86_64Assembler::movaps(const Address& dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(src, dst);
+ EmitUint8(0x0F);
+ EmitUint8(0x29);
+ EmitOperand(src.LowBits(), dst);
+}
+
+
+void X86_64Assembler::movups(const Address& dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(src, dst);
+ EmitUint8(0x0F);
+ EmitUint8(0x11);
+ EmitOperand(src.LowBits(), dst);
+}
+
+
void X86_64Assembler::movss(XmmRegister dst, const Address& src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xF3);
@@ -539,6 +575,42 @@ void X86_64Assembler::divss(XmmRegister dst, const Address& src) {
}
+void X86_64Assembler::addps(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0x58);
+ EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
+
+void X86_64Assembler::subps(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0x5C);
+ EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
+
+void X86_64Assembler::mulps(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0x59);
+ EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
+
+void X86_64Assembler::divps(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0x5E);
+ EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
+
void X86_64Assembler::flds(const Address& src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xD9);
@@ -560,6 +632,56 @@ void X86_64Assembler::fstps(const Address& dst) {
}
+void X86_64Assembler::movapd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0x28);
+ EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
+
+void X86_64Assembler::movapd(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0x28);
+ EmitOperand(dst.LowBits(), src);
+}
+
+
+void X86_64Assembler::movupd(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0x10);
+ EmitOperand(dst.LowBits(), src);
+}
+
+
+void X86_64Assembler::movapd(const Address& dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitOptionalRex32(src, dst);
+ EmitUint8(0x0F);
+ EmitUint8(0x29);
+ EmitOperand(src.LowBits(), dst);
+}
+
+
+void X86_64Assembler::movupd(const Address& dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitOptionalRex32(src, dst);
+ EmitUint8(0x0F);
+ EmitUint8(0x11);
+ EmitOperand(src.LowBits(), dst);
+}
+
+
void X86_64Assembler::movsd(XmmRegister dst, const Address& src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xF2);
@@ -670,6 +792,46 @@ void X86_64Assembler::divsd(XmmRegister dst, const Address& src) {
}
+void X86_64Assembler::addpd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0x58);
+ EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
+
+void X86_64Assembler::subpd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0x5C);
+ EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
+
+void X86_64Assembler::mulpd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0x59);
+ EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
+
+void X86_64Assembler::divpd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0x5E);
+ EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
+
void X86_64Assembler::cvtsi2ss(XmmRegister dst, CpuRegister src) {
cvtsi2ss(dst, src, false);
}
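
The x86-64 variants differ from the x86 ones in two ways visible above: an optional REX byte is emitted so xmm8-xmm15 are reachable, and only the low three register bits (LowBits()) go into ModRM. The 0x66 mandatory prefix must precede REX, which is why every *pd emitter orders EmitUint8(0x66) ahead of EmitOptionalRex32. A standalone sketch of the register-register encoding (not ART code; only REX.R/REX.B are modeled):

#include <cassert>
#include <cstdint>
#include <vector>

std::vector<uint8_t> EncodePd(uint8_t opcode, uint8_t dst, uint8_t src) {
  std::vector<uint8_t> out;
  out.push_back(0x66);  // legacy/mandatory prefix first ...
  uint8_t rex = static_cast<uint8_t>(0x40 | ((dst >> 3) << 2) | (src >> 3));
  if (rex != 0x40) out.push_back(rex);  // ... then REX, only when a high reg needs it
  out.push_back(0x0F);
  out.push_back(opcode);
  out.push_back(static_cast<uint8_t>(0xC0 | ((dst & 7) << 3) | (src & 7)));  // LowBits()
  return out;
}

int main() {
  // addpd %xmm1, %xmm0 -> 66 0F 58 C1 (no REX needed)
  assert((EncodePd(0x58, 0, 1) == std::vector<uint8_t>{0x66, 0x0F, 0x58, 0xC1}));
  // addpd %xmm9, %xmm8 -> 66 45 0F 58 C1 (REX.R and REX.B set)
  assert((EncodePd(0x58, 8, 9) == std::vector<uint8_t>{0x66, 0x45, 0x0F, 0x58, 0xC1}));
  return 0;
}
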
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index acad86d161..5923a41fe3 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -390,7 +390,11 @@ class X86_64Assembler FINAL : public Assembler {
void leaq(CpuRegister dst, const Address& src);
void leal(CpuRegister dst, const Address& src);
- void movaps(XmmRegister dst, XmmRegister src);
+ void movaps(XmmRegister dst, XmmRegister src); // move
+ void movaps(XmmRegister dst, const Address& src); // load aligned
+ void movups(XmmRegister dst, const Address& src); // load unaligned
+ void movaps(const Address& dst, XmmRegister src); // store aligned
+ void movups(const Address& dst, XmmRegister src); // store unaligned
void movss(XmmRegister dst, const Address& src);
void movss(const Address& dst, XmmRegister src);
@@ -413,6 +417,17 @@ class X86_64Assembler FINAL : public Assembler {
void divss(XmmRegister dst, XmmRegister src);
void divss(XmmRegister dst, const Address& src);
+ void addps(XmmRegister dst, XmmRegister src); // no addr variant (for now)
+ void subps(XmmRegister dst, XmmRegister src);
+ void mulps(XmmRegister dst, XmmRegister src);
+ void divps(XmmRegister dst, XmmRegister src);
+
+ void movapd(XmmRegister dst, XmmRegister src); // move
+ void movapd(XmmRegister dst, const Address& src); // load aligned
+ void movupd(XmmRegister dst, const Address& src); // load unaligned
+ void movapd(const Address& dst, XmmRegister src); // store aligned
+ void movupd(const Address& dst, XmmRegister src); // store unaligned
+
void movsd(XmmRegister dst, const Address& src);
void movsd(const Address& dst, XmmRegister src);
void movsd(XmmRegister dst, XmmRegister src);
@@ -426,6 +441,11 @@ class X86_64Assembler FINAL : public Assembler {
void divsd(XmmRegister dst, XmmRegister src);
void divsd(XmmRegister dst, const Address& src);
+ void addpd(XmmRegister dst, XmmRegister src); // no addr variant (for now)
+ void subpd(XmmRegister dst, XmmRegister src);
+ void mulpd(XmmRegister dst, XmmRegister src);
+ void divpd(XmmRegister dst, XmmRegister src);
+
void cvtsi2ss(XmmRegister dst, CpuRegister src); // Note: this is the r/m32 version.
void cvtsi2ss(XmmRegister dst, CpuRegister src, bool is64bit);
void cvtsi2ss(XmmRegister dst, const Address& src, bool is64bit);
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index ff01429058..2812c34406 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -986,10 +986,50 @@ TEST_F(AssemblerX86_64Test, Movaps) {
DriverStr(RepeatFF(&x86_64::X86_64Assembler::movaps, "movaps %{reg2}, %{reg1}"), "movaps");
}
+TEST_F(AssemblerX86_64Test, MovapsAddr) {
+ GetAssembler()->movaps(x86_64::XmmRegister(x86_64::XMM0), x86_64::Address(x86_64::CpuRegister(x86_64::RSP), 4));
+ GetAssembler()->movaps(x86_64::Address(x86_64::CpuRegister(x86_64::RSP), 2), x86_64::XmmRegister(x86_64::XMM1));
+ const char* expected =
+ "movaps 0x4(%RSP), %xmm0\n"
+ "movaps %xmm1, 0x2(%RSP)\n";
+ DriverStr(expected, "movaps_address");
+}
+
+TEST_F(AssemblerX86_64Test, MovupsAddr) {
+ GetAssembler()->movups(x86_64::XmmRegister(x86_64::XMM0), x86_64::Address(x86_64::CpuRegister(x86_64::RSP), 4));
+ GetAssembler()->movups(x86_64::Address(x86_64::CpuRegister(x86_64::RSP), 2), x86_64::XmmRegister(x86_64::XMM1));
+ const char* expected =
+ "movups 0x4(%RSP), %xmm0\n"
+ "movups %xmm1, 0x2(%RSP)\n";
+ DriverStr(expected, "movups_address");
+}
+
TEST_F(AssemblerX86_64Test, Movss) {
DriverStr(RepeatFF(&x86_64::X86_64Assembler::movss, "movss %{reg2}, %{reg1}"), "movss");
}
+TEST_F(AssemblerX86_64Test, Movapd) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::movapd, "movapd %{reg2}, %{reg1}"), "movapd");
+}
+
+TEST_F(AssemblerX86_64Test, MovapdAddr) {
+ GetAssembler()->movapd(x86_64::XmmRegister(x86_64::XMM0), x86_64::Address(x86_64::CpuRegister(x86_64::RSP), 4));
+ GetAssembler()->movapd(x86_64::Address(x86_64::CpuRegister(x86_64::RSP), 2), x86_64::XmmRegister(x86_64::XMM1));
+ const char* expected =
+ "movapd 0x4(%RSP), %xmm0\n"
+ "movapd %xmm1, 0x2(%RSP)\n";
+ DriverStr(expected, "movapd_address");
+}
+
+TEST_F(AssemblerX86_64Test, MovupdAddr) {
+ GetAssembler()->movupd(x86_64::XmmRegister(x86_64::XMM0), x86_64::Address(x86_64::CpuRegister(x86_64::RSP), 4));
+ GetAssembler()->movupd(x86_64::Address(x86_64::CpuRegister(x86_64::RSP), 2), x86_64::XmmRegister(x86_64::XMM1));
+ const char* expected =
+ "movupd 0x4(%RSP), %xmm0\n"
+ "movupd %xmm1, 0x2(%RSP)\n";
+ DriverStr(expected, "movupd_address");
+}
+
TEST_F(AssemblerX86_64Test, Movsd) {
DriverStr(RepeatFF(&x86_64::X86_64Assembler::movsd, "movsd %{reg2}, %{reg1}"), "movsd");
}
@@ -1010,6 +1050,14 @@ TEST_F(AssemblerX86_64Test, Addsd) {
DriverStr(RepeatFF(&x86_64::X86_64Assembler::addsd, "addsd %{reg2}, %{reg1}"), "addsd");
}
+TEST_F(AssemblerX86_64Test, Addps) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::addps, "addps %{reg2}, %{reg1}"), "addps");
+}
+
+TEST_F(AssemblerX86_64Test, Addpd) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::addpd, "addpd %{reg2}, %{reg1}"), "addpd");
+}
+
TEST_F(AssemblerX86_64Test, Subss) {
DriverStr(RepeatFF(&x86_64::X86_64Assembler::subss, "subss %{reg2}, %{reg1}"), "subss");
}
@@ -1018,6 +1066,14 @@ TEST_F(AssemblerX86_64Test, Subsd) {
DriverStr(RepeatFF(&x86_64::X86_64Assembler::subsd, "subsd %{reg2}, %{reg1}"), "subsd");
}
+TEST_F(AssemblerX86_64Test, Subps) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::subps, "subps %{reg2}, %{reg1}"), "subps");
+}
+
+TEST_F(AssemblerX86_64Test, Subpd) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::subpd, "subpd %{reg2}, %{reg1}"), "subpd");
+}
+
TEST_F(AssemblerX86_64Test, Mulss) {
DriverStr(RepeatFF(&x86_64::X86_64Assembler::mulss, "mulss %{reg2}, %{reg1}"), "mulss");
}
@@ -1026,6 +1082,14 @@ TEST_F(AssemblerX86_64Test, Mulsd) {
DriverStr(RepeatFF(&x86_64::X86_64Assembler::mulsd, "mulsd %{reg2}, %{reg1}"), "mulsd");
}
+TEST_F(AssemblerX86_64Test, Mulps) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::mulps, "mulps %{reg2}, %{reg1}"), "mulps");
+}
+
+TEST_F(AssemblerX86_64Test, Mulpd) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::mulpd, "mulpd %{reg2}, %{reg1}"), "mulpd");
+}
+
TEST_F(AssemblerX86_64Test, Divss) {
DriverStr(RepeatFF(&x86_64::X86_64Assembler::divss, "divss %{reg2}, %{reg1}"), "divss");
}
@@ -1034,6 +1098,14 @@ TEST_F(AssemblerX86_64Test, Divsd) {
DriverStr(RepeatFF(&x86_64::X86_64Assembler::divsd, "divsd %{reg2}, %{reg1}"), "divsd");
}
+TEST_F(AssemblerX86_64Test, Divps) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::divps, "divps %{reg2}, %{reg1}"), "divps");
+}
+
+TEST_F(AssemblerX86_64Test, Divpd) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::divpd, "divpd %{reg2}, %{reg1}"), "divpd");
+}
+
TEST_F(AssemblerX86_64Test, Cvtsi2ss) {
DriverStr(RepeatFr(&x86_64::X86_64Assembler::cvtsi2ss, "cvtsi2ss %{reg2}, %{reg1}"), "cvtsi2ss");
}