author     2019-06-26 22:00:02 +0000
committer  2019-06-26 22:00:38 +0000
commit     2fef66b294417d447630f9d98de68227eef476d3 (patch)
tree       59fc4ea6cf13217eb92fbdd4e39561b5ad518cf0 /compiler/optimizing
parent     21d5994583c679cd5d8573b5d35dbd659bdca2c7 (diff)
Revert "Make the JIT zygote memory shared."
This reverts commit 05f87217ddc9b4b9186710c0135b918f456c5aef.
Bug: 119800099
Bug: 136110523
Reason for revert: testWebview flaking
Change-Id: I96afa6bc9c56c4aaf5ed72ae370f6f69c096c559
Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/inliner.cc              |  3
-rw-r--r--  compiler/optimizing/instruction_builder.cc  | 22
-rw-r--r--  compiler/optimizing/nodes.h                 | 12
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc  |  7
-rw-r--r--  compiler/optimizing/sharpening.cc           | 41
5 files changed, 19 insertions, 66 deletions
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 15155751a7..9ef5ec31d1 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -1814,8 +1814,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
       callee_dead_reference_safe,
       graph_->IsDebuggable(),
       /* osr= */ false,
-      /* is_shared_jit_code= */ graph_->IsCompilingForSharedJitCode(),
-      /* start_instruction_id= */ caller_instruction_counter);
+      caller_instruction_counter);
   callee_graph->SetArtMethod(resolved_method);
 
   // When they are needed, allocate `inline_stats_` on the Arena instead
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index f8f813e3fd..5e7b57523f 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -29,7 +29,6 @@
 #include "driver/dex_compilation_unit.h"
 #include "driver/compiler_options.h"
 #include "imtable-inl.h"
-#include "jit/jit.h"
 #include "mirror/dex_cache.h"
 #include "oat_file.h"
 #include "optimizing_compiler_stats.h"
@@ -1291,20 +1290,15 @@ bool HInstructionBuilder::IsInitialized(Handle<mirror::Class> cls) const {
   // Check if the class will be initialized at runtime.
   if (cls->IsInitialized()) {
     Runtime* runtime = Runtime::Current();
-    if (runtime->IsAotCompiler()) {
-      // Assume loaded only if klass is in the boot image. App classes cannot be assumed
-      // loaded because we don't even know what class loader will be used to load them.
-      if (IsInBootImage(cls.Get(), code_generator_->GetCompilerOptions())) {
-        return true;
-      }
-    } else {
+    if (!runtime->IsAotCompiler()) {
       DCHECK(runtime->UseJitCompilation());
-      if (Runtime::Current()->GetJit()->CanAssumeInitialized(
-              cls.Get(),
-              graph_->IsCompilingForSharedJitCode())) {
-        // For JIT, the class cannot revert to an uninitialized state.
-        return true;
-      }
+      // For JIT, the class cannot revert to an uninitialized state.
+      return true;
+    }
+    // Assume loaded only if klass is in the boot image. App classes cannot be assumed
+    // loaded because we don't even know what class loader will be used to load them.
+    if (IsInBootImage(cls.Get(), code_generator_->GetCompilerOptions())) {
+      return true;
     }
   }
 
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 759a8e6638..8ac33a4309 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -320,7 +320,6 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
          bool dead_reference_safe = false,
          bool debuggable = false,
          bool osr = false,
-         bool is_shared_jit_code = false,
          int start_instruction_id = 0)
       : allocator_(allocator),
         arena_stack_(arena_stack),
@@ -356,8 +355,7 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
         art_method_(nullptr),
         inexact_object_rti_(ReferenceTypeInfo::CreateInvalid()),
         osr_(osr),
-        cha_single_implementation_list_(allocator->Adapter(kArenaAllocCHA)),
-        is_shared_jit_code_(is_shared_jit_code) {
+        cha_single_implementation_list_(allocator->Adapter(kArenaAllocCHA)) {
     blocks_.reserve(kDefaultNumberOfBlocks);
   }
 
@@ -587,10 +585,6 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
 
   bool IsCompilingOsr() const { return osr_; }
 
-  bool IsCompilingForSharedJitCode() const {
-    return is_shared_jit_code_;
-  }
-
   ArenaSet<ArtMethod*>& GetCHASingleImplementationList() {
     return cha_single_implementation_list_;
   }
@@ -780,10 +774,6 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
   // List of methods that are assumed to have single implementation.
   ArenaSet<ArtMethod*> cha_single_implementation_list_;
 
-  // Whether we are JIT compiling in the shared region area, putting
-  // restrictions on, for example, how literals are being generated.
-  bool is_shared_jit_code_;
-
   friend class SsaBuilder;           // For caching constants.
   friend class SsaLivenessAnalysis;  // For the linear order.
   friend class HInliner;             // For the reverse post order.
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 6f3b9feb9d..2153ddd7b4 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -384,7 +384,6 @@ class OptimizingCompiler final : public Compiler {
                             ArtMethod* method,
                             bool baseline,
                             bool osr,
-                            bool is_shared_jit_code,
                             VariableSizedHandleScope* handles) const;
 
   CodeGenerator* TryCompileIntrinsic(ArenaAllocator* allocator,
@@ -784,7 +783,6 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
                                               ArtMethod* method,
                                               bool baseline,
                                               bool osr,
-                                              bool is_shared_jit_code,
                                               VariableSizedHandleScope* handles) const {
   MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kAttemptBytecodeCompilation);
   const CompilerOptions& compiler_options = GetCompilerOptions();
@@ -852,8 +850,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
       kInvalidInvokeType,
       dead_reference_safe,
       compiler_options.GetDebuggable(),
-      /* osr= */ osr,
-      /* is_shared_jit_code= */ is_shared_jit_code);
+      /* osr= */ osr);
 
   if (method != nullptr) {
     graph->SetArtMethod(method);
@@ -1110,7 +1107,6 @@ CompiledMethod* OptimizingCompiler::Compile(const dex::CodeItem* code_item,
                      method,
                      compiler_options.IsBaseline(),
                      /* osr= */ false,
-                     /* is_shared_jit_code= */ false,
                      &handles));
     }
   }
@@ -1372,7 +1368,6 @@ bool OptimizingCompiler::JitCompile(Thread* self,
                    method,
                    baseline,
                    osr,
-                   /* is_shared_jit_code= */ code_cache->IsSharedRegion(*region),
                    &handles));
     if (codegen.get() == nullptr) {
       return false;
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index 3e22edc773..8637db13ad 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -19,7 +19,6 @@
 #include "art_method-inl.h"
 #include "base/casts.h"
 #include "base/enums.h"
-#include "base/logging.h"
 #include "class_linker.h"
 #include "code_generator.h"
 #include "driver/compiler_options.h"
@@ -27,7 +26,6 @@
 #include "gc/heap.h"
 #include "gc/space/image_space.h"
 #include "handle_scope-inl.h"
-#include "jit/jit.h"
 #include "mirror/dex_cache.h"
 #include "mirror/string.h"
 #include "nodes.h"
@@ -100,17 +98,11 @@ HInvokeStaticOrDirect::DispatchInfo HSharpening::SharpenInvokeStaticOrDirect(
     }
     code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
   } else if (Runtime::Current()->UseJitCompilation()) {
-    if (Runtime::Current()->GetJit()->CanEncodeMethod(
-            callee,
-            codegen->GetGraph()->IsCompilingForSharedJitCode())) {
-      method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kJitDirectAddress;
-      method_load_data = reinterpret_cast<uintptr_t>(callee);
-      code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
-    } else {
-      // Do not sharpen.
-      method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall;
-      code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
-    }
+    // JIT or on-device AOT compilation referencing a boot image method.
+    // Use the method address directly.
+    method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kJitDirectAddress;
+    method_load_data = reinterpret_cast<uintptr_t>(callee);
+    code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
   } else if (IsInBootImage(callee)) {
     // Use PC-relative access to the .data.bimg.rel.ro methods array.
     method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo;
@@ -183,16 +175,7 @@ HLoadClass::LoadKind HSharpening::ComputeLoadClassKind(
       if (is_in_boot_image) {
         desired_load_kind = HLoadClass::LoadKind::kJitBootImageAddress;
       } else if (klass != nullptr) {
-        if (runtime->GetJit()->CanEncodeClass(
-                klass.Get(),
-                codegen->GetGraph()->IsCompilingForSharedJitCode())) {
-          desired_load_kind = HLoadClass::LoadKind::kJitTableAddress;
-        } else {
-          // Shared JIT code cannot encode a literal that the GC can move.
-          VLOG(jit) << "Unable to encode in shared region class literal: "
-                    << klass->PrettyClass();
-          desired_load_kind = HLoadClass::LoadKind::kRuntimeCall;
-        }
+        desired_load_kind = HLoadClass::LoadKind::kJitTableAddress;
       } else {
         // Class not loaded yet. This happens when the dex code requesting
         // this `HLoadClass` hasn't been executed in the interpreter.
@@ -348,18 +331,10 @@ void HSharpening::ProcessLoadString(
       DCHECK(!codegen->GetCompilerOptions().GetCompilePic());
       string = class_linker->LookupString(string_index, dex_cache.Get());
       if (string != nullptr) {
-        gc::Heap* heap = runtime->GetHeap();
-        if (heap->ObjectIsInBootImageSpace(string)) {
+        if (runtime->GetHeap()->ObjectIsInBootImageSpace(string)) {
           desired_load_kind = HLoadString::LoadKind::kJitBootImageAddress;
-        } else if (runtime->GetJit()->CanEncodeString(
-            string,
-            codegen->GetGraph()->IsCompilingForSharedJitCode())) {
-          desired_load_kind = HLoadString::LoadKind::kJitTableAddress;
         } else {
-          // Shared JIT code cannot encode a literal that the GC can move.
-          VLOG(jit) << "Unable to encode in shared region string literal: "
-                    << string->ToModifiedUtf8();
-          desired_load_kind = HLoadString::LoadKind::kRuntimeCall;
+          desired_load_kind = HLoadString::LoadKind::kJitTableAddress;
         }
       } else {
         desired_load_kind = HLoadString::LoadKind::kRuntimeCall;