author Chris Jones <christopher.jones@arm.com> 2024-01-05 16:30:58 +0000
committer Treehugger Robot <android-test-infra-autosubmit@system.gserviceaccount.com> 2025-02-19 03:02:56 -0800
commit 0e8091312485670e84ee17daf25256e5836112b0 (patch)
tree 37e94c4a289fe9d9296d1e1d57854f824b7e9fdb /compiler
parent 0c35b6b7b0f9cd98220f0b291071733e44c97a54 (diff)
[Sim] Add a restricted mode setup
This CL introduces a special ART setup for all modes (host, target), which is going to be the base for the VIXL Simulator Stage 1 setup. The setup can be enabled by exporting ART_USE_RESTRICTED_MODE=true to the environment before building ART.

The setup limits ART by doing the following:
- Limits the GC to non-concurrent Mark&Sweep only.
- Turns off Nterp (so only the switch C++ interpreter is used).
- Turns off the JNI compiler (GenericJNITrampoline is used instead).
- Rejects the compilation of all methods (except for the allow list).
- Turns off compilation and support of intrinsics.
- Turns off implicit null checks.
- Turns off implicit suspend checks.
- Turns off introspection entrypoints.
- Turns off special behavior for critical native methods.
- Turns off the compiler CHECKER tool.

With these limitations it will be easier to start off an initial Simulator Stage 1 setup. As the limitations apply to all modes, it will be easy to debug the simulator-mode workflow and to compare it against host and target runs.

The CL also adds sections to knownfailures.json for tests that fail in this special setup.

Also clean up some read barrier entrypoint declarations in entrypoints_init_arm64.cc to match the definitions in quick_entrypoints_arm64.S, and slightly refactor Runtime::Init to stay below the 500-line limit.

Author: Artem Serov <artem.serov@linaro.org>
        Artem Serov <artem.serov@arm.com>
        Chris Jones <christopher.jones@arm.com>

Test: export ART_USE_RESTRICTED_MODE=true
      test.py --host --target
Change-Id: I87319cf339646dc13b9086b00af08882b01603c8
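
As a rough illustration (not part of the CL itself), the restricted mode is a build-time switch: exporting ART_USE_RESTRICTED_MODE=true is assumed to make the build define an ART_USE_RESTRICTED_MODE preprocessor macro, and the features listed above are then disabled behind compile-time guards along these lines:

// Minimal sketch of the gating pattern used throughout this CL; illustrative
// only, the real guards live in the hunks below. The ART_USE_RESTRICTED_MODE
// macro is assumed to be emitted by the build when the environment variable
// ART_USE_RESTRICTED_MODE=true is exported before building ART.
#include <iostream>

bool IsFeatureEnabled() {
#ifdef ART_USE_RESTRICTED_MODE
  // Restricted mode: the feature (JNI compiler, intrinsics, ...) is disabled.
  return false;
#else
  // Regular host/target behavior.
  return true;
#endif
}

int main() {
  std::cout << (IsFeatureEnabled() ? "feature enabled" : "feature disabled") << std::endl;
  return 0;
}
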
Diffstat (limited to 'compiler')
-rw-r--r-- compiler/driver/compiler_options.h         |  6
-rw-r--r-- compiler/optimizing/code_generator.cc      |  1
-rw-r--r-- compiler/optimizing/intrinsics_arm64.cc    |  6
-rw-r--r-- compiler/optimizing/optimizing_compiler.cc | 64
4 files changed, 77 insertions, 0 deletions
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 36ecf88199..a3957ce232 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -101,7 +101,13 @@ class CompilerOptions final {
}
bool IsJniCompilationEnabled() const {
+#ifdef ART_USE_RESTRICTED_MODE
+ // TODO(Simulator): Support JNICompiler.
+ // Without the JNI compiler, GenericJNITrampoline will be used for JNI calls.
+ return false;
+#else
return CompilerFilter::IsJniCompilationEnabled(compiler_filter_);
+#endif
}
bool IsVerificationEnabled() const {
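
The hunk above disables the JNI compiler under restricted mode, so JNI calls go through GenericJNITrampoline instead of a compiled stub. A standalone, purely illustrative sketch of that decision (the mock type below is hypothetical; only the IsJniCompilationEnabled() name comes from the patch):

#include <iostream>

// Hypothetical stand-in for ART's CompilerOptions, for illustration only.
struct MockCompilerOptions {
  bool restricted_mode;
  bool IsJniCompilationEnabled() const { return !restricted_mode; }
};

int main() {
  MockCompilerOptions options{/*restricted_mode=*/true};
  if (options.IsJniCompilationEnabled()) {
    std::cout << "compile a dedicated JNI stub" << std::endl;
  } else {
    std::cout << "fall back to GenericJNITrampoline for this JNI call" << std::endl;
  }
  return 0;
}
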
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index e84cfcbe80..5c2e4dbc51 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -1569,6 +1569,7 @@ bool CodeGenerator::CanMoveNullCheckToUser(HNullCheck* null_check) {
void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) {
HNullCheck* null_check = instr->GetImplicitNullCheck();
if (null_check != nullptr) {
+ DCHECK(compiler_options_.GetImplicitNullChecks());
RecordPcInfo(null_check);
}
}
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 31e617baec..98aa5600b4 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -200,12 +200,18 @@ class InvokePolymorphicSlowPathARM64 : public SlowPathCodeARM64 {
#undef __
bool IntrinsicLocationsBuilderARM64::TryDispatch(HInvoke* invoke) {
+#ifdef ART_USE_RESTRICTED_MODE
+ // TODO(Simulator): support intrinsics.
+ USE(invoke);
+ return false;
+#else
Dispatch(invoke);
LocationSummary* res = invoke->GetLocations();
if (res == nullptr) {
return false;
}
return res->Intrinsified();
+#endif // ART_USE_RESTRICTED_MODE
}
#define __ masm->
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index ef84827653..970771424b 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -759,6 +759,51 @@ CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* allocator,
return compiled_method;
}
+#ifdef ART_USE_RESTRICTED_MODE
+
+// This class acts as a filter that allows the ART Simulator work to be enabled gradually - we
+// compile (and hence simulate) only limited types of methods.
+class CompilationFilterForRestrictedMode : public HGraphDelegateVisitor {
+ public:
+ explicit CompilationFilterForRestrictedMode(HGraph* graph)
+ : HGraphDelegateVisitor(graph),
+ has_unsupported_instructions_(false) {}
+
+ // Returns true if the graph contains instructions which are not currently supported in
+ // the restricted mode.
+ bool GraphRejected() const { return has_unsupported_instructions_; }
+
+ private:
+ void VisitInstruction(HInstruction*) override {
+ // Currently we don't support compiling methods unless they were annotated with $compile$.
+ RejectGraph();
+ }
+ void RejectGraph() {
+ has_unsupported_instructions_ = true;
+ }
+
+ bool has_unsupported_instructions_;
+};
+
+// Returns whether an ArtMethod, specified by a name, should be compiled. Used in restricted
+// mode.
+//
+// In restricted mode, the simulator will execute only those methods which are compiled; thus
+// this is going to be an effective filter for methods to be simulated.
+//
+// TODO(Simulator): compile and simulate all the methods as in regular host mode.
+bool ShouldMethodBeCompiled(HGraph* graph, const std::string& method_name) {
+ if (method_name.find("$compile$") != std::string::npos) {
+ return true;
+ }
+
+ CompilationFilterForRestrictedMode filter_visitor(graph);
+ filter_visitor.VisitReversePostOrder();
+
+ return !filter_visitor.GraphRejected();
+}
+#endif // ART_USE_RESTRICTED_MODE
+
CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
ArenaStack* arena_stack,
const DexCompilationUnit& dex_compilation_unit,
@@ -958,6 +1003,17 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
return nullptr;
}
+#ifdef ART_USE_RESTRICTED_MODE
+ // Check whether the method should be compiled according to the compilation filter. Note: this
+ // relies on a LocationSummary being available for each instruction so should take place after
+ // register allocation does liveness analysis.
+ // TODO(Simulator): support and compile all methods.
+ std::string method_name = dex_file.PrettyMethod(method_idx);
+ if (!ShouldMethodBeCompiled(graph, method_name)) {
+ return nullptr;
+ }
+#endif // ART_USE_RESTRICTED_MODE
+
codegen->Compile();
pass_observer.DumpDisassembly();
@@ -977,6 +1033,11 @@ CodeGenerator* OptimizingCompiler::TryCompileIntrinsic(
const DexFile& dex_file = *dex_compilation_unit.GetDexFile();
uint32_t method_idx = dex_compilation_unit.GetDexMethodIndex();
+ // TODO(Simulator): Reenable compilation of intrinsics.
+#ifdef ART_USE_RESTRICTED_MODE
+ return nullptr;
+#endif // ART_USE_RESTRICTED_MODE
+
// Always use the Thumb-2 assembler: some runtime functionality
// (like implicit stack overflow checks) assume Thumb-2.
DCHECK_NE(instruction_set, InstructionSet::kArm);
@@ -1149,6 +1210,8 @@ CompiledMethod* OptimizingCompiler::Compile(const dex::CodeItem* code_item,
}
}
+ // TODO(Simulator): Check for $opt$ in method name and that such method is compiled.
+#ifndef ART_USE_RESTRICTED_MODE
if (kIsDebugBuild &&
compiler_options.CompileArtTest() &&
IsInstructionSetSupported(compiler_options.GetInstructionSet())) {
@@ -1160,6 +1223,7 @@ CompiledMethod* OptimizingCompiler::Compile(const dex::CodeItem* code_item,
bool shouldCompile = method_name.find("$opt$") != std::string::npos;
DCHECK_IMPLIES(compiled_method == nullptr, !shouldCompile) << "Didn't compile " << method_name;
}
+#endif // #ifndef ART_USE_RESTRICTED_MODE
return compiled_method;
}
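
For reference, a standalone analogue of the method filter added to optimizing_compiler.cc above. ART's HGraph and HGraphDelegateVisitor types are not reproduced here; the simplified types below are illustrative only. Under restricted mode a method is compiled only if its pretty name carries the "$compile$" marker, because the visitor rejects any graph containing instructions:

#include <string>
#include <vector>

// Illustrative stand-ins for ART's graph types.
struct Instruction {};
struct Graph { std::vector<Instruction> instructions; };

// Mirrors CompilationFilterForRestrictedMode: visiting any instruction marks
// the graph as containing unsupported instructions.
class RejectAllFilter {
 public:
  void Visit(const Graph& graph) {
    if (!graph.instructions.empty()) {
      has_unsupported_instructions_ = true;
    }
  }
  bool GraphRejected() const { return has_unsupported_instructions_; }

 private:
  bool has_unsupported_instructions_ = false;
};

// Mirrors ShouldMethodBeCompiled(): the "$compile$" marker in the method name
// acts as the opt-in allow list; every other method is rejected.
bool ShouldMethodBeCompiled(const Graph& graph, const std::string& method_name) {
  if (method_name.find("$compile$") != std::string::npos) {
    return true;
  }
  RejectAllFilter filter;
  filter.Visit(graph);
  return !filter.GraphRejected();
}

int main() {
  Graph graph{{Instruction{}}};
  // Only the $compile$-annotated name passes the filter.
  return ShouldMethodBeCompiled(graph, "int Foo.$compile$add(int, int)") ? 0 : 1;
}
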