summary refs log tree commit diff
path: root/compiler
diff options
context:
space:
mode:
Diffstat (limited to 'compiler')
-rw-r--r--compiler/Android.mk1
-rw-r--r--compiler/common_compiler_test.cc413
-rw-r--r--compiler/common_compiler_test.h401
-rw-r--r--compiler/dex/quick/arm/int_arm.cc20
-rw-r--r--compiler/dex/quick/arm64/assemble_arm64.cc22
-rw-r--r--compiler/dex/quick/arm64/codegen_arm64.h8
-rw-r--r--compiler/dex/quick/arm64/int_arm64.cc23
-rw-r--r--compiler/dex/quick/arm64/target_arm64.cc78
-rw-r--r--compiler/dex/quick/dex_file_method_inliner.cc10
-rw-r--r--compiler/dex/quick/dex_file_method_inliner.h3
-rwxr-xr-xcompiler/dex/quick/gen_invoke.cc85
-rw-r--r--compiler/dex/quick/mir_to_lir.h3
-rw-r--r--compiler/dex/quick_compiler_callbacks.cc39
-rw-r--r--compiler/dex/quick_compiler_callbacks.h (renamed from compiler/driver/compiler_callbacks_impl.h)34
-rw-r--r--compiler/driver/compiler_driver_test.cc3
-rw-r--r--compiler/elf_writer_test.cc2
-rw-r--r--compiler/image_test.cc10
-rw-r--r--compiler/jni/quick/calling_convention.h1
-rw-r--r--compiler/oat_test.cc14
-rw-r--r--compiler/optimizing/code_generator.cc35
-rw-r--r--compiler/optimizing/code_generator.h6
-rw-r--r--compiler/optimizing/code_generator_arm.cc39
-rw-r--r--compiler/optimizing/code_generator_arm.h5
-rw-r--r--compiler/optimizing/code_generator_x86.cc36
-rw-r--r--compiler/optimizing/code_generator_x86.h5
-rw-r--r--compiler/optimizing/code_generator_x86_64.cc41
-rw-r--r--compiler/optimizing/code_generator_x86_64.h5
-rw-r--r--compiler/optimizing/nodes.h5
-rw-r--r--compiler/optimizing/register_allocator.h1
-rw-r--r--compiler/output_stream_test.cc1
30 files changed, 760 insertions, 589 deletions
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 98a4c2fbb9..02dad2ae36 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -72,6 +72,7 @@ LIBART_COMPILER_SRC_FILES := \
dex/verification_results.cc \
dex/vreg_analysis.cc \
dex/ssa_transformation.cc \
+ dex/quick_compiler_callbacks.cc \
driver/compiler_driver.cc \
driver/dex_compilation_unit.cc \
jni/quick/arm/calling_convention_arm.cc \
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
new file mode 100644
index 0000000000..051cfb6dbc
--- /dev/null
+++ b/compiler/common_compiler_test.cc
@@ -0,0 +1,413 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "common_compiler_test.h"
+
+#if defined(__arm__)
+#include <sys/ucontext.h>
+#endif
+#include <fstream>
+
+#include "class_linker.h"
+#include "compiled_method.h"
+#include "dex/quick_compiler_callbacks.h"
+#include "dex/verification_results.h"
+#include "dex/quick/dex_file_to_method_inliner_map.h"
+#include "driver/compiler_driver.h"
+#include "entrypoints/entrypoint_utils.h"
+#include "interpreter/interpreter.h"
+#include "mirror/art_method.h"
+#include "mirror/dex_cache.h"
+#include "mirror/object-inl.h"
+#include "scoped_thread_state_change.h"
+#include "thread-inl.h"
+#include "utils.h"
+
+namespace art {
+
+// Normally the ClassLinker supplies this.
+extern "C" void art_quick_generic_jni_trampoline(mirror::ArtMethod*);
+
+#if defined(__arm__)
+// A signal handler called when have an illegal instruction. We record the fact in
+// a global boolean and then increment the PC in the signal context to return to
+// the next instruction. We know the instruction is an sdiv (4 bytes long).
+static void baddivideinst(int signo, siginfo *si, void *data) {
+ UNUSED(signo);
+ UNUSED(si);
+ struct ucontext *uc = (struct ucontext *)data;
+ struct sigcontext *sc = &uc->uc_mcontext;
+ sc->arm_r0 = 0; // set R0 to #0 to signal error
+ sc->arm_pc += 4; // skip offending instruction
+}
+
+// This is in arch/arm/arm_sdiv.S. It does the following:
+// mov r1,#1
+// sdiv r0,r1,r1
+// bx lr
+//
+// the result will be the value 1 if sdiv is supported. If it is not supported
+// a SIGILL signal will be raised and the signal handler (baddivideinst) called.
+// The signal handler sets r0 to #0 and then increments pc beyond the failed instruction.
+// Thus if the instruction is not supported, the result of this function will be #0
+
+extern "C" bool CheckForARMSDIVInstruction();
+
+static InstructionSetFeatures GuessInstructionFeatures() {
+ InstructionSetFeatures f;
+
+ // Uncomment this for processing of /proc/cpuinfo.
+ if (false) {
+ // Look in /proc/cpuinfo for features we need. Only use this when we can guarantee that
+ // the kernel puts the appropriate feature flags in here. Sometimes it doesn't.
+ std::ifstream in("/proc/cpuinfo");
+ if (in) {
+ while (!in.eof()) {
+ std::string line;
+ std::getline(in, line);
+ if (!in.eof()) {
+ if (line.find("Features") != std::string::npos) {
+ if (line.find("idivt") != std::string::npos) {
+ f.SetHasDivideInstruction(true);
+ }
+ }
+ }
+ in.close();
+ }
+ } else {
+ LOG(INFO) << "Failed to open /proc/cpuinfo";
+ }
+ }
+
+ // See if have a sdiv instruction. Register a signal handler and try to execute
+ // an sdiv instruction. If we get a SIGILL then it's not supported. We can't use
+ // the /proc/cpuinfo method for this because Krait devices don't always put the idivt
+ // feature in the list.
+ struct sigaction sa, osa;
+ sa.sa_flags = SA_ONSTACK | SA_RESTART | SA_SIGINFO;
+ sa.sa_sigaction = baddivideinst;
+ sigaction(SIGILL, &sa, &osa);
+
+ if (CheckForARMSDIVInstruction()) {
+ f.SetHasDivideInstruction(true);
+ }
+
+ // Restore the signal handler.
+ sigaction(SIGILL, &osa, nullptr);
+
+ // Other feature guesses in here.
+ return f;
+}
+#endif
+
+// Given a set of instruction features from the build, parse it. The
+// input 'str' is a comma separated list of feature names. Parse it and
+// return the InstructionSetFeatures object.
+static InstructionSetFeatures ParseFeatureList(std::string str) {
+ InstructionSetFeatures result;
+ typedef std::vector<std::string> FeatureList;
+ FeatureList features;
+ Split(str, ',', features);
+ for (FeatureList::iterator i = features.begin(); i != features.end(); i++) {
+ std::string feature = Trim(*i);
+ if (feature == "default") {
+ // Nothing to do.
+ } else if (feature == "div") {
+ // Supports divide instruction.
+ result.SetHasDivideInstruction(true);
+ } else if (feature == "nodiv") {
+ // Turn off support for divide instruction.
+ result.SetHasDivideInstruction(false);
+ } else {
+ LOG(FATAL) << "Unknown instruction set feature: '" << feature << "'";
+ }
+ }
+ // Others...
+ return result;
+}
+
+CommonCompilerTest::CommonCompilerTest() {}
+CommonCompilerTest::~CommonCompilerTest() {}
+
+OatFile::OatMethod CommonCompilerTest::CreateOatMethod(const void* code, const uint8_t* gc_map) {
+ CHECK(code != nullptr);
+ const byte* base;
+ uint32_t code_offset, gc_map_offset;
+ if (gc_map == nullptr) {
+ base = reinterpret_cast<const byte*>(code); // Base of data points at code.
+ base -= kPointerSize; // Move backward so that code_offset != 0.
+ code_offset = kPointerSize;
+ gc_map_offset = 0;
+ } else {
+ // TODO: 64bit support.
+ base = nullptr; // Base of data in oat file, ie 0.
+ code_offset = PointerToLowMemUInt32(code);
+ gc_map_offset = PointerToLowMemUInt32(gc_map);
+ }
+ return OatFile::OatMethod(base, code_offset, gc_map_offset);
+}
+
+void CommonCompilerTest::MakeExecutable(mirror::ArtMethod* method) {
+ CHECK(method != nullptr);
+
+ const CompiledMethod* compiled_method = nullptr;
+ if (!method->IsAbstract()) {
+ mirror::DexCache* dex_cache = method->GetDeclaringClass()->GetDexCache();
+ const DexFile& dex_file = *dex_cache->GetDexFile();
+ compiled_method =
+ compiler_driver_->GetCompiledMethod(MethodReference(&dex_file,
+ method->GetDexMethodIndex()));
+ }
+ if (compiled_method != nullptr) {
+ const std::vector<uint8_t>* code = compiled_method->GetQuickCode();
+ const void* code_ptr;
+ if (code != nullptr) {
+ uint32_t code_size = code->size();
+ CHECK_NE(0u, code_size);
+ const std::vector<uint8_t>& vmap_table = compiled_method->GetVmapTable();
+ uint32_t vmap_table_offset = vmap_table.empty() ? 0u
+ : sizeof(OatQuickMethodHeader) + vmap_table.size();
+ const std::vector<uint8_t>& mapping_table = compiled_method->GetMappingTable();
+ uint32_t mapping_table_offset = mapping_table.empty() ? 0u
+ : sizeof(OatQuickMethodHeader) + vmap_table.size() + mapping_table.size();
+ OatQuickMethodHeader method_header(mapping_table_offset, vmap_table_offset,
+ compiled_method->GetFrameSizeInBytes(),
+ compiled_method->GetCoreSpillMask(),
+ compiled_method->GetFpSpillMask(), code_size);
+
+ header_code_and_maps_chunks_.push_back(std::vector<uint8_t>());
+ std::vector<uint8_t>* chunk = &header_code_and_maps_chunks_.back();
+ size_t size = sizeof(method_header) + code_size + vmap_table.size() + mapping_table.size();
+ size_t code_offset = compiled_method->AlignCode(size - code_size);
+ size_t padding = code_offset - (size - code_size);
+ chunk->reserve(padding + size);
+ chunk->resize(sizeof(method_header));
+ memcpy(&(*chunk)[0], &method_header, sizeof(method_header));
+ chunk->insert(chunk->begin(), vmap_table.begin(), vmap_table.end());
+ chunk->insert(chunk->begin(), mapping_table.begin(), mapping_table.end());
+ chunk->insert(chunk->begin(), padding, 0);
+ chunk->insert(chunk->end(), code->begin(), code->end());
+ CHECK_EQ(padding + size, chunk->size());
+ code_ptr = &(*chunk)[code_offset];
+ } else {
+ code = compiled_method->GetPortableCode();
+ code_ptr = &(*code)[0];
+ }
+ MakeExecutable(code_ptr, code->size());
+ const void* method_code = CompiledMethod::CodePointer(code_ptr,
+ compiled_method->GetInstructionSet());
+ LOG(INFO) << "MakeExecutable " << PrettyMethod(method) << " code=" << method_code;
+ OatFile::OatMethod oat_method = CreateOatMethod(method_code, nullptr);
+ oat_method.LinkMethod(method);
+ method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge);
+ } else {
+ // No code? You must mean to go into the interpreter.
+ // Or the generic JNI...
+ if (!method->IsNative()) {
+ const void* method_code = kUsePortableCompiler ? GetPortableToInterpreterBridge()
+ : GetQuickToInterpreterBridge();
+ OatFile::OatMethod oat_method = CreateOatMethod(method_code, nullptr);
+ oat_method.LinkMethod(method);
+ method->SetEntryPointFromInterpreter(interpreter::artInterpreterToInterpreterBridge);
+ } else {
+ const void* method_code = reinterpret_cast<void*>(art_quick_generic_jni_trampoline);
+
+ OatFile::OatMethod oat_method = CreateOatMethod(method_code, nullptr);
+ oat_method.LinkMethod(method);
+ method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge);
+ }
+ }
+ // Create bridges to transition between different kinds of compiled bridge.
+ if (method->GetEntryPointFromPortableCompiledCode() == nullptr) {
+ method->SetEntryPointFromPortableCompiledCode(GetPortableToQuickBridge());
+ } else {
+ CHECK(method->GetEntryPointFromQuickCompiledCode() == nullptr);
+ method->SetEntryPointFromQuickCompiledCode(GetQuickToPortableBridge());
+ method->SetIsPortableCompiled();
+ }
+}
+
+void CommonCompilerTest::MakeExecutable(const void* code_start, size_t code_length) {
+ CHECK(code_start != nullptr);
+ CHECK_NE(code_length, 0U);
+ uintptr_t data = reinterpret_cast<uintptr_t>(code_start);
+ uintptr_t base = RoundDown(data, kPageSize);
+ uintptr_t limit = RoundUp(data + code_length, kPageSize);
+ uintptr_t len = limit - base;
+ int result = mprotect(reinterpret_cast<void*>(base), len, PROT_READ | PROT_WRITE | PROT_EXEC);
+ CHECK_EQ(result, 0);
+
+ // Flush instruction cache
+ // Only uses __builtin___clear_cache if GCC >= 4.3.3
+#if GCC_VERSION >= 40303
+ __builtin___clear_cache(reinterpret_cast<void*>(base), reinterpret_cast<void*>(base + len));
+#else
+ // Only warn if not Intel as Intel doesn't have cache flush instructions.
+#if !defined(__i386__) && !defined(__x86_64__)
+ LOG(WARNING) << "UNIMPLEMENTED: cache flush";
+#endif
+#endif
+}
+
+void CommonCompilerTest::MakeExecutable(mirror::ClassLoader* class_loader, const char* class_name) {
+ std::string class_descriptor(DotToDescriptor(class_name));
+ Thread* self = Thread::Current();
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ClassLoader> loader(hs.NewHandle(class_loader));
+ mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), loader);
+ CHECK(klass != nullptr) << "Class not found " << class_name;
+ for (size_t i = 0; i < klass->NumDirectMethods(); i++) {
+ MakeExecutable(klass->GetDirectMethod(i));
+ }
+ for (size_t i = 0; i < klass->NumVirtualMethods(); i++) {
+ MakeExecutable(klass->GetVirtualMethod(i));
+ }
+}
+
+void CommonCompilerTest::SetUp() {
+ CommonRuntimeTest::SetUp();
+ {
+ ScopedObjectAccess soa(Thread::Current());
+
+ InstructionSet instruction_set = kRuntimeISA;
+
+ // Take the default set of instruction features from the build.
+ InstructionSetFeatures instruction_set_features =
+ ParseFeatureList(Runtime::GetDefaultInstructionSetFeatures());
+
+#if defined(__arm__)
+ InstructionSetFeatures runtime_features = GuessInstructionFeatures();
+
+ // for ARM, do a runtime check to make sure that the features we are passed from
+ // the build match the features we actually determine at runtime.
+ ASSERT_LE(instruction_set_features, runtime_features);
+#endif
+
+ runtime_->SetInstructionSet(instruction_set);
+ for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
+ Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
+ if (!runtime_->HasCalleeSaveMethod(type)) {
+ runtime_->SetCalleeSaveMethod(
+ runtime_->CreateCalleeSaveMethod(type), type);
+ }
+ }
+
+ // TODO: make selectable
+ Compiler::Kind compiler_kind
+ = (kUsePortableCompiler) ? Compiler::kPortable : Compiler::kQuick;
+ timer_.reset(new CumulativeLogger("Compilation times"));
+ compiler_driver_.reset(new CompilerDriver(compiler_options_.get(),
+ verification_results_.get(),
+ method_inliner_map_.get(),
+ compiler_kind, instruction_set,
+ instruction_set_features,
+ true, new CompilerDriver::DescriptorSet,
+ 2, true, true, timer_.get()));
+ }
+ // We typically don't generate an image in unit tests, disable this optimization by default.
+ compiler_driver_->SetSupportBootImageFixup(false);
+}
+
+void CommonCompilerTest::SetUpRuntimeOptions(RuntimeOptions* options) {
+ CommonRuntimeTest::SetUpRuntimeOptions(options);
+
+ compiler_options_.reset(new CompilerOptions);
+ verification_results_.reset(new VerificationResults(compiler_options_.get()));
+ method_inliner_map_.reset(new DexFileToMethodInlinerMap);
+ callbacks_.reset(new QuickCompilerCallbacks(verification_results_.get(),
+ method_inliner_map_.get()));
+ options->push_back(std::make_pair("compilercallbacks", callbacks_.get()));
+}
+
+void CommonCompilerTest::TearDown() {
+ timer_.reset();
+ compiler_driver_.reset();
+ callbacks_.reset();
+ method_inliner_map_.reset();
+ verification_results_.reset();
+ compiler_options_.reset();
+
+ CommonRuntimeTest::TearDown();
+}
+
+void CommonCompilerTest::CompileClass(mirror::ClassLoader* class_loader, const char* class_name) {
+ std::string class_descriptor(DotToDescriptor(class_name));
+ Thread* self = Thread::Current();
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ClassLoader> loader(hs.NewHandle(class_loader));
+ mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), loader);
+ CHECK(klass != nullptr) << "Class not found " << class_name;
+ for (size_t i = 0; i < klass->NumDirectMethods(); i++) {
+ CompileMethod(klass->GetDirectMethod(i));
+ }
+ for (size_t i = 0; i < klass->NumVirtualMethods(); i++) {
+ CompileMethod(klass->GetVirtualMethod(i));
+ }
+}
+
+void CommonCompilerTest::CompileMethod(mirror::ArtMethod* method) {
+ CHECK(method != nullptr);
+ TimingLogger timings("CommonTest::CompileMethod", false, false);
+ TimingLogger::ScopedTiming t(__FUNCTION__, &timings);
+ compiler_driver_->CompileOne(method, &timings);
+ TimingLogger::ScopedTiming t2("MakeExecutable", &timings);
+ MakeExecutable(method);
+}
+
+void CommonCompilerTest::CompileDirectMethod(Handle<mirror::ClassLoader> class_loader,
+ const char* class_name, const char* method_name,
+ const char* signature) {
+ std::string class_descriptor(DotToDescriptor(class_name));
+ Thread* self = Thread::Current();
+ mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), class_loader);
+ CHECK(klass != nullptr) << "Class not found " << class_name;
+ mirror::ArtMethod* method = klass->FindDirectMethod(method_name, signature);
+ CHECK(method != nullptr) << "Direct method not found: "
+ << class_name << "." << method_name << signature;
+ CompileMethod(method);
+}
+
+void CommonCompilerTest::CompileVirtualMethod(Handle<mirror::ClassLoader> class_loader, const char* class_name,
+ const char* method_name, const char* signature)
+SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ std::string class_descriptor(DotToDescriptor(class_name));
+ Thread* self = Thread::Current();
+ mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), class_loader);
+ CHECK(klass != nullptr) << "Class not found " << class_name;
+ mirror::ArtMethod* method = klass->FindVirtualMethod(method_name, signature);
+ CHECK(method != NULL) << "Virtual method not found: "
+ << class_name << "." << method_name << signature;
+ CompileMethod(method);
+}
+
+void CommonCompilerTest::ReserveImageSpace() {
+ // Reserve where the image will be loaded up front so that other parts of test set up don't
+ // accidentally end up colliding with the fixed memory address when we need to load the image.
+ std::string error_msg;
+ image_reservation_.reset(MemMap::MapAnonymous("image reservation",
+ reinterpret_cast<byte*>(ART_BASE_ADDRESS),
+ (size_t)100 * 1024 * 1024, // 100MB
+ PROT_NONE,
+ false /* no need for 4gb flag with fixed mmap*/,
+ &error_msg));
+ CHECK(image_reservation_.get() != nullptr) << error_msg;
+}
+
+void CommonCompilerTest::UnreserveImageSpace() {
+ image_reservation_.reset();
+}
+
+} // namespace art
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index e11f61a285..df06b71c7d 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -17,409 +17,68 @@
#ifndef ART_COMPILER_COMMON_COMPILER_TEST_H_
#define ART_COMPILER_COMMON_COMPILER_TEST_H_
-#include "compiler.h"
-#include "compiler_callbacks.h"
+#include <list>
+#include <vector>
+
#include "common_runtime_test.h"
-#include "dex/quick/dex_file_to_method_inliner_map.h"
-#include "dex/verification_results.h"
-#include "driver/compiler_callbacks_impl.h"
-#include "driver/compiler_driver.h"
-#include "driver/compiler_options.h"
+#include "oat_file.h"
namespace art {
+namespace mirror {
+ class ClassLoader;
+} // namespace mirror
-#if defined(__arm__)
-
-#include <sys/ucontext.h>
-
-// A signal handler called when have an illegal instruction. We record the fact in
-// a global boolean and then increment the PC in the signal context to return to
-// the next instruction. We know the instruction is an sdiv (4 bytes long).
-static inline void baddivideinst(int signo, siginfo *si, void *data) {
- UNUSED(signo);
- UNUSED(si);
- struct ucontext *uc = (struct ucontext *)data;
- struct sigcontext *sc = &uc->uc_mcontext;
- sc->arm_r0 = 0; // set R0 to #0 to signal error
- sc->arm_pc += 4; // skip offending instruction
-}
-
-// This is in arch/arm/arm_sdiv.S. It does the following:
-// mov r1,#1
-// sdiv r0,r1,r1
-// bx lr
-//
-// the result will be the value 1 if sdiv is supported. If it is not supported
-// a SIGILL signal will be raised and the signal handler (baddivideinst) called.
-// The signal handler sets r0 to #0 and then increments pc beyond the failed instruction.
-// Thus if the instruction is not supported, the result of this function will be #0
-
-extern "C" bool CheckForARMSDIVInstruction();
-
-static inline InstructionSetFeatures GuessInstructionFeatures() {
- InstructionSetFeatures f;
-
- // Uncomment this for processing of /proc/cpuinfo.
- if (false) {
- // Look in /proc/cpuinfo for features we need. Only use this when we can guarantee that
- // the kernel puts the appropriate feature flags in here. Sometimes it doesn't.
- std::ifstream in("/proc/cpuinfo");
- if (in) {
- while (!in.eof()) {
- std::string line;
- std::getline(in, line);
- if (!in.eof()) {
- if (line.find("Features") != std::string::npos) {
- if (line.find("idivt") != std::string::npos) {
- f.SetHasDivideInstruction(true);
- }
- }
- }
- in.close();
- }
- } else {
- LOG(INFO) << "Failed to open /proc/cpuinfo";
- }
- }
-
- // See if have a sdiv instruction. Register a signal handler and try to execute
- // an sdiv instruction. If we get a SIGILL then it's not supported. We can't use
- // the /proc/cpuinfo method for this because Krait devices don't always put the idivt
- // feature in the list.
- struct sigaction sa, osa;
- sa.sa_flags = SA_ONSTACK | SA_RESTART | SA_SIGINFO;
- sa.sa_sigaction = baddivideinst;
- sigaction(SIGILL, &sa, &osa);
-
- if (CheckForARMSDIVInstruction()) {
- f.SetHasDivideInstruction(true);
- }
-
- // Restore the signal handler.
- sigaction(SIGILL, &osa, nullptr);
-
- // Other feature guesses in here.
- return f;
-}
-
-#endif
-
-// Given a set of instruction features from the build, parse it. The
-// input 'str' is a comma separated list of feature names. Parse it and
-// return the InstructionSetFeatures object.
-static inline InstructionSetFeatures ParseFeatureList(std::string str) {
- InstructionSetFeatures result;
- typedef std::vector<std::string> FeatureList;
- FeatureList features;
- Split(str, ',', features);
- for (FeatureList::iterator i = features.begin(); i != features.end(); i++) {
- std::string feature = Trim(*i);
- if (feature == "default") {
- // Nothing to do.
- } else if (feature == "div") {
- // Supports divide instruction.
- result.SetHasDivideInstruction(true);
- } else if (feature == "nodiv") {
- // Turn off support for divide instruction.
- result.SetHasDivideInstruction(false);
- } else {
- LOG(FATAL) << "Unknown instruction set feature: '" << feature << "'";
- }
- }
- // Others...
- return result;
-}
+class CompilerDriver;
+class CompilerOptions;
+class CumulativeLogger;
+class DexFileToMethodInlinerMap;
+class VerificationResults;
-// Normally the ClassLinker supplies this.
-extern "C" void art_quick_generic_jni_trampoline(mirror::ArtMethod*);
+template<class T> class Handle;
class CommonCompilerTest : public CommonRuntimeTest {
public:
- // Create an OatMethod based on pointers (for unit tests).
- OatFile::OatMethod CreateOatMethod(const void* code,
- const uint8_t* gc_map) {
- CHECK(code != nullptr);
- const byte* base;
- uint32_t code_offset, gc_map_offset;
- if (gc_map == nullptr) {
- base = reinterpret_cast<const byte*>(code); // Base of data points at code.
- base -= kPointerSize; // Move backward so that code_offset != 0.
- code_offset = kPointerSize;
- gc_map_offset = 0;
- } else {
- // TODO: 64bit support.
- base = nullptr; // Base of data in oat file, ie 0.
- code_offset = PointerToLowMemUInt32(code);
- gc_map_offset = PointerToLowMemUInt32(gc_map);
- }
- return OatFile::OatMethod(base,
- code_offset,
- gc_map_offset);
- }
-
- void MakeExecutable(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- CHECK(method != nullptr);
-
- const CompiledMethod* compiled_method = nullptr;
- if (!method->IsAbstract()) {
- mirror::DexCache* dex_cache = method->GetDeclaringClass()->GetDexCache();
- const DexFile& dex_file = *dex_cache->GetDexFile();
- compiled_method =
- compiler_driver_->GetCompiledMethod(MethodReference(&dex_file,
- method->GetDexMethodIndex()));
- }
- if (compiled_method != nullptr) {
- const std::vector<uint8_t>* code = compiled_method->GetQuickCode();
- const void* code_ptr;
- if (code != nullptr) {
- uint32_t code_size = code->size();
- CHECK_NE(0u, code_size);
- const std::vector<uint8_t>& vmap_table = compiled_method->GetVmapTable();
- uint32_t vmap_table_offset = vmap_table.empty() ? 0u
- : sizeof(OatQuickMethodHeader) + vmap_table.size();
- const std::vector<uint8_t>& mapping_table = compiled_method->GetMappingTable();
- uint32_t mapping_table_offset = mapping_table.empty() ? 0u
- : sizeof(OatQuickMethodHeader) + vmap_table.size() + mapping_table.size();
- OatQuickMethodHeader method_header(mapping_table_offset, vmap_table_offset,
- compiled_method->GetFrameSizeInBytes(),
- compiled_method->GetCoreSpillMask(),
- compiled_method->GetFpSpillMask(), code_size);
-
- header_code_and_maps_chunks_.push_back(std::vector<uint8_t>());
- std::vector<uint8_t>* chunk = &header_code_and_maps_chunks_.back();
- size_t size = sizeof(method_header) + code_size + vmap_table.size() + mapping_table.size();
- size_t code_offset = compiled_method->AlignCode(size - code_size);
- size_t padding = code_offset - (size - code_size);
- chunk->reserve(padding + size);
- chunk->resize(sizeof(method_header));
- memcpy(&(*chunk)[0], &method_header, sizeof(method_header));
- chunk->insert(chunk->begin(), vmap_table.begin(), vmap_table.end());
- chunk->insert(chunk->begin(), mapping_table.begin(), mapping_table.end());
- chunk->insert(chunk->begin(), padding, 0);
- chunk->insert(chunk->end(), code->begin(), code->end());
- CHECK_EQ(padding + size, chunk->size());
- code_ptr = &(*chunk)[code_offset];
- } else {
- code = compiled_method->GetPortableCode();
- code_ptr = &(*code)[0];
- }
- MakeExecutable(code_ptr, code->size());
- const void* method_code = CompiledMethod::CodePointer(code_ptr,
- compiled_method->GetInstructionSet());
- LOG(INFO) << "MakeExecutable " << PrettyMethod(method) << " code=" << method_code;
- OatFile::OatMethod oat_method = CreateOatMethod(method_code, nullptr);
- oat_method.LinkMethod(method);
- method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge);
- } else {
- // No code? You must mean to go into the interpreter.
- // Or the generic JNI...
- if (!method->IsNative()) {
- const void* method_code = kUsePortableCompiler ? GetPortableToInterpreterBridge()
- : GetQuickToInterpreterBridge();
- OatFile::OatMethod oat_method = CreateOatMethod(method_code, nullptr);
- oat_method.LinkMethod(method);
- method->SetEntryPointFromInterpreter(interpreter::artInterpreterToInterpreterBridge);
- } else {
- const void* method_code = reinterpret_cast<void*>(art_quick_generic_jni_trampoline);
+ CommonCompilerTest();
+ ~CommonCompilerTest();
- OatFile::OatMethod oat_method = CreateOatMethod(method_code, nullptr);
- oat_method.LinkMethod(method);
- method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge);
- }
- }
- // Create bridges to transition between different kinds of compiled bridge.
- if (method->GetEntryPointFromPortableCompiledCode() == nullptr) {
- method->SetEntryPointFromPortableCompiledCode(GetPortableToQuickBridge());
- } else {
- CHECK(method->GetEntryPointFromQuickCompiledCode() == nullptr);
- method->SetEntryPointFromQuickCompiledCode(GetQuickToPortableBridge());
- method->SetIsPortableCompiled();
- }
- }
+ // Create an OatMethod based on pointers (for unit tests).
+ OatFile::OatMethod CreateOatMethod(const void* code, const uint8_t* gc_map);
- static void MakeExecutable(const void* code_start, size_t code_length) {
- CHECK(code_start != nullptr);
- CHECK_NE(code_length, 0U);
- uintptr_t data = reinterpret_cast<uintptr_t>(code_start);
- uintptr_t base = RoundDown(data, kPageSize);
- uintptr_t limit = RoundUp(data + code_length, kPageSize);
- uintptr_t len = limit - base;
- int result = mprotect(reinterpret_cast<void*>(base), len, PROT_READ | PROT_WRITE | PROT_EXEC);
- CHECK_EQ(result, 0);
+ void MakeExecutable(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Flush instruction cache
- // Only uses __builtin___clear_cache if GCC >= 4.3.3
-#if GCC_VERSION >= 40303
- __builtin___clear_cache(reinterpret_cast<void*>(base), reinterpret_cast<void*>(base + len));
-#else
- LOG(WARNING) << "UNIMPLEMENTED: cache flush";
-#endif
- }
+ static void MakeExecutable(const void* code_start, size_t code_length);
void MakeExecutable(mirror::ClassLoader* class_loader, const char* class_name)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- std::string class_descriptor(DotToDescriptor(class_name));
- Thread* self = Thread::Current();
- StackHandleScope<1> hs(self);
- Handle<mirror::ClassLoader> loader(hs.NewHandle(class_loader));
- mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), loader);
- CHECK(klass != nullptr) << "Class not found " << class_name;
- for (size_t i = 0; i < klass->NumDirectMethods(); i++) {
- MakeExecutable(klass->GetDirectMethod(i));
- }
- for (size_t i = 0; i < klass->NumVirtualMethods(); i++) {
- MakeExecutable(klass->GetVirtualMethod(i));
- }
- }
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
protected:
- virtual void SetUp() {
- CommonRuntimeTest::SetUp();
- {
- ScopedObjectAccess soa(Thread::Current());
-
- InstructionSet instruction_set = kNone;
-
- // Take the default set of instruction features from the build.
- InstructionSetFeatures instruction_set_features =
- ParseFeatureList(Runtime::GetDefaultInstructionSetFeatures());
-
-#if defined(__arm__)
- instruction_set = kThumb2;
- InstructionSetFeatures runtime_features = GuessInstructionFeatures();
-
- // for ARM, do a runtime check to make sure that the features we are passed from
- // the build match the features we actually determine at runtime.
- ASSERT_LE(instruction_set_features, runtime_features);
-#elif defined(__aarch64__)
- instruction_set = kArm64;
-#elif defined(__mips__)
- instruction_set = kMips;
-#elif defined(__i386__)
- instruction_set = kX86;
-#elif defined(__x86_64__)
- instruction_set = kX86_64;
-#endif
-
- runtime_->SetInstructionSet(instruction_set);
- for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
- Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
- if (!runtime_->HasCalleeSaveMethod(type)) {
- runtime_->SetCalleeSaveMethod(
- runtime_->CreateCalleeSaveMethod(type), type);
- }
- }
-
- // TODO: make selectable
- Compiler::Kind compiler_kind
- = (kUsePortableCompiler) ? Compiler::kPortable : Compiler::kQuick;
- timer_.reset(new CumulativeLogger("Compilation times"));
- compiler_driver_.reset(new CompilerDriver(compiler_options_.get(),
- verification_results_.get(),
- method_inliner_map_.get(),
- compiler_kind, instruction_set,
- instruction_set_features,
- true, new CompilerDriver::DescriptorSet,
- 2, true, true, timer_.get()));
- }
- // We typically don't generate an image in unit tests, disable this optimization by default.
- compiler_driver_->SetSupportBootImageFixup(false);
- }
-
- virtual void SetUpRuntimeOptions(Runtime::Options *options) {
- CommonRuntimeTest::SetUpRuntimeOptions(options);
-
- compiler_options_.reset(new CompilerOptions);
- verification_results_.reset(new VerificationResults(compiler_options_.get()));
- method_inliner_map_.reset(new DexFileToMethodInlinerMap);
- callbacks_.reset(new CompilerCallbacksImpl(verification_results_.get(),
- method_inliner_map_.get()));
- options->push_back(std::make_pair("compilercallbacks", callbacks_.get()));
- }
+ virtual void SetUp();
- virtual void TearDown() {
- timer_.reset();
- compiler_driver_.reset();
- callbacks_.reset();
- method_inliner_map_.reset();
- verification_results_.reset();
- compiler_options_.reset();
+ virtual void SetUpRuntimeOptions(RuntimeOptions *options);
- CommonRuntimeTest::TearDown();
- }
+ virtual void TearDown();
void CompileClass(mirror::ClassLoader* class_loader, const char* class_name)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- std::string class_descriptor(DotToDescriptor(class_name));
- Thread* self = Thread::Current();
- StackHandleScope<1> hs(self);
- Handle<mirror::ClassLoader> loader(hs.NewHandle(class_loader));
- mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), loader);
- CHECK(klass != nullptr) << "Class not found " << class_name;
- for (size_t i = 0; i < klass->NumDirectMethods(); i++) {
- CompileMethod(klass->GetDirectMethod(i));
- }
- for (size_t i = 0; i < klass->NumVirtualMethods(); i++) {
- CompileMethod(klass->GetVirtualMethod(i));
- }
- }
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void CompileMethod(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- CHECK(method != nullptr);
- TimingLogger timings("CommonTest::CompileMethod", false, false);
- TimingLogger::ScopedTiming t(__FUNCTION__, &timings);
- compiler_driver_->CompileOne(method, &timings);
- TimingLogger::ScopedTiming t2("MakeExecutable", &timings);
- MakeExecutable(method);
- }
+ void CompileMethod(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CompileDirectMethod(Handle<mirror::ClassLoader> class_loader, const char* class_name,
const char* method_name, const char* signature)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- std::string class_descriptor(DotToDescriptor(class_name));
- Thread* self = Thread::Current();
- mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), class_loader);
- CHECK(klass != nullptr) << "Class not found " << class_name;
- mirror::ArtMethod* method = klass->FindDirectMethod(method_name, signature);
- CHECK(method != nullptr) << "Direct method not found: "
- << class_name << "." << method_name << signature;
- CompileMethod(method);
- }
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CompileVirtualMethod(Handle<mirror::ClassLoader> class_loader, const char* class_name,
const char* method_name, const char* signature)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- std::string class_descriptor(DotToDescriptor(class_name));
- Thread* self = Thread::Current();
- mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), class_loader);
- CHECK(klass != nullptr) << "Class not found " << class_name;
- mirror::ArtMethod* method = klass->FindVirtualMethod(method_name, signature);
- CHECK(method != NULL) << "Virtual method not found: "
- << class_name << "." << method_name << signature;
- CompileMethod(method);
- }
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void ReserveImageSpace() {
- // Reserve where the image will be loaded up front so that other parts of test set up don't
- // accidentally end up colliding with the fixed memory address when we need to load the image.
- std::string error_msg;
- image_reservation_.reset(MemMap::MapAnonymous("image reservation",
- reinterpret_cast<byte*>(ART_BASE_ADDRESS),
- (size_t)100 * 1024 * 1024, // 100MB
- PROT_NONE,
- false /* no need for 4gb flag with fixed mmap*/,
- &error_msg));
- CHECK(image_reservation_.get() != nullptr) << error_msg;
- }
+ void ReserveImageSpace();
- void UnreserveImageSpace() {
- image_reservation_.reset();
- }
+ void UnreserveImageSpace();
std::unique_ptr<CompilerOptions> compiler_options_;
std::unique_ptr<VerificationResults> verification_results_;
std::unique_ptr<DexFileToMethodInlinerMap> method_inliner_map_;
- std::unique_ptr<CompilerCallbacksImpl> callbacks_;
+ std::unique_ptr<CompilerCallbacks> callbacks_;
std::unique_ptr<CompilerDriver> compiler_driver_;
std::unique_ptr<CumulativeLogger> timer_;
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index f4ea592781..2fcc3a5abc 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -341,7 +341,7 @@ void ArmMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
* is responsible for setting branch target field.
*/
LIR* ArmMir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target) {
- LIR* branch;
+ LIR* branch = nullptr;
ArmConditionCode arm_cond = ArmConditionEncoding(cond);
/*
* A common use of OpCmpImmBranch is for null checks, and using the Thumb 16-bit
@@ -354,14 +354,22 @@ LIR* ArmMir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_va
*/
bool skip = ((target != NULL) && (target->opcode == kPseudoThrowTarget));
skip &= ((cu_->code_item->insns_size_in_code_units_ - current_dalvik_offset_) > 64);
- if (!skip && reg.Low8() && (check_value == 0) &&
- ((arm_cond == kArmCondEq) || (arm_cond == kArmCondNe))) {
- branch = NewLIR2((arm_cond == kArmCondEq) ? kThumb2Cbz : kThumb2Cbnz,
- reg.GetReg(), 0);
- } else {
+ if (!skip && reg.Low8() && (check_value == 0)) {
+ if (arm_cond == kArmCondEq || arm_cond == kArmCondNe) {
+ branch = NewLIR2((arm_cond == kArmCondEq) ? kThumb2Cbz : kThumb2Cbnz,
+ reg.GetReg(), 0);
+ } else if (arm_cond == kArmCondLs) {
+ // kArmCondLs is an unsigned less or equal. A comparison r <= 0 is then the same as cbz.
+ // This case happens for a bounds check of array[0].
+ branch = NewLIR2(kThumb2Cbz, reg.GetReg(), 0);
+ }
+ }
+
+ if (branch == nullptr) {
OpRegImm(kOpCmp, reg, check_value);
branch = NewLIR2(kThumbBCond, 0, arm_cond);
}
+
branch->target = target;
return branch;
}
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index 083277de43..462be54e57 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -655,10 +655,10 @@ uint8_t* Arm64Mir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) {
if (kIsDebugBuild && (kFailOnSizeError || kReportSizeError)) {
// Register usage checks: First establish register usage requirements based on the
// format in `kind'.
- bool want_float = false;
- bool want_64_bit = false;
- bool want_var_size = true;
- bool want_zero = false;
+ bool want_float = false; // Want a float (rather than core) register.
+ bool want_64_bit = false; // Want a 64-bit (rather than 32-bit) register.
+ bool want_var_size = true; // Want register with variable size (kFmtReg{R,F}).
+ bool want_zero = false; // Want the zero (rather than sp) register.
switch (kind) {
case kFmtRegX:
want_64_bit = true;
@@ -717,9 +717,6 @@ uint8_t* Arm64Mir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) {
}
}
- // TODO(Arm64): if !want_size_match, then we still should compare the size of the
- // register with the size required by the instruction width (kA64Wide).
-
// Fail, if `expected' contains an unsatisfied requirement.
if (expected != nullptr) {
LOG(WARNING) << "Method: " << PrettyMethod(cu_->method_idx, *cu_->dex_file)
@@ -734,11 +731,12 @@ uint8_t* Arm64Mir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) {
}
}
- // TODO(Arm64): this may or may not be necessary, depending on how wzr, xzr are
- // defined.
- if (is_zero) {
- operand = 31;
- }
+ // In the lines below, we rely on (operand & 0x1f) == 31 to be true for register sp
+ // and zr. This means that these two registers do not need any special treatment, as
+ // their bottom 5 bits are correctly set to 31 == 0b11111, which is the right
+ // value for encoding both sp and zr.
+ COMPILE_ASSERT((rxzr & 0x1f) == 0x1f, rzr_register_number_must_be_31);
+ COMPILE_ASSERT((rsp & 0x1f) == 0x1f, rsp_register_number_must_be_31);
}
value = (operand << encoder->field_loc[i].start) &
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index e4eeeaf580..de976531c2 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -105,16 +105,14 @@ class Arm64Mir2Lir FINAL : public Mir2Lir {
// Required for target - register utilities.
RegStorage TargetReg(SpecialTargetRegister reg) OVERRIDE;
RegStorage TargetReg(SpecialTargetRegister symbolic_reg, WideKind wide_kind) OVERRIDE {
- RegStorage reg = TargetReg(symbolic_reg);
if (wide_kind == kWide || wide_kind == kRef) {
- return (reg.Is64Bit()) ? reg : As64BitReg(reg);
+ return As64BitReg(TargetReg(symbolic_reg));
} else {
- return (reg.Is32Bit()) ? reg : As32BitReg(reg);
+ return Check32BitReg(TargetReg(symbolic_reg));
}
}
RegStorage TargetPtrReg(SpecialTargetRegister symbolic_reg) OVERRIDE {
- RegStorage reg = TargetReg(symbolic_reg);
- return (reg.Is64Bit() ? reg : As64BitReg(reg));
+ return As64BitReg(TargetReg(symbolic_reg));
}
RegStorage GetArgMappingToPhysicalReg(int arg_num);
RegLocation GetReturnAlt();
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index 2650892975..6dc4a7ab51 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -269,16 +269,27 @@ void Arm64Mir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
*/
LIR* Arm64Mir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value,
LIR* target) {
- LIR* branch;
+ LIR* branch = nullptr;
ArmConditionCode arm_cond = ArmConditionEncoding(cond);
- if (check_value == 0 && (arm_cond == kArmCondEq || arm_cond == kArmCondNe)) {
- ArmOpcode opcode = (arm_cond == kArmCondEq) ? kA64Cbz2rt : kA64Cbnz2rt;
- ArmOpcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
- branch = NewLIR2(opcode | wide, reg.GetReg(), 0);
- } else {
+ if (check_value == 0) {
+ if (arm_cond == kArmCondEq || arm_cond == kArmCondNe) {
+ ArmOpcode opcode = (arm_cond == kArmCondEq) ? kA64Cbz2rt : kA64Cbnz2rt;
+ ArmOpcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
+ branch = NewLIR2(opcode | wide, reg.GetReg(), 0);
+ } else if (arm_cond == kArmCondLs) {
+ // kArmCondLs is an unsigned less or equal. A comparison r <= 0 is then the same as cbz.
+ // This case happens for a bounds check of array[0].
+ ArmOpcode opcode = kA64Cbz2rt;
+ ArmOpcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
+ branch = NewLIR2(opcode | wide, reg.GetReg(), 0);
+ }
+ }
+
+ if (branch == nullptr) {
OpRegImm(kOpCmp, reg, check_value);
branch = NewLIR2(kA64B2ct, arm_cond, 0);
}
+
branch->target = target;
return branch;
}
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
index 2212380eb4..6a27ad0b14 100644
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ b/compiler/dex/quick/arm64/target_arm64.cc
@@ -108,19 +108,19 @@ RegLocation Arm64Mir2Lir::LocCReturnDouble() {
RegStorage Arm64Mir2Lir::TargetReg(SpecialTargetRegister reg) {
RegStorage res_reg = RegStorage::InvalidReg();
switch (reg) {
- case kSelf: res_reg = rs_xSELF; break;
- case kSuspend: res_reg = rs_xSUSPEND; break;
- case kLr: res_reg = rs_xLR; break;
+ case kSelf: res_reg = rs_wSELF; break;
+ case kSuspend: res_reg = rs_wSUSPEND; break;
+ case kLr: res_reg = rs_wLR; break;
case kPc: res_reg = RegStorage::InvalidReg(); break;
- case kSp: res_reg = rs_sp; break;
- case kArg0: res_reg = rs_x0; break;
- case kArg1: res_reg = rs_x1; break;
- case kArg2: res_reg = rs_x2; break;
- case kArg3: res_reg = rs_x3; break;
- case kArg4: res_reg = rs_x4; break;
- case kArg5: res_reg = rs_x5; break;
- case kArg6: res_reg = rs_x6; break;
- case kArg7: res_reg = rs_x7; break;
+ case kSp: res_reg = rs_wsp; break;
+ case kArg0: res_reg = rs_w0; break;
+ case kArg1: res_reg = rs_w1; break;
+ case kArg2: res_reg = rs_w2; break;
+ case kArg3: res_reg = rs_w3; break;
+ case kArg4: res_reg = rs_w4; break;
+ case kArg5: res_reg = rs_w5; break;
+ case kArg6: res_reg = rs_w6; break;
+ case kArg7: res_reg = rs_w7; break;
case kFArg0: res_reg = rs_f0; break;
case kFArg1: res_reg = rs_f1; break;
case kFArg2: res_reg = rs_f2; break;
@@ -129,10 +129,10 @@ RegStorage Arm64Mir2Lir::TargetReg(SpecialTargetRegister reg) {
case kFArg5: res_reg = rs_f5; break;
case kFArg6: res_reg = rs_f6; break;
case kFArg7: res_reg = rs_f7; break;
- case kRet0: res_reg = rs_x0; break;
- case kRet1: res_reg = rs_x1; break;
- case kInvokeTgt: res_reg = rs_xLR; break;
- case kHiddenArg: res_reg = rs_x12; break;
+ case kRet0: res_reg = rs_w0; break;
+ case kRet1: res_reg = rs_w1; break;
+ case kInvokeTgt: res_reg = rs_wLR; break;
+ case kHiddenArg: res_reg = rs_w12; break;
case kHiddenFpArg: res_reg = RegStorage::InvalidReg(); break;
case kCount: res_reg = RegStorage::InvalidReg(); break;
default: res_reg = RegStorage::InvalidReg();
@@ -929,13 +929,13 @@ void Arm64Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
*/
RegLocation rl_src = rl_method;
rl_src.location = kLocPhysReg;
- rl_src.reg = TargetReg(kArg0);
+ rl_src.reg = TargetReg(kArg0, kRef);
rl_src.home = false;
MarkLive(rl_src);
StoreValue(rl_method, rl_src);
// If Method* has been promoted, explicitly flush
if (rl_method.location == kLocPhysReg) {
- StoreRefDisp(TargetReg(kSp), 0, TargetReg(kArg0), kNotVolatile);
+ StoreRefDisp(TargetPtrReg(kSp), 0, rl_src.reg, kNotVolatile);
}
if (cu_->num_ins == 0) {
@@ -961,9 +961,9 @@ void Arm64Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
} else {
// Needs flush.
if (t_loc->ref) {
- StoreRefDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, kNotVolatile);
+ StoreRefDisp(TargetPtrReg(kSp), SRegOffset(start_vreg + i), reg, kNotVolatile);
} else {
- StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, t_loc->wide ? k64 : k32,
+ StoreBaseDisp(TargetPtrReg(kSp), SRegOffset(start_vreg + i), reg, t_loc->wide ? k64 : k32,
kNotVolatile);
}
}
@@ -971,9 +971,9 @@ void Arm64Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
// If arriving in frame & promoted.
if (t_loc->location == kLocPhysReg) {
if (t_loc->ref) {
- LoadRefDisp(TargetReg(kSp), SRegOffset(start_vreg + i), t_loc->reg, kNotVolatile);
+ LoadRefDisp(TargetPtrReg(kSp), SRegOffset(start_vreg + i), t_loc->reg, kNotVolatile);
} else {
- LoadBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), t_loc->reg,
+ LoadBaseDisp(TargetPtrReg(kSp), SRegOffset(start_vreg + i), t_loc->reg,
t_loc->wide ? k64 : k32, kNotVolatile);
}
}
@@ -1070,7 +1070,7 @@ int Arm64Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
loc = UpdateLocWide(loc);
if (loc.location == kLocPhysReg) {
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
+ StoreBaseDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
}
next_arg += 2;
} else {
@@ -1078,9 +1078,10 @@ int Arm64Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
if (loc.location == kLocPhysReg) {
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
if (loc.ref) {
- StoreRefDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, kNotVolatile);
+ StoreRefDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, kNotVolatile);
} else {
- StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k32, kNotVolatile);
+ StoreBaseDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k32,
+ kNotVolatile);
}
}
next_arg++;
@@ -1114,8 +1115,8 @@ int Arm64Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
RegStorage temp = TargetReg(kArg3, kNotWide);
// Now load the argument VR and store to the outs.
- Load32Disp(TargetReg(kSp), current_src_offset, temp);
- Store32Disp(TargetReg(kSp), current_dest_offset, temp);
+ Load32Disp(TargetPtrReg(kSp), current_src_offset, temp);
+ Store32Disp(TargetPtrReg(kSp), current_dest_offset, temp);
current_src_offset += bytes_to_move;
current_dest_offset += bytes_to_move;
@@ -1126,8 +1127,7 @@ int Arm64Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
// Now handle rest not registers if they are
if (in_to_reg_storage_mapping.IsThereStackMapped()) {
- RegStorage regSingle = TargetReg(kArg2);
- RegStorage regWide = RegStorage::Solo64(TargetReg(kArg3).GetReg());
+ RegStorage regWide = TargetReg(kArg3, kWide);
for (int i = start_index; i <= last_mapped_in + regs_left_to_pass_via_stack; i++) {
RegLocation rl_arg = info->args[i];
rl_arg = UpdateRawLoc(rl_arg);
@@ -1139,25 +1139,27 @@ int Arm64Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
if (rl_arg.wide) {
if (rl_arg.location == kLocPhysReg) {
- StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k64, kNotVolatile);
+ StoreBaseDisp(TargetPtrReg(kSp), out_offset, rl_arg.reg, k64, kNotVolatile);
} else {
LoadValueDirectWideFixed(rl_arg, regWide);
- StoreBaseDisp(TargetReg(kSp), out_offset, regWide, k64, kNotVolatile);
+ StoreBaseDisp(TargetPtrReg(kSp), out_offset, regWide, k64, kNotVolatile);
}
} else {
if (rl_arg.location == kLocPhysReg) {
if (rl_arg.ref) {
- StoreRefDisp(TargetReg(kSp), out_offset, rl_arg.reg, kNotVolatile);
+ StoreRefDisp(TargetPtrReg(kSp), out_offset, rl_arg.reg, kNotVolatile);
} else {
- StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k32, kNotVolatile);
+ StoreBaseDisp(TargetPtrReg(kSp), out_offset, rl_arg.reg, k32, kNotVolatile);
}
} else {
if (rl_arg.ref) {
+ RegStorage regSingle = TargetReg(kArg2, kRef);
LoadValueDirectFixed(rl_arg, regSingle);
- StoreRefDisp(TargetReg(kSp), out_offset, regSingle, kNotVolatile);
+ StoreRefDisp(TargetPtrReg(kSp), out_offset, regSingle, kNotVolatile);
} else {
- LoadValueDirectFixed(rl_arg, As32BitReg(regSingle));
- StoreBaseDisp(TargetReg(kSp), out_offset, As32BitReg(regSingle), k32, kNotVolatile);
+ RegStorage regSingle = TargetReg(kArg2, kNotWide);
+ LoadValueDirectFixed(rl_arg, regSingle);
+ StoreBaseDisp(TargetPtrReg(kSp), out_offset, regSingle, k32, kNotVolatile);
}
}
}
@@ -1194,13 +1196,13 @@ int Arm64Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
direct_code, direct_method, type);
if (pcrLabel) {
if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
- *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1), info->opt_flags);
+ *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
} else {
*pcrLabel = nullptr;
// In lieu of generating a check for kArg1 being null, we need to
// perform a load when doing implicit checks.
RegStorage tmp = AllocTemp();
- Load32Disp(TargetReg(kArg1), 0, tmp);
+ Load32Disp(TargetReg(kArg1, kRef), 0, tmp);
MarkPossibleNullPointerException(info->opt_flags);
FreeTemp(tmp);
}
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 45dd7f08a6..0e46c96501 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -48,6 +48,7 @@ static constexpr bool kIntrinsicIsStatic[] = {
true, // kIntrinsicMinMaxFloat
true, // kIntrinsicMinMaxDouble
true, // kIntrinsicSqrt
+ false, // kIntrinsicGet
false, // kIntrinsicCharAt
false, // kIntrinsicCompareTo
false, // kIntrinsicIsEmptyOrLength
@@ -74,6 +75,7 @@ COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicMinMaxLong], MinMaxLong_must_be_stat
COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicMinMaxFloat], MinMaxFloat_must_be_static);
COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicMinMaxDouble], MinMaxDouble_must_be_static);
COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicSqrt], Sqrt_must_be_static);
+COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicGet], Get_must_not_be_static);
COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicCharAt], CharAt_must_not_be_static);
COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicCompareTo], CompareTo_must_not_be_static);
COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicIsEmptyOrLength], IsEmptyOrLength_must_not_be_static);
@@ -126,6 +128,7 @@ const char* const DexFileMethodInliner::kClassCacheNames[] = {
"D", // kClassCacheDouble
"V", // kClassCacheVoid
"Ljava/lang/Object;", // kClassCacheJavaLangObject
+ "Ljava/lang/ref/Reference;", // kClassCacheJavaLangRefReference
"Ljava/lang/String;", // kClassCacheJavaLangString
"Ljava/lang/Double;", // kClassCacheJavaLangDouble
"Ljava/lang/Float;", // kClassCacheJavaLangFloat
@@ -152,6 +155,7 @@ const char* const DexFileMethodInliner::kNameCacheNames[] = {
"max", // kNameCacheMax
"min", // kNameCacheMin
"sqrt", // kNameCacheSqrt
+ "get", // kNameCacheGet
"charAt", // kNameCacheCharAt
"compareTo", // kNameCacheCompareTo
"isEmpty", // kNameCacheIsEmpty
@@ -220,6 +224,8 @@ const DexFileMethodInliner::ProtoDef DexFileMethodInliner::kProtoCacheDefs[] = {
{ kClassCacheBoolean, 0, { } },
// kProtoCache_I
{ kClassCacheInt, 0, { } },
+ // kProtoCache_Object
+ { kClassCacheJavaLangObject, 0, { } },
// kProtoCache_Thread
{ kClassCacheJavaLangThread, 0, { } },
// kProtoCacheJ_B
@@ -308,6 +314,8 @@ const DexFileMethodInliner::IntrinsicDef DexFileMethodInliner::kIntrinsicMethods
INTRINSIC(JavaLangMath, Sqrt, D_D, kIntrinsicSqrt, 0),
INTRINSIC(JavaLangStrictMath, Sqrt, D_D, kIntrinsicSqrt, 0),
+ INTRINSIC(JavaLangRefReference, Get, _Object, kIntrinsicGet, 0),
+
INTRINSIC(JavaLangString, CharAt, I_C, kIntrinsicCharAt, 0),
INTRINSIC(JavaLangString, CompareTo, String_I, kIntrinsicCompareTo, 0),
INTRINSIC(JavaLangString, IsEmpty, _Z, kIntrinsicIsEmptyOrLength, kIntrinsicFlagIsEmpty),
@@ -428,6 +436,8 @@ bool DexFileMethodInliner::GenIntrinsic(Mir2Lir* backend, CallInfo* info) {
return backend->GenInlinedMinMaxFP(info, intrinsic.d.data & kIntrinsicFlagMin, true /* is_double */);
case kIntrinsicSqrt:
return backend->GenInlinedSqrt(info);
+ case kIntrinsicGet:
+ return backend->GenInlinedGet(info);
case kIntrinsicCharAt:
return backend->GenInlinedCharAt(info);
case kIntrinsicCompareTo:
diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h
index 5b3b104150..cb8c165ce5 100644
--- a/compiler/dex/quick/dex_file_method_inliner.h
+++ b/compiler/dex/quick/dex_file_method_inliner.h
@@ -107,6 +107,7 @@ class DexFileMethodInliner {
kClassCacheDouble,
kClassCacheVoid,
kClassCacheJavaLangObject,
+ kClassCacheJavaLangRefReference,
kClassCacheJavaLangString,
kClassCacheJavaLangDouble,
kClassCacheJavaLangFloat,
@@ -140,6 +141,7 @@ class DexFileMethodInliner {
kNameCacheMax,
kNameCacheMin,
kNameCacheSqrt,
+ kNameCacheGet,
kNameCacheCharAt,
kNameCacheCompareTo,
kNameCacheIsEmpty,
@@ -199,6 +201,7 @@ class DexFileMethodInliner {
kProtoCacheString_I,
kProtoCache_Z,
kProtoCache_I,
+ kProtoCache_Object,
kProtoCache_Thread,
kProtoCacheJ_B,
kProtoCacheJ_I,
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 79065b3474..9dedeae071 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -23,9 +23,12 @@
#include "invoke_type.h"
#include "mirror/array.h"
#include "mirror/class-inl.h"
+#include "mirror/dex_cache.h"
#include "mirror/object_array-inl.h"
+#include "mirror/reference-inl.h"
#include "mirror/string.h"
#include "mir_to_lir-inl.h"
+#include "scoped_thread_state_change.h"
#include "x86/codegen_x86.h"
namespace art {
@@ -1218,6 +1221,88 @@ RegLocation Mir2Lir::InlineTargetWide(CallInfo* info) {
return res;
}
+bool Mir2Lir::GenInlinedGet(CallInfo* info) {
+ if (cu_->instruction_set == kMips) {
+ // TODO - add Mips implementation
+ return false;
+ }
+
+ // The reference class is stored in the image dex file, which might not be the same as the cu's
+ // dex file. Query the reference class for the image dex file, then reset to the starting dex
+ // file after loading the class type.
+ uint16_t type_idx = 0;
+ const DexFile* ref_dex_file = nullptr;
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ type_idx = mirror::Reference::GetJavaLangRefReference()->GetDexTypeIndex();
+ ref_dex_file = mirror::Reference::GetJavaLangRefReference()->GetDexCache()->GetDexFile();
+ }
+ CHECK(LIKELY(ref_dex_file != nullptr));
+
+ // The address is either static within the image file, or needs to be patched up after compilation.
+ bool unused_type_initialized;
+ bool use_direct_type_ptr;
+ uintptr_t direct_type_ptr;
+ bool is_finalizable;
+ const DexFile* old_dex = cu_->dex_file;
+ cu_->dex_file = ref_dex_file;
+ RegStorage reg_class = TargetPtrReg(kArg1);
+ if (!cu_->compiler_driver->CanEmbedTypeInCode(*ref_dex_file, type_idx, &unused_type_initialized,
+ &use_direct_type_ptr, &direct_type_ptr,
+ &is_finalizable) || is_finalizable) {
+ cu_->dex_file = old_dex;
+ // The address is not known and a post-compile patch is not possible; cannot insert the intrinsic.
+ return false;
+ }
+ if (use_direct_type_ptr) {
+ LoadConstant(reg_class, direct_type_ptr);
+ } else {
+ LoadClassType(type_idx, kArg1);
+ }
+ cu_->dex_file = old_dex;
+
+ // get the offset for flags in reference class.
+ uint32_t slow_path_flag_offset = 0;
+ uint32_t disable_flag_offset = 0;
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ mirror::Class* reference_class = mirror::Reference::GetJavaLangRefReference();
+ slow_path_flag_offset = reference_class->GetSlowPathFlagOffset().Uint32Value();
+ disable_flag_offset = reference_class->GetDisableIntrinsicFlagOffset().Uint32Value();
+ }
+ CHECK(slow_path_flag_offset && disable_flag_offset &&
+ (slow_path_flag_offset != disable_flag_offset));
+
+ // intrinsic logic start.
+ RegLocation rl_obj = info->args[0];
+ rl_obj = LoadValue(rl_obj);
+
+ RegStorage reg_slow_path = AllocTemp();
+ RegStorage reg_disabled = AllocTemp();
+ Load32Disp(reg_class, slow_path_flag_offset, reg_slow_path);
+ Load32Disp(reg_class, disable_flag_offset, reg_disabled);
+ OpRegRegReg(kOpOr, reg_slow_path, reg_slow_path, reg_disabled);
+ FreeTemp(reg_disabled);
+
+ // if slow path, jump to JNI path target
+ LIR* slow_path_branch = OpCmpImmBranch(kCondNe, reg_slow_path, 0, nullptr);
+ FreeTemp(reg_slow_path);
+
+ // Slow path not enabled; simply load the referent of the reference object.
+ RegLocation rl_dest = InlineTarget(info);
+ RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
+ GenNullCheck(rl_obj.reg, info->opt_flags);
+ LoadRefDisp(rl_obj.reg, mirror::Reference::ReferentOffset().Int32Value(), rl_result.reg,
+ kNotVolatile);
+ MarkPossibleNullPointerException(info->opt_flags);
+ StoreValue(rl_dest, rl_result);
+
+ LIR* intrinsic_finish = NewLIR0(kPseudoTargetLabel);
+ AddIntrinsicSlowPath(info, slow_path_branch, intrinsic_finish);
+
+ return true;
+}
+
bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
if (cu_->instruction_set == kMips) {
// TODO - add Mips implementation
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 634ab943cf..c68ad6be4b 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -982,6 +982,7 @@ class Mir2Lir : public Backend {
*/
RegLocation InlineTargetWide(CallInfo* info);
+ bool GenInlinedGet(CallInfo* info);
bool GenInlinedCharAt(CallInfo* info);
bool GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty);
virtual bool GenInlinedReverseBits(CallInfo* info, OpSize size);
@@ -1201,7 +1202,7 @@ class Mir2Lir : public Backend {
* @param wide_kind What kind of view of the special register is required.
* @return Return the #RegStorage corresponding to the given purpose @p reg.
*
- * Note: For 32b system, wide (kWide) views only make sense for the argument registers and the
+ * @note For 32b system, wide (kWide) views only make sense for the argument registers and the
* return. In that case, this function should return a pair where the first component of
* the result will be the indicated special register.
*/
diff --git a/compiler/dex/quick_compiler_callbacks.cc b/compiler/dex/quick_compiler_callbacks.cc
new file mode 100644
index 0000000000..03bda78498
--- /dev/null
+++ b/compiler/dex/quick_compiler_callbacks.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "quick_compiler_callbacks.h"
+
+#include "quick/dex_file_to_method_inliner_map.h"
+#include "verifier/method_verifier-inl.h"
+#include "verification_results.h"
+
+namespace art {
+
+bool QuickCompilerCallbacks::MethodVerified(verifier::MethodVerifier* verifier) {
+ bool result = verification_results_->ProcessVerifiedMethod(verifier);
+ if (result) {
+ MethodReference ref = verifier->GetMethodReference();
+ method_inliner_map_->GetMethodInliner(ref.dex_file)
+ ->AnalyseMethodCode(verifier);
+ }
+ return result;
+}
+
+void QuickCompilerCallbacks::ClassRejected(ClassReference ref) {
+ verification_results_->AddRejectedClass(ref);
+}
+
+} // namespace art
diff --git a/compiler/driver/compiler_callbacks_impl.h b/compiler/dex/quick_compiler_callbacks.h
index 92adb20c1f..7c9614f73a 100644
--- a/compiler/driver/compiler_callbacks_impl.h
+++ b/compiler/dex/quick_compiler_callbacks.h
@@ -14,48 +14,38 @@
* limitations under the License.
*/
-#ifndef ART_COMPILER_DRIVER_COMPILER_CALLBACKS_IMPL_H_
-#define ART_COMPILER_DRIVER_COMPILER_CALLBACKS_IMPL_H_
+#ifndef ART_COMPILER_DEX_QUICK_COMPILER_CALLBACKS_H_
+#define ART_COMPILER_DEX_QUICK_COMPILER_CALLBACKS_H_
#include "compiler_callbacks.h"
-#include "dex/quick/dex_file_to_method_inliner_map.h"
-#include "verifier/method_verifier-inl.h"
namespace art {
-class CompilerCallbacksImpl FINAL : public CompilerCallbacks {
+class VerificationResults;
+class DexFileToMethodInlinerMap;
+
+class QuickCompilerCallbacks FINAL : public CompilerCallbacks {
public:
- CompilerCallbacksImpl(VerificationResults* verification_results,
- DexFileToMethodInlinerMap* method_inliner_map)
+ QuickCompilerCallbacks(VerificationResults* verification_results,
+ DexFileToMethodInlinerMap* method_inliner_map)
: verification_results_(verification_results),
method_inliner_map_(method_inliner_map) {
CHECK(verification_results != nullptr);
CHECK(method_inliner_map != nullptr);
}
- ~CompilerCallbacksImpl() { }
+ ~QuickCompilerCallbacks() { }
bool MethodVerified(verifier::MethodVerifier* verifier)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
- void ClassRejected(ClassReference ref) OVERRIDE {
- verification_results_->AddRejectedClass(ref);
- }
+
+ void ClassRejected(ClassReference ref) OVERRIDE;
private:
VerificationResults* const verification_results_;
DexFileToMethodInlinerMap* const method_inliner_map_;
};
-inline bool CompilerCallbacksImpl::MethodVerified(verifier::MethodVerifier* verifier) {
- bool result = verification_results_->ProcessVerifiedMethod(verifier);
- if (result) {
- MethodReference ref = verifier->GetMethodReference();
- method_inliner_map_->GetMethodInliner(ref.dex_file)
- ->AnalyseMethodCode(verifier);
- }
- return result;
-}
-
} // namespace art
-#endif // ART_COMPILER_DRIVER_COMPILER_CALLBACKS_IMPL_H_
+#endif // ART_COMPILER_DEX_QUICK_COMPILER_CALLBACKS_H_
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index 5325a68b37..9ae9bd400a 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -25,12 +25,13 @@
#include "dex_file.h"
#include "gc/heap.h"
#include "mirror/art_method-inl.h"
-#include "mirror/class.h"
#include "mirror/class-inl.h"
+#include "mirror/class_loader.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
#include "handle_scope-inl.h"
+#include "scoped_thread_state_change.h"
namespace art {
diff --git a/compiler/elf_writer_test.cc b/compiler/elf_writer_test.cc
index e637cfbe1d..e479322238 100644
--- a/compiler/elf_writer_test.cc
+++ b/compiler/elf_writer_test.cc
@@ -16,8 +16,10 @@
#include "elf_file.h"
+#include "base/stringprintf.h"
#include "common_compiler_test.h"
#include "oat.h"
+#include "utils.h"
namespace art {
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 6b2698085c..982e6d4f2c 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -20,14 +20,16 @@
#include <string>
#include <vector>
+#include "base/unix_file/fd_file.h"
#include "common_compiler_test.h"
-#include "compiler/elf_fixup.h"
-#include "compiler/image_writer.h"
-#include "compiler/oat_writer.h"
+#include "elf_fixup.h"
#include "gc/space/image_space.h"
+#include "image_writer.h"
#include "implicit_check_options.h"
#include "lock_word.h"
#include "mirror/object-inl.h"
+#include "oat_writer.h"
+#include "scoped_thread_state_change.h"
#include "signal_catcher.h"
#include "utils.h"
#include "vector_output_stream.h"
@@ -138,7 +140,7 @@ TEST_F(ImageTest, WriteRead) {
// Remove the reservation of the memory for use to load the image.
UnreserveImageSpace();
- Runtime::Options options;
+ RuntimeOptions options;
std::string image("-Ximage:");
image.append(image_location.GetFilename());
options.push_back(std::make_pair(image.c_str(), reinterpret_cast<void*>(NULL)));
diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h
index efc0b42db4..6db0c3b8b3 100644
--- a/compiler/jni/quick/calling_convention.h
+++ b/compiler/jni/quick/calling_convention.h
@@ -19,6 +19,7 @@
#include <vector>
#include "handle_scope.h"
+#include "primitive.h"
#include "thread.h"
#include "utils/managed_register.h"
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index d2ee0ede80..1444ca0309 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -15,15 +15,19 @@
*/
#include "common_compiler_test.h"
-#include "compiler/compiler.h"
-#include "compiler/oat_writer.h"
+#include "compiler.h"
+#include "dex/verification_results.h"
+#include "dex/quick/dex_file_to_method_inliner_map.h"
+#include "dex/quick_compiler_callbacks.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "implicit_check_options.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
-#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
+#include "mirror/object-inl.h"
#include "oat_file-inl.h"
+#include "oat_writer.h"
+#include "scoped_thread_state_change.h"
#include "vector_output_stream.h"
namespace art {
@@ -95,8 +99,8 @@ TEST_F(OatTest, WriteRead) {
compiler_options_.reset(new CompilerOptions);
verification_results_.reset(new VerificationResults(compiler_options_.get()));
method_inliner_map_.reset(new DexFileToMethodInlinerMap);
- callbacks_.reset(new CompilerCallbacksImpl(verification_results_.get(),
- method_inliner_map_.get()));
+ callbacks_.reset(new QuickCompilerCallbacks(verification_results_.get(),
+ method_inliner_map_.get()));
timer_.reset(new CumulativeLogger("Compilation times"));
compiler_driver_.reset(new CompilerDriver(compiler_options_.get(),
verification_results_.get(),
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index b0e6a75b3d..e0db0f18be 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -38,7 +38,7 @@ void CodeGenerator::CompileBaseline(CodeAllocator* allocator) {
DCHECK_EQ(frame_size_, kUninitializedFrameSize);
ComputeFrameSize(GetGraph()->GetMaximumNumberOfOutVRegs()
- + GetGraph()->GetNumberOfVRegs()
+ + GetGraph()->GetNumberOfLocalVRegs()
+ GetGraph()->GetNumberOfTemporaries()
+ 1 /* filler */);
GenerateFrameEntry();
@@ -106,6 +106,39 @@ size_t CodeGenerator::AllocateFreeRegisterInternal(
return -1;
}
+void CodeGenerator::ComputeFrameSize(size_t number_of_spill_slots) {
+ SetFrameSize(RoundUp(
+ number_of_spill_slots * kVRegSize
+ + kVRegSize // Art method
+ + FrameEntrySpillSize(),
+ kStackAlignment));
+}
+
+Location CodeGenerator::GetTemporaryLocation(HTemporary* temp) const {
+ uint16_t number_of_locals = GetGraph()->GetNumberOfLocalVRegs();
+ // Use the temporary region (right below the dex registers).
+ int32_t slot = GetFrameSize() - FrameEntrySpillSize()
+ - kVRegSize // filler
+ - (number_of_locals * kVRegSize)
+ - ((1 + temp->GetIndex()) * kVRegSize);
+ return Location::StackSlot(slot);
+}
+
+int32_t CodeGenerator::GetStackSlot(HLocal* local) const {
+ uint16_t reg_number = local->GetRegNumber();
+ uint16_t number_of_locals = GetGraph()->GetNumberOfLocalVRegs();
+ if (reg_number >= number_of_locals) {
+ // Local is a parameter of the method. It is stored in the caller's frame.
+ return GetFrameSize() + kVRegSize // ART method
+ + (reg_number - number_of_locals) * kVRegSize;
+ } else {
+ // Local is a temporary in this method. It is stored in this method's frame.
+ return GetFrameSize() - FrameEntrySpillSize()
+ - kVRegSize // filler.
+ - (number_of_locals * kVRegSize)
+ + (reg_number * kVRegSize);
+ }
+}
void CodeGenerator::AllocateRegistersLocally(HInstruction* instruction) const {
LocationSummary* locations = instruction->GetLocations();
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index abfb790d8f..18e3e5a056 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -96,7 +96,10 @@ class CodeGenerator : public ArenaObject {
virtual HGraphVisitor* GetInstructionVisitor() = 0;
virtual Assembler* GetAssembler() = 0;
virtual size_t GetWordSize() const = 0;
- virtual void ComputeFrameSize(size_t number_of_spill_slots) = 0;
+ void ComputeFrameSize(size_t number_of_spill_slots);
+ virtual size_t FrameEntrySpillSize() const = 0;
+ int32_t GetStackSlot(HLocal* local) const;
+ Location GetTemporaryLocation(HTemporary* temp) const;
uint32_t GetFrameSize() const { return frame_size_; }
void SetFrameSize(uint32_t size) { frame_size_ = size; }
@@ -150,7 +153,6 @@ class CodeGenerator : public ArenaObject {
size_t AllocateFreeRegisterInternal(bool* blocked_registers, size_t number_of_registers) const;
virtual Location GetStackLocation(HLoadLocal* load) const = 0;
- virtual Location GetTemporaryLocation(HTemporary* temp) const = 0;
// Frame size required for this method.
uint32_t frame_size_;
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index e70240783a..73c2d48320 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -99,6 +99,10 @@ CodeGeneratorARM::CodeGeneratorARM(HGraph* graph)
instruction_visitor_(graph, this),
move_resolver_(graph->GetArena(), this) {}
+size_t CodeGeneratorARM::FrameEntrySpillSize() const {
+ return kNumberOfPushedRegistersAtEntry * kArmWordSize;
+}
+
static bool* GetBlockedRegisterPairs(bool* blocked_registers) {
return blocked_registers + kNumberOfAllocIds;
}
@@ -200,14 +204,6 @@ InstructionCodeGeneratorARM::InstructionCodeGeneratorARM(HGraph* graph, CodeGene
assembler_(codegen->GetAssembler()),
codegen_(codegen) {}
-void CodeGeneratorARM::ComputeFrameSize(size_t number_of_spill_slots) {
- SetFrameSize(RoundUp(
- number_of_spill_slots * kVRegSize
- + kVRegSize // Art method
- + kNumberOfPushedRegistersAtEntry * kArmWordSize,
- kStackAlignment));
-}
-
void CodeGeneratorARM::GenerateFrameEntry() {
core_spill_mask_ |= (1 << LR | 1 << R6 | 1 << R7);
__ PushList(1 << LR | 1 << R6 | 1 << R7);
@@ -226,33 +222,6 @@ void CodeGeneratorARM::Bind(Label* label) {
__ Bind(label);
}
-Location CodeGeneratorARM::GetTemporaryLocation(HTemporary* temp) const {
- uint16_t number_of_vregs = GetGraph()->GetNumberOfVRegs();
- // Use the temporary region (right below the dex registers).
- int32_t slot = GetFrameSize() - (kNumberOfPushedRegistersAtEntry * kArmWordSize)
- - kVRegSize // filler
- - (number_of_vregs * kVRegSize)
- - ((1 + temp->GetIndex()) * kVRegSize);
- return Location::StackSlot(slot);
-}
-
-int32_t CodeGeneratorARM::GetStackSlot(HLocal* local) const {
- uint16_t reg_number = local->GetRegNumber();
- uint16_t number_of_vregs = GetGraph()->GetNumberOfVRegs();
- uint16_t number_of_in_vregs = GetGraph()->GetNumberOfInVRegs();
- if (reg_number >= number_of_vregs - number_of_in_vregs) {
- // Local is a parameter of the method. It is stored in the caller's frame.
- return GetFrameSize() + kVRegSize // ART method
- + (reg_number - number_of_vregs + number_of_in_vregs) * kVRegSize;
- } else {
- // Local is a temporary in this method. It is stored in this method's frame.
- return GetFrameSize() - (kNumberOfPushedRegistersAtEntry * kArmWordSize)
- - kVRegSize // filler.
- - (number_of_vregs * kVRegSize)
- + (reg_number * kVRegSize);
- }
-}
-
Location CodeGeneratorARM::GetStackLocation(HLoadLocal* load) const {
switch (load->GetType()) {
case Primitive::kPrimLong:
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index b7322b271e..1b5974f9a2 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -126,7 +126,6 @@ class CodeGeneratorARM : public CodeGenerator {
explicit CodeGeneratorARM(HGraph* graph);
virtual ~CodeGeneratorARM() { }
- virtual void ComputeFrameSize(size_t number_of_spill_slots) OVERRIDE;
virtual void GenerateFrameEntry() OVERRIDE;
virtual void GenerateFrameExit() OVERRIDE;
virtual void Bind(Label* label) OVERRIDE;
@@ -136,6 +135,8 @@ class CodeGeneratorARM : public CodeGenerator {
return kArmWordSize;
}
+ virtual size_t FrameEntrySpillSize() const OVERRIDE;
+
virtual HGraphVisitor* GetLocationBuilder() OVERRIDE {
return &location_builder_;
}
@@ -153,9 +154,7 @@ class CodeGeneratorARM : public CodeGenerator {
Primitive::Type type, bool* blocked_registers) const OVERRIDE;
virtual size_t GetNumberOfRegisters() const OVERRIDE;
- int32_t GetStackSlot(HLocal* local) const;
virtual Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
- virtual Location GetTemporaryLocation(HTemporary* temp) const OVERRIDE;
virtual size_t GetNumberOfCoreRegisters() const OVERRIDE {
return kNumberOfCoreRegisters;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 52cb39dc7f..4e69a0cad8 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -83,12 +83,8 @@ CodeGeneratorX86::CodeGeneratorX86(HGraph* graph)
instruction_visitor_(graph, this),
move_resolver_(graph->GetArena(), this) {}
-void CodeGeneratorX86::ComputeFrameSize(size_t number_of_spill_slots) {
- SetFrameSize(RoundUp(
- number_of_spill_slots * kVRegSize
- + kVRegSize // Art method
- + kNumberOfPushedRegistersAtEntry * kX86WordSize,
- kStackAlignment));
+size_t CodeGeneratorX86::FrameEntrySpillSize() const {
+ return kNumberOfPushedRegistersAtEntry * kX86WordSize;
}
static bool* GetBlockedRegisterPairs(bool* blocked_registers) {
@@ -204,34 +200,6 @@ void InstructionCodeGeneratorX86::LoadCurrentMethod(Register reg) {
__ movl(reg, Address(ESP, kCurrentMethodStackOffset));
}
-Location CodeGeneratorX86::GetTemporaryLocation(HTemporary* temp) const {
- uint16_t number_of_vregs = GetGraph()->GetNumberOfVRegs();
- // Use the temporary region (right below the dex registers).
- int32_t slot = GetFrameSize() - (kNumberOfPushedRegistersAtEntry * kX86WordSize)
- - kVRegSize // filler
- - (number_of_vregs * kVRegSize)
- - ((1 + temp->GetIndex()) * kVRegSize);
- return Location::StackSlot(slot);
-}
-
-int32_t CodeGeneratorX86::GetStackSlot(HLocal* local) const {
- uint16_t reg_number = local->GetRegNumber();
- uint16_t number_of_vregs = GetGraph()->GetNumberOfVRegs();
- uint16_t number_of_in_vregs = GetGraph()->GetNumberOfInVRegs();
- if (reg_number >= number_of_vregs - number_of_in_vregs) {
- // Local is a parameter of the method. It is stored in the caller's frame.
- return GetFrameSize() + kVRegSize // ART method
- + (reg_number - number_of_vregs + number_of_in_vregs) * kVRegSize;
- } else {
- // Local is a temporary in this method. It is stored in this method's frame.
- return GetFrameSize() - (kNumberOfPushedRegistersAtEntry * kX86WordSize)
- - kVRegSize // filler.
- - (number_of_vregs * kVRegSize)
- + (reg_number * kVRegSize);
- }
-}
-
-
Location CodeGeneratorX86::GetStackLocation(HLoadLocal* load) const {
switch (load->GetType()) {
case Primitive::kPrimLong:
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 2a4595447d..d622d2a685 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -128,7 +128,6 @@ class CodeGeneratorX86 : public CodeGenerator {
explicit CodeGeneratorX86(HGraph* graph);
virtual ~CodeGeneratorX86() { }
- virtual void ComputeFrameSize(size_t number_of_spill_slots) OVERRIDE;
virtual void GenerateFrameEntry() OVERRIDE;
virtual void GenerateFrameExit() OVERRIDE;
virtual void Bind(Label* label) OVERRIDE;
@@ -138,6 +137,8 @@ class CodeGeneratorX86 : public CodeGenerator {
return kX86WordSize;
}
+ virtual size_t FrameEntrySpillSize() const OVERRIDE;
+
virtual HGraphVisitor* GetLocationBuilder() OVERRIDE {
return &location_builder_;
}
@@ -155,9 +156,7 @@ class CodeGeneratorX86 : public CodeGenerator {
virtual ManagedRegister AllocateFreeRegister(
Primitive::Type type, bool* blocked_registers) const OVERRIDE;
- int32_t GetStackSlot(HLocal* local) const;
virtual Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
- virtual Location GetTemporaryLocation(HTemporary* temp) const OVERRIDE;
virtual size_t GetNumberOfCoreRegisters() const OVERRIDE {
return kNumberOfCpuRegisters;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 93d74ee1a2..e3ce5ceb4f 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -91,6 +91,10 @@ CodeGeneratorX86_64::CodeGeneratorX86_64(HGraph* graph)
instruction_visitor_(graph, this),
move_resolver_(graph->GetArena(), this) {}
+size_t CodeGeneratorX86_64::FrameEntrySpillSize() const {
+ return kNumberOfPushedRegistersAtEntry * kX86_64WordSize;
+}
+
InstructionCodeGeneratorX86_64::InstructionCodeGeneratorX86_64(HGraph* graph, CodeGeneratorX86_64* codegen)
: HGraphVisitor(graph),
assembler_(codegen->GetAssembler()),
@@ -137,16 +141,6 @@ void CodeGeneratorX86_64::SetupBlockedRegisters(bool* blocked_registers) const {
blocked_registers[R15] = true;
}
-void CodeGeneratorX86_64::ComputeFrameSize(size_t number_of_spill_slots) {
- // Add the current ART method to the frame size, the return PC, and the filler.
- SetFrameSize(RoundUp(
- number_of_spill_slots * kVRegSize
- + kVRegSize // filler
- + kVRegSize // Art method
- + kNumberOfPushedRegistersAtEntry * kX86_64WordSize,
- kStackAlignment));
-}
-
void CodeGeneratorX86_64::GenerateFrameEntry() {
// Create a fake register to mimic Quick.
static const int kFakeReturnRegister = 16;
@@ -170,33 +164,6 @@ void InstructionCodeGeneratorX86_64::LoadCurrentMethod(CpuRegister reg) {
__ movl(reg, Address(CpuRegister(RSP), kCurrentMethodStackOffset));
}
-Location CodeGeneratorX86_64::GetTemporaryLocation(HTemporary* temp) const {
- uint16_t number_of_vregs = GetGraph()->GetNumberOfVRegs();
- // Use the temporary region (right below the dex registers).
- int32_t slot = GetFrameSize() - (kNumberOfPushedRegistersAtEntry * kX86_64WordSize)
- - kVRegSize // filler
- - (number_of_vregs * kVRegSize)
- - ((1 + temp->GetIndex()) * kVRegSize);
- return Location::StackSlot(slot);
-}
-
-int32_t CodeGeneratorX86_64::GetStackSlot(HLocal* local) const {
- uint16_t reg_number = local->GetRegNumber();
- uint16_t number_of_vregs = GetGraph()->GetNumberOfVRegs();
- uint16_t number_of_in_vregs = GetGraph()->GetNumberOfInVRegs();
- if (reg_number >= number_of_vregs - number_of_in_vregs) {
- // Local is a parameter of the method. It is stored in the caller's frame.
- return GetFrameSize() + kVRegSize // ART method
- + (reg_number - number_of_vregs + number_of_in_vregs) * kVRegSize;
- } else {
- // Local is a temporary in this method. It is stored in this method's frame.
- return GetFrameSize() - (kNumberOfPushedRegistersAtEntry * kX86_64WordSize)
- - kVRegSize // filler
- - (number_of_vregs * kVRegSize)
- + (reg_number * kVRegSize);
- }
-}
-
Location CodeGeneratorX86_64::GetStackLocation(HLoadLocal* load) const {
switch (load->GetType()) {
case Primitive::kPrimLong:
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 97a0b2e579..8283dda4a5 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -125,7 +125,6 @@ class CodeGeneratorX86_64 : public CodeGenerator {
explicit CodeGeneratorX86_64(HGraph* graph);
virtual ~CodeGeneratorX86_64() {}
- virtual void ComputeFrameSize(size_t number_of_spill_slots) OVERRIDE;
virtual void GenerateFrameEntry() OVERRIDE;
virtual void GenerateFrameExit() OVERRIDE;
virtual void Bind(Label* label) OVERRIDE;
@@ -135,6 +134,8 @@ class CodeGeneratorX86_64 : public CodeGenerator {
return kX86_64WordSize;
}
+ virtual size_t FrameEntrySpillSize() const OVERRIDE;
+
virtual HGraphVisitor* GetLocationBuilder() OVERRIDE {
return &location_builder_;
}
@@ -151,9 +152,7 @@ class CodeGeneratorX86_64 : public CodeGenerator {
return &move_resolver_;
}
- int32_t GetStackSlot(HLocal* local) const;
virtual Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
- virtual Location GetTemporaryLocation(HTemporary* temp) const OVERRIDE;
virtual size_t GetNumberOfRegisters() const OVERRIDE {
return kNumberOfRegIds;
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 689aab08b3..e87b044cc9 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -19,6 +19,7 @@
#include "locations.h"
#include "offsets.h"
+#include "primitive.h"
#include "utils/allocation.h"
#include "utils/arena_bit_vector.h"
#include "utils/growable_array.h"
@@ -138,6 +139,10 @@ class HGraph : public ArenaObject {
return number_of_in_vregs_;
}
+ uint16_t GetNumberOfLocalVRegs() const {
+ return number_of_vregs_ - number_of_in_vregs_;
+ }
+
const GrowableArray<HBasicBlock*>& GetReversePostOrder() const {
return reverse_post_order_;
}
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
index 7d4cd1a862..e35ff56c75 100644
--- a/compiler/optimizing/register_allocator.h
+++ b/compiler/optimizing/register_allocator.h
@@ -18,6 +18,7 @@
#define ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATOR_H_
#include "base/macros.h"
+#include "primitive.h"
#include "utils/growable_array.h"
namespace art {
diff --git a/compiler/output_stream_test.cc b/compiler/output_stream_test.cc
index 5fa0ccb143..315ca09e59 100644
--- a/compiler/output_stream_test.cc
+++ b/compiler/output_stream_test.cc
@@ -17,6 +17,7 @@
#include "file_output_stream.h"
#include "vector_output_stream.h"
+#include "base/unix_file/fd_file.h"
#include "base/logging.h"
#include "buffered_output_stream.h"
#include "common_runtime_test.h"