Svelter libart-compiler
Added new environment variables ART_{TARGET,HOST}_CODEGEN_ARCHS, which
may be set to 'all', 'svelte', or a space-separated list of architectures.
When compiled with ART_{TARGET,HOST}_CODEGEN_ARCHS='all' (the default
value) dex2oat will be able to generate output for all supported
architectures.
When compiled with ART_TARGET_CODEGEN_ARCHS='svelte', only the
architectures of the TARGET will be included. When
ART_HOST_CODEGEN_ARCHS='svelte', the host dex2oat will include all
architectures of the target plus the host architectures.
If an explicit list of architectures is given, only those will be included.
Change-Id: I87f4ad0131ab1b37544d8799e947ce4733b6daec
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 4944915..7d368a2 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -24,18 +24,6 @@
dex/gvn_dead_code_elimination.cc \
dex/local_value_numbering.cc \
dex/type_inference.cc \
- dex/quick/arm/assemble_arm.cc \
- dex/quick/arm/call_arm.cc \
- dex/quick/arm/fp_arm.cc \
- dex/quick/arm/int_arm.cc \
- dex/quick/arm/target_arm.cc \
- dex/quick/arm/utility_arm.cc \
- dex/quick/arm64/assemble_arm64.cc \
- dex/quick/arm64/call_arm64.cc \
- dex/quick/arm64/fp_arm64.cc \
- dex/quick/arm64/int_arm64.cc \
- dex/quick/arm64/target_arm64.cc \
- dex/quick/arm64/utility_arm64.cc \
dex/quick/codegen_util.cc \
dex/quick/dex_file_method_inliner.cc \
dex/quick/dex_file_to_method_inliner_map.cc \
@@ -44,22 +32,10 @@
dex/quick/gen_loadstore.cc \
dex/quick/lazy_debug_frame_opcode_writer.cc \
dex/quick/local_optimizations.cc \
- dex/quick/mips/assemble_mips.cc \
- dex/quick/mips/call_mips.cc \
- dex/quick/mips/fp_mips.cc \
- dex/quick/mips/int_mips.cc \
- dex/quick/mips/target_mips.cc \
- dex/quick/mips/utility_mips.cc \
dex/quick/mir_to_lir.cc \
dex/quick/quick_compiler.cc \
dex/quick/ralloc_util.cc \
dex/quick/resource_mask.cc \
- dex/quick/x86/assemble_x86.cc \
- dex/quick/x86/call_x86.cc \
- dex/quick/x86/fp_x86.cc \
- dex/quick/x86/int_x86.cc \
- dex/quick/x86/target_x86.cc \
- dex/quick/x86/utility_x86.cc \
dex/dex_to_dex_compiler.cc \
dex/bb_optimizations.cc \
dex/compiler_ir.cc \
@@ -82,30 +58,13 @@
driver/compiler_options.cc \
driver/dex_compilation_unit.cc \
linker/relative_patcher.cc \
- linker/arm/relative_patcher_arm_base.cc \
- linker/arm/relative_patcher_thumb2.cc \
- linker/arm64/relative_patcher_arm64.cc \
- linker/x86/relative_patcher_x86_base.cc \
- linker/x86/relative_patcher_x86.cc \
- linker/x86_64/relative_patcher_x86_64.cc \
jit/jit_compiler.cc \
- jni/quick/arm/calling_convention_arm.cc \
- jni/quick/arm64/calling_convention_arm64.cc \
- jni/quick/mips/calling_convention_mips.cc \
- jni/quick/mips64/calling_convention_mips64.cc \
- jni/quick/x86/calling_convention_x86.cc \
- jni/quick/x86_64/calling_convention_x86_64.cc \
jni/quick/calling_convention.cc \
jni/quick/jni_compiler.cc \
optimizing/boolean_simplifier.cc \
optimizing/builder.cc \
optimizing/bounds_check_elimination.cc \
optimizing/code_generator.cc \
- optimizing/code_generator_arm.cc \
- optimizing/code_generator_arm64.cc \
- optimizing/code_generator_mips64.cc \
- optimizing/code_generator_x86.cc \
- optimizing/code_generator_x86_64.cc \
optimizing/code_generator_utils.cc \
optimizing/constant_folding.cc \
optimizing/dead_code_elimination.cc \
@@ -115,10 +74,6 @@
optimizing/inliner.cc \
optimizing/instruction_simplifier.cc \
optimizing/intrinsics.cc \
- optimizing/intrinsics_arm.cc \
- optimizing/intrinsics_arm64.cc \
- optimizing/intrinsics_x86.cc \
- optimizing/intrinsics_x86_64.cc \
optimizing/licm.cc \
optimizing/locations.cc \
optimizing/nodes.cc \
@@ -136,21 +91,7 @@
optimizing/stack_map_stream.cc \
trampolines/trampoline_compiler.cc \
utils/arena_bit_vector.cc \
- utils/arm/assembler_arm.cc \
- utils/arm/assembler_arm32.cc \
- utils/arm/assembler_thumb2.cc \
- utils/arm/managed_register_arm.cc \
- utils/arm64/assembler_arm64.cc \
- utils/arm64/managed_register_arm64.cc \
utils/assembler.cc \
- utils/mips/assembler_mips.cc \
- utils/mips/managed_register_mips.cc \
- utils/mips64/assembler_mips64.cc \
- utils/mips64/managed_register_mips64.cc \
- utils/x86/assembler_x86.cc \
- utils/x86/managed_register_x86.cc \
- utils/x86_64/assembler_x86_64.cc \
- utils/x86_64/managed_register_x86_64.cc \
utils/swap_space.cc \
buffered_output_stream.cc \
compiler.cc \
@@ -163,12 +104,89 @@
output_stream.cc \
vector_output_stream.cc
+LIBART_COMPILER_SRC_FILES_arm := \
+ dex/quick/arm/assemble_arm.cc \
+ dex/quick/arm/call_arm.cc \
+ dex/quick/arm/fp_arm.cc \
+ dex/quick/arm/int_arm.cc \
+ dex/quick/arm/target_arm.cc \
+ dex/quick/arm/utility_arm.cc \
+ jni/quick/arm/calling_convention_arm.cc \
+ linker/arm/relative_patcher_arm_base.cc \
+ linker/arm/relative_patcher_thumb2.cc \
+ optimizing/code_generator_arm.cc \
+ optimizing/intrinsics_arm.cc \
+ utils/arm/assembler_arm.cc \
+ utils/arm/assembler_arm32.cc \
+ utils/arm/assembler_thumb2.cc \
+ utils/arm/managed_register_arm.cc \
+
+# TODO We should really separate out those files that are actually needed for both variants of an
+# architecture into its own category. Currently we just include all of the 32bit variant in the
+# 64bit variant. It also might be good to allow one to compile only the 64bit variant without the
+# 32bit one.
+LIBART_COMPILER_SRC_FILES_arm64 := \
+ $(LIBART_COMPILER_SRC_FILES_arm) \
+ dex/quick/arm64/assemble_arm64.cc \
+ dex/quick/arm64/call_arm64.cc \
+ dex/quick/arm64/fp_arm64.cc \
+ dex/quick/arm64/int_arm64.cc \
+ dex/quick/arm64/target_arm64.cc \
+ dex/quick/arm64/utility_arm64.cc \
+ jni/quick/arm64/calling_convention_arm64.cc \
+ linker/arm64/relative_patcher_arm64.cc \
+ optimizing/code_generator_arm64.cc \
+ optimizing/intrinsics_arm64.cc \
+ utils/arm64/assembler_arm64.cc \
+ utils/arm64/managed_register_arm64.cc \
+
+LIBART_COMPILER_SRC_FILES_mips := \
+ dex/quick/mips/assemble_mips.cc \
+ dex/quick/mips/call_mips.cc \
+ dex/quick/mips/fp_mips.cc \
+ dex/quick/mips/int_mips.cc \
+ dex/quick/mips/target_mips.cc \
+ dex/quick/mips/utility_mips.cc \
+ jni/quick/mips/calling_convention_mips.cc \
+ utils/mips/assembler_mips.cc \
+ utils/mips/managed_register_mips.cc \
+
+LIBART_COMPILER_SRC_FILES_mips64 := \
+ $(LIBART_COMPILER_SRC_FILES_mips) \
+ jni/quick/mips64/calling_convention_mips64.cc \
+ optimizing/code_generator_mips64.cc \
+ utils/mips64/assembler_mips64.cc \
+ utils/mips64/managed_register_mips64.cc \
+
+
+LIBART_COMPILER_SRC_FILES_x86 := \
+ dex/quick/x86/assemble_x86.cc \
+ dex/quick/x86/call_x86.cc \
+ dex/quick/x86/fp_x86.cc \
+ dex/quick/x86/int_x86.cc \
+ dex/quick/x86/target_x86.cc \
+ dex/quick/x86/utility_x86.cc \
+ jni/quick/x86/calling_convention_x86.cc \
+ linker/x86/relative_patcher_x86.cc \
+ linker/x86/relative_patcher_x86_base.cc \
+ optimizing/code_generator_x86.cc \
+ optimizing/intrinsics_x86.cc \
+ utils/x86/assembler_x86.cc \
+ utils/x86/managed_register_x86.cc \
+
+LIBART_COMPILER_SRC_FILES_x86_64 := \
+ $(LIBART_COMPILER_SRC_FILES_x86) \
+ jni/quick/x86_64/calling_convention_x86_64.cc \
+ linker/x86_64/relative_patcher_x86_64.cc \
+ optimizing/intrinsics_x86_64.cc \
+ optimizing/code_generator_x86_64.cc \
+ utils/x86_64/assembler_x86_64.cc \
+ utils/x86_64/managed_register_x86_64.cc \
+
+
LIBART_COMPILER_CFLAGS :=
LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES := \
- dex/quick/arm/arm_lir.h \
- dex/quick/arm64/arm64_lir.h \
- dex/quick/mips/mips_lir.h \
dex/quick/resource_mask.h \
dex/compiler_enums.h \
dex/dex_to_dex_compiler.h \
@@ -177,9 +195,26 @@
driver/compiler_driver.h \
driver/compiler_options.h \
image_writer.h \
- optimizing/locations.h \
+ optimizing/locations.h
+
+LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_arm := \
+ dex/quick/arm/arm_lir.h \
utils/arm/constants_arm.h
+LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_arm64 := \
+ $(LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_arm) \
+ dex/quick/arm64/arm64_lir.h
+
+LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_mips := \
+ dex/quick/mips/mips_lir.h
+
+LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_mips64 := \
+ $(LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_mips)
+
+LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_x86 :=
+LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_x86_64 := \
+ $(LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_x86)
+
# $(1): target or host
# $(2): ndebug or debug
# $(3): static or shared (empty means shared, applies only for host)
@@ -202,6 +237,9 @@
include $(CLEAR_VARS)
ifeq ($$(art_target_or_host),host)
LOCAL_IS_HOST_MODULE := true
+ art_codegen_targets := $(ART_HOST_CODEGEN_ARCHS)
+ else
+ art_codegen_targets := $(ART_TARGET_CODEGEN_ARCHS)
endif
LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
ifeq ($$(art_ndebug_or_debug),ndebug)
@@ -230,10 +268,14 @@
LOCAL_MODULE_CLASS := SHARED_LIBRARIES
endif
- LOCAL_SRC_FILES := $$(LIBART_COMPILER_SRC_FILES)
+ # Sort removes duplicates.
+ LOCAL_SRC_FILES := $$(LIBART_COMPILER_SRC_FILES) \
+ $$(sort $$(foreach arch,$$(art_codegen_targets), $$(LIBART_COMPILER_SRC_FILES_$$(arch))))
GENERATED_SRC_DIR := $$(call local-generated-sources-dir)
- ENUM_OPERATOR_OUT_CC_FILES := $$(patsubst %.h,%_operator_out.cc,$$(LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES))
+ ENUM_OPERATOR_OUT_CC_FILES := $$(patsubst %.h,%_operator_out.cc,\
+ $$(LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES) \
+ $$(sort $$(foreach arch,$$(art_codegen_targets), $$(LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_$$(arch)))))
ENUM_OPERATOR_OUT_GEN := $$(addprefix $$(GENERATED_SRC_DIR)/,$$(ENUM_OPERATOR_OUT_CC_FILES))
$$(ENUM_OPERATOR_OUT_GEN): art/tools/generate-operator-out.py
@@ -326,6 +368,7 @@
art_target_or_host :=
art_ndebug_or_debug :=
art_static_or_shared :=
+ art_codegen_targets :=
endef
# We always build dex2oat and dependencies, even if the host build is otherwise disabled, since they are used to cross compile for the target.
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index 39496a4..6e73ae7 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -43,10 +43,21 @@
#include "runtime.h"
// Specific compiler backends.
+#ifdef ART_ENABLE_CODEGEN_arm
#include "dex/quick/arm/backend_arm.h"
+#endif
+
+#ifdef ART_ENABLE_CODEGEN_arm64
#include "dex/quick/arm64/backend_arm64.h"
+#endif
+
+#if defined(ART_ENABLE_CODEGEN_mips) || defined(ART_ENABLE_CODEGEN_mips64)
#include "dex/quick/mips/backend_mips.h"
+#endif
+
+#if defined(ART_ENABLE_CODEGEN_x86) || defined(ART_ENABLE_CODEGEN_x86_64)
#include "dex/quick/x86/backend_x86.h"
+#endif
namespace art {
@@ -844,22 +855,42 @@
UNUSED(compilation_unit);
Mir2Lir* mir_to_lir = nullptr;
switch (cu->instruction_set) {
+#ifdef ART_ENABLE_CODEGEN_arm
case kThumb2:
mir_to_lir = ArmCodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
break;
+#endif // ART_ENABLE_CODEGEN_arm
+#ifdef ART_ENABLE_CODEGEN_arm64
case kArm64:
mir_to_lir = Arm64CodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
break;
+#endif // ART_ENABLE_CODEGEN_arm64
+#if defined(ART_ENABLE_CODEGEN_mips) || defined(ART_ENABLE_CODEGEN_mips64)
+ // Intentional 2 level ifdef. Want to fail on mips64 if it is not enabled, even if mips is
+ // and vice versa.
+#ifdef ART_ENABLE_CODEGEN_mips
case kMips:
// Fall-through.
+#endif // ART_ENABLE_CODEGEN_mips
+#ifdef ART_ENABLE_CODEGEN_mips64
case kMips64:
+#endif // ART_ENABLE_CODEGEN_mips64
mir_to_lir = MipsCodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
break;
+#endif // ART_ENABLE_CODEGEN_mips || ART_ENABLE_CODEGEN_mips64
+#if defined(ART_ENABLE_CODEGEN_x86) || defined(ART_ENABLE_CODEGEN_x86_64)
+ // Intentional 2 level ifdef. Want to fail on x86_64 if it is not enabled, even if x86 is
+ // and vice versa.
+#ifdef ART_ENABLE_CODEGEN_x86
case kX86:
// Fall-through.
+#endif // ART_ENABLE_CODEGEN_x86
+#ifdef ART_ENABLE_CODEGEN_x86_64
case kX86_64:
+#endif // ART_ENABLE_CODEGEN_x86_64
mir_to_lir = X86CodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
break;
+#endif // ART_ENABLE_CODEGEN_x86 || ART_ENABLE_CODEGEN_x86_64
default:
LOG(FATAL) << "Unexpected instruction set: " << cu->instruction_set;
}
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index bb8136b..cef8c5d 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -17,12 +17,30 @@
#include "calling_convention.h"
#include "base/logging.h"
+
+#ifdef ART_ENABLE_CODEGEN_arm
#include "jni/quick/arm/calling_convention_arm.h"
+#endif
+
+#ifdef ART_ENABLE_CODEGEN_arm64
#include "jni/quick/arm64/calling_convention_arm64.h"
+#endif
+
+#ifdef ART_ENABLE_CODEGEN_mips
#include "jni/quick/mips/calling_convention_mips.h"
+#endif
+
+#ifdef ART_ENABLE_CODEGEN_mips64
#include "jni/quick/mips64/calling_convention_mips64.h"
+#endif
+
+#ifdef ART_ENABLE_CODEGEN_x86
#include "jni/quick/x86/calling_convention_x86.h"
+#endif
+
+#ifdef ART_ENABLE_CODEGEN_x86_64
#include "jni/quick/x86_64/calling_convention_x86_64.h"
+#endif
namespace art {
@@ -31,19 +49,31 @@
ManagedRuntimeCallingConvention* ManagedRuntimeCallingConvention::Create(
bool is_static, bool is_synchronized, const char* shorty, InstructionSet instruction_set) {
switch (instruction_set) {
+#ifdef ART_ENABLE_CODEGEN_arm
case kArm:
case kThumb2:
return new arm::ArmManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
+#endif
+#ifdef ART_ENABLE_CODEGEN_arm64
case kArm64:
return new arm64::Arm64ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
+#endif
+#ifdef ART_ENABLE_CODEGEN_mips
case kMips:
return new mips::MipsManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
+#endif
+#ifdef ART_ENABLE_CODEGEN_mips64
case kMips64:
return new mips64::Mips64ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86
case kX86:
return new x86::X86ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86_64
case kX86_64:
return new x86_64::X86_64ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
+#endif
default:
LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
return nullptr;
@@ -106,19 +136,31 @@
const char* shorty,
InstructionSet instruction_set) {
switch (instruction_set) {
+#ifdef ART_ENABLE_CODEGEN_arm
case kArm:
case kThumb2:
return new arm::ArmJniCallingConvention(is_static, is_synchronized, shorty);
+#endif
+#ifdef ART_ENABLE_CODEGEN_arm64
case kArm64:
return new arm64::Arm64JniCallingConvention(is_static, is_synchronized, shorty);
+#endif
+#ifdef ART_ENABLE_CODEGEN_mips
case kMips:
return new mips::MipsJniCallingConvention(is_static, is_synchronized, shorty);
+#endif
+#ifdef ART_ENABLE_CODEGEN_mips64
case kMips64:
return new mips64::Mips64JniCallingConvention(is_static, is_synchronized, shorty);
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86
case kX86:
return new x86::X86JniCallingConvention(is_static, is_synchronized, shorty);
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86_64
case kX86_64:
return new x86_64::X86_64JniCallingConvention(is_static, is_synchronized, shorty);
+#endif
default:
LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
return nullptr;
diff --git a/compiler/linker/relative_patcher.cc b/compiler/linker/relative_patcher.cc
index 89aed95..82702dc 100644
--- a/compiler/linker/relative_patcher.cc
+++ b/compiler/linker/relative_patcher.cc
@@ -16,10 +16,18 @@
#include "linker/relative_patcher.h"
+#ifdef ART_ENABLE_CODEGEN_arm
#include "linker/arm/relative_patcher_thumb2.h"
+#endif
+#ifdef ART_ENABLE_CODEGEN_arm64
#include "linker/arm64/relative_patcher_arm64.h"
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86
#include "linker/x86/relative_patcher_x86.h"
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86_64
#include "linker/x86_64/relative_patcher_x86_64.h"
+#endif
#include "output_stream.h"
namespace art {
@@ -64,18 +72,28 @@
DISALLOW_COPY_AND_ASSIGN(RelativePatcherNone);
};
+ UNUSED(features);
+ UNUSED(provider);
switch (instruction_set) {
+#ifdef ART_ENABLE_CODEGEN_x86
case kX86:
return std::unique_ptr<RelativePatcher>(new X86RelativePatcher());
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86_64
case kX86_64:
return std::unique_ptr<RelativePatcher>(new X86_64RelativePatcher());
+#endif
+#ifdef ART_ENABLE_CODEGEN_arm
case kArm:
// Fall through: we generate Thumb2 code for "arm".
case kThumb2:
return std::unique_ptr<RelativePatcher>(new Thumb2RelativePatcher(provider));
+#endif
+#ifdef ART_ENABLE_CODEGEN_arm64
case kArm64:
return std::unique_ptr<RelativePatcher>(
new Arm64RelativePatcher(provider, features->AsArm64InstructionSetFeatures()));
+#endif
default:
return std::unique_ptr<RelativePatcher>(new RelativePatcherNone);
}
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 4607ebe..eba9f36 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -16,11 +16,26 @@
#include "code_generator.h"
+#ifdef ART_ENABLE_CODEGEN_arm
#include "code_generator_arm.h"
+#endif
+
+#ifdef ART_ENABLE_CODEGEN_arm64
#include "code_generator_arm64.h"
+#endif
+
+#ifdef ART_ENABLE_CODEGEN_x86
#include "code_generator_x86.h"
+#endif
+
+#ifdef ART_ENABLE_CODEGEN_x86_64
#include "code_generator_x86_64.h"
+#endif
+
+#ifdef ART_ENABLE_CODEGEN_mips64
#include "code_generator_mips64.h"
+#endif
+
#include "compiled_method.h"
#include "dex/verified_method.h"
#include "driver/dex_compilation_unit.h"
@@ -31,6 +46,7 @@
#include "mirror/array-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/object_reference.h"
+#include "parallel_move_resolver.h"
#include "ssa_liveness_analysis.h"
#include "utils/assembler.h"
#include "verifier/dex_gc_map.h"
@@ -516,34 +532,49 @@
const InstructionSetFeatures& isa_features,
const CompilerOptions& compiler_options) {
switch (instruction_set) {
+#ifdef ART_ENABLE_CODEGEN_arm
case kArm:
case kThumb2: {
return new arm::CodeGeneratorARM(graph,
*isa_features.AsArmInstructionSetFeatures(),
compiler_options);
}
+#endif
+#ifdef ART_ENABLE_CODEGEN_arm64
case kArm64: {
return new arm64::CodeGeneratorARM64(graph,
*isa_features.AsArm64InstructionSetFeatures(),
compiler_options);
}
+#endif
+#ifdef ART_ENABLE_CODEGEN_mips
case kMips:
+ UNUSED(compiler_options);
+ UNUSED(graph);
+ UNUSED(isa_features);
return nullptr;
+#endif
+#ifdef ART_ENABLE_CODEGEN_mips64
case kMips64: {
return new mips64::CodeGeneratorMIPS64(graph,
*isa_features.AsMips64InstructionSetFeatures(),
compiler_options);
}
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86
case kX86: {
return new x86::CodeGeneratorX86(graph,
*isa_features.AsX86InstructionSetFeatures(),
compiler_options);
}
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86_64
case kX86_64: {
return new x86_64::CodeGeneratorX86_64(graph,
*isa_features.AsX86_64InstructionSetFeatures(),
compiler_options);
}
+#endif
default:
return nullptr;
}
diff --git a/compiler/trampolines/trampoline_compiler.cc b/compiler/trampolines/trampoline_compiler.cc
index facc630..39e5259 100644
--- a/compiler/trampolines/trampoline_compiler.cc
+++ b/compiler/trampolines/trampoline_compiler.cc
@@ -17,17 +17,36 @@
#include "trampoline_compiler.h"
#include "jni_env_ext.h"
+
+#ifdef ART_ENABLE_CODEGEN_arm
#include "utils/arm/assembler_thumb2.h"
+#endif
+
+#ifdef ART_ENABLE_CODEGEN_arm64
#include "utils/arm64/assembler_arm64.h"
+#endif
+
+#ifdef ART_ENABLE_CODEGEN_mips
#include "utils/mips/assembler_mips.h"
+#endif
+
+#ifdef ART_ENABLE_CODEGEN_mips64
#include "utils/mips64/assembler_mips64.h"
+#endif
+
+#ifdef ART_ENABLE_CODEGEN_x86
#include "utils/x86/assembler_x86.h"
+#endif
+
+#ifdef ART_ENABLE_CODEGEN_x86_64
#include "utils/x86_64/assembler_x86_64.h"
+#endif
#define __ assembler.
namespace art {
+#ifdef ART_ENABLE_CODEGEN_arm
namespace arm {
static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention abi,
ThreadOffset<4> offset) {
@@ -55,7 +74,9 @@
return entry_stub.release();
}
} // namespace arm
+#endif // ART_ENABLE_CODEGEN_arm
+#ifdef ART_ENABLE_CODEGEN_arm64
namespace arm64 {
static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention abi,
ThreadOffset<8> offset) {
@@ -92,7 +113,9 @@
return entry_stub.release();
}
} // namespace arm64
+#endif // ART_ENABLE_CODEGEN_arm64
+#ifdef ART_ENABLE_CODEGEN_mips
namespace mips {
static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention abi,
ThreadOffset<4> offset) {
@@ -122,7 +145,9 @@
return entry_stub.release();
}
} // namespace mips
+#endif // ART_ENABLE_CODEGEN_mips
+#ifdef ART_ENABLE_CODEGEN_mips64
namespace mips64 {
static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention abi,
ThreadOffset<8> offset) {
@@ -152,7 +177,9 @@
return entry_stub.release();
}
} // namespace mips64
+#endif // ART_ENABLE_CODEGEN_mips64
+#ifdef ART_ENABLE_CODEGEN_x86
namespace x86 {
static const std::vector<uint8_t>* CreateTrampoline(ThreadOffset<4> offset) {
X86Assembler assembler;
@@ -170,7 +197,9 @@
return entry_stub.release();
}
} // namespace x86
+#endif // ART_ENABLE_CODEGEN_x86
+#ifdef ART_ENABLE_CODEGEN_x86_64
namespace x86_64 {
static const std::vector<uint8_t>* CreateTrampoline(ThreadOffset<8> offset) {
x86_64::X86_64Assembler assembler;
@@ -188,17 +217,26 @@
return entry_stub.release();
}
} // namespace x86_64
+#endif // ART_ENABLE_CODEGEN_x86_64
const std::vector<uint8_t>* CreateTrampoline64(InstructionSet isa, EntryPointCallingConvention abi,
ThreadOffset<8> offset) {
switch (isa) {
+#ifdef ART_ENABLE_CODEGEN_arm64
case kArm64:
return arm64::CreateTrampoline(abi, offset);
+#endif
+#ifdef ART_ENABLE_CODEGEN_mips64
case kMips64:
return mips64::CreateTrampoline(abi, offset);
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86_64
case kX86_64:
return x86_64::CreateTrampoline(offset);
+#endif
default:
+ UNUSED(abi);
+ UNUSED(offset);
LOG(FATAL) << "Unexpected InstructionSet: " << isa;
UNREACHABLE();
}
@@ -207,13 +245,20 @@
const std::vector<uint8_t>* CreateTrampoline32(InstructionSet isa, EntryPointCallingConvention abi,
ThreadOffset<4> offset) {
switch (isa) {
+#ifdef ART_ENABLE_CODEGEN_arm
case kArm:
case kThumb2:
return arm::CreateTrampoline(abi, offset);
+#endif
+#ifdef ART_ENABLE_CODEGEN_mips
case kMips:
return mips::CreateTrampoline(abi, offset);
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86
case kX86:
+ UNUSED(abi);
return x86::CreateTrampoline(offset);
+#endif
default:
LOG(FATAL) << "Unexpected InstructionSet: " << isa;
UNREACHABLE();
diff --git a/compiler/utils/assembler.cc b/compiler/utils/assembler.cc
index 6d8a989..496ca95 100644
--- a/compiler/utils/assembler.cc
+++ b/compiler/utils/assembler.cc
@@ -19,13 +19,25 @@
#include <algorithm>
#include <vector>
+#ifdef ART_ENABLE_CODEGEN_arm
#include "arm/assembler_arm32.h"
#include "arm/assembler_thumb2.h"
+#endif
+#ifdef ART_ENABLE_CODEGEN_arm64
#include "arm64/assembler_arm64.h"
+#endif
+#ifdef ART_ENABLE_CODEGEN_mips
#include "mips/assembler_mips.h"
+#endif
+#ifdef ART_ENABLE_CODEGEN_mips64
#include "mips64/assembler_mips64.h"
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86
#include "x86/assembler_x86.h"
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86_64
#include "x86_64/assembler_x86_64.h"
+#endif
#include "globals.h"
#include "memory_region.h"
@@ -112,20 +124,32 @@
Assembler* Assembler::Create(InstructionSet instruction_set) {
switch (instruction_set) {
+#ifdef ART_ENABLE_CODEGEN_arm
case kArm:
return new arm::Arm32Assembler();
case kThumb2:
return new arm::Thumb2Assembler();
+#endif
+#ifdef ART_ENABLE_CODEGEN_arm64
case kArm64:
return new arm64::Arm64Assembler();
+#endif
+#ifdef ART_ENABLE_CODEGEN_mips
case kMips:
return new mips::MipsAssembler();
+#endif
+#ifdef ART_ENABLE_CODEGEN_mips64
case kMips64:
return new mips64::Mips64Assembler();
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86
case kX86:
return new x86::X86Assembler();
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86_64
case kX86_64:
return new x86_64::X86_64Assembler();
+#endif
default:
LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
return nullptr;