Merge "Tracking java.lang.reflect.Executable changes"
diff --git a/Android.bp b/Android.bp
new file mode 100644
index 0000000..835048d
--- /dev/null
+++ b/Android.bp
@@ -0,0 +1,6 @@
+subdirs = [
+ "build",
+ "compiler",
+ "runtime",
+ "sigchainlib",
+]
diff --git a/Android.mk b/Android.mk
index 4dc84c4..9d0062b 100644
--- a/Android.mk
+++ b/Android.mk
@@ -76,9 +76,6 @@
########################################################################
# product rules
-include $(art_path)/runtime/Android.mk
-include $(art_path)/runtime/simulator/Android.mk
-include $(art_path)/compiler/Android.mk
include $(art_path)/dexdump/Android.mk
include $(art_path)/dexlist/Android.mk
include $(art_path)/dex2oat/Android.mk
@@ -92,7 +89,6 @@
include $(art_path)/tools/ahat/Android.mk
include $(art_path)/tools/dexfuzz/Android.mk
include $(art_path)/tools/dmtracedump/Android.mk
-include $(art_path)/sigchainlib/Android.mk
include $(art_path)/libart_fake/Android.mk
@@ -555,9 +551,6 @@
art_test_bother :=
TEST_ART_TARGET_SYNC_DEPS :=
-include $(art_path)/runtime/openjdkjvm/Android.mk
-include $(art_path)/runtime/openjdkjvmti/Android.mk
-
# Helper target that depends on boot image creation.
#
# Can be used, for example, to dump initialization failures:
diff --git a/build/Android.bp b/build/Android.bp
new file mode 100644
index 0000000..be7dafd
--- /dev/null
+++ b/build/Android.bp
@@ -0,0 +1,158 @@
+bootstrap_go_package {
+ name: "soong-art",
+ pkgPath: "android/soong/art",
+ deps: [
+ "blueprint",
+ "blueprint-pathtools",
+ "soong",
+ "soong-android",
+ "soong-cc",
+ ],
+ srcs: [
+ "art.go",
+ "codegen.go",
+ "makevars.go",
+ ],
+ pluginFor: ["soong_build"],
+}
+
+art_global_defaults {
+ // Additional flags are computed by art.go
+
+ name: "art_defaults",
+ clang: true,
+ cflags: [
+ "-O3",
+
+ // Base set of cflags used by all things ART.
+ "-fno-rtti",
+ "-ggdb3",
+ "-Wall",
+ "-Werror",
+ "-Wextra",
+ "-Wstrict-aliasing",
+ "-fstrict-aliasing",
+ "-Wunreachable-code",
+ "-Wredundant-decls",
+ "-Wshadow",
+ "-Wunused",
+ "-fvisibility=protected",
+
+ // Warn about thread safety violations with clang.
+ "-Wthread-safety",
+ "-Wthread-safety-negative",
+
+ // Warn if switch fallthroughs aren't annotated.
+ "-Wimplicit-fallthrough",
+
+ // Enable float equality warnings.
+ "-Wfloat-equal",
+
+ // Enable warning of converting ints to void*.
+ "-Wint-to-void-pointer-cast",
+
+ // Enable warning of wrong unused annotations.
+ "-Wused-but-marked-unused",
+
+ // Enable warning for deprecated language features.
+ "-Wdeprecated",
+
+ // Enable warning for unreachable break & return.
+ "-Wunreachable-code-break",
+ "-Wunreachable-code-return",
+
+ // Bug: http://b/29823425 Disable -Wconstant-conversion and
+ // -Wundefined-var-template for Clang update to r271374
+ "-Wno-constant-conversion",
+ "-Wno-undefined-var-template",
+
+ "-DART_STACK_OVERFLOW_GAP_arm=8192",
+ "-DART_STACK_OVERFLOW_GAP_arm64=8192",
+ "-DART_STACK_OVERFLOW_GAP_mips=16384",
+ "-DART_STACK_OVERFLOW_GAP_mips64=16384",
+ "-DART_STACK_OVERFLOW_GAP_x86=8192",
+ "-DART_STACK_OVERFLOW_GAP_x86_64=8192",
+ ],
+
+ target: {
+ android: {
+ cflags: [
+ "-DART_TARGET",
+
+ // Enable missing-noreturn only on non-Mac. As lots of things are not implemented
+ // for Apple, it's a pain.
+ "-Wmissing-noreturn",
+
+ // To use oprofile_android --callgraph, uncomment this and recompile with
+ // mmma -j art
+ // "-fno-omit-frame-pointer",
+ // "-marm",
+ // "-mapcs",
+ ],
+ include_dirs: [
+ // We optimize Thread::Current() with a direct TLS access. This requires access to a
+ // private Bionic header.
+ "bionic/libc/private",
+ ],
+ },
+ linux: {
+ cflags: [
+ // Enable missing-noreturn only on non-Mac. As lots of things are not implemented for
+ // Apple, it's a pain.
+ "-Wmissing-noreturn",
+ ],
+ },
+ host: {
+ cflags: [
+ // Bug: 15446488. We don't omit the frame pointer to work around
+ // clang/libunwind bugs that cause SEGVs in run-test-004-ThreadStress.
+ "-fno-omit-frame-pointer",
+ ],
+ },
+ },
+
+ codegen: {
+ arm: {
+ cflags: ["-DART_ENABLE_CODEGEN_arm"],
+ },
+ arm64: {
+ cflags: ["-DART_ENABLE_CODEGEN_arm64"],
+ },
+ mips: {
+ cflags: ["-DART_ENABLE_CODEGEN_mips"],
+ },
+ mips64: {
+ cflags: ["-DART_ENABLE_CODEGEN_mips64"],
+ },
+ x86: {
+ cflags: ["-DART_ENABLE_CODEGEN_x86"],
+ },
+ x86_64: {
+ cflags: ["-DART_ENABLE_CODEGEN_x86_64"],
+ },
+ },
+
+ include_dirs: [
+ "external/gtest/include",
+ "external/icu/icu4c/source/common",
+ "external/lz4/lib",
+ "external/valgrind/include",
+ "external/valgrind",
+ "external/vixl/src",
+ "external/zlib",
+ ],
+}
+
+cc_defaults {
+ name: "art_debug_defaults",
+ cflags: [
+ "-O2",
+ "-DDYNAMIC_ANNOTATIONS_ENABLED=1",
+ "-DVIXL_DEBUG",
+ "-UNDEBUG",
+ "-Wno-frame-larger-than=",
+ ],
+ asflags: [
+ "-UNDEBUG",
+ ],
+}
diff --git a/build/art.go b/build/art.go
new file mode 100644
index 0000000..4e64dcf
--- /dev/null
+++ b/build/art.go
@@ -0,0 +1,180 @@
+// Copyright (C) 2016 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package art
+
+import (
+ "android/soong"
+ "android/soong/android"
+ "android/soong/cc"
+ "fmt"
+
+ "github.com/google/blueprint"
+)
+
+var supportedArches = []string{"arm", "arm64", "mips", "mips64", "x86", "x86_64"}
+
+func globalFlags(ctx android.BaseContext) ([]string, []string) {
+ var cflags []string
+ var asflags []string
+
+ tlab := false
+
+ gcType := envDefault(ctx, "ART_DEFAULT_GC_TYPE", "CMS")
+
+ if envTrue(ctx, "ART_TEST_DEBUG_GC") {
+ gcType = "SS"
+ tlab = true
+ }
+
+ cflags = append(cflags, "-DART_DEFAULT_GC_TYPE_IS_"+gcType)
+ if tlab {
+ cflags = append(cflags, "-DART_USE_TLAB=1")
+ }
+
+ imtSize := envDefault(ctx, "ART_IMT_SIZE", "43")
+ cflags = append(cflags, "-DIMT_SIZE="+imtSize)
+
+ if envTrue(ctx, "ART_HEAP_POISONING") {
+ cflags = append(cflags, "-DART_HEAP_POISONING=1")
+ asflags = append(asflags, "-DART_HEAP_POISONING=1")
+ }
+
+ if envTrue(ctx, "ART_USE_READ_BARRIER") {
+ // Used to change the read barrier type. Valid values are BAKER, BROOKS, TABLELOOKUP.
+ // The default is BAKER.
+ barrierType := envDefault(ctx, "ART_READ_BARRIER_TYPE", "BAKER")
+ cflags = append(cflags,
+ "-DART_USE_READ_BARRIER=1",
+ "-DART_READ_BARRIER_TYPE_IS_"+barrierType+"=1")
+ asflags = append(asflags,
+ "-DART_USE_READ_BARRIER=1",
+ "-DART_READ_BARRIER_TYPE_IS_"+barrierType+"=1")
+
+ // Temporarily override -fstack-protector-strong with -fstack-protector to avoid a major
+ // slowdown with the read barrier config. b/26744236.
+ cflags = append(cflags, "-fstack-protector")
+ }
+
+ return cflags, asflags
+}
+
+func deviceFlags(ctx android.BaseContext) []string {
+ var cflags []string
+ deviceFrameSizeLimit := 1736
+ if len(ctx.AConfig().SanitizeDevice()) > 0 {
+ deviceFrameSizeLimit = 6400
+ }
+ cflags = append(cflags,
+ fmt.Sprintf("-Wframe-larger-than=%d", deviceFrameSizeLimit),
+ fmt.Sprintf("-DART_FRAME_SIZE_LIMIT=%d", deviceFrameSizeLimit),
+ )
+
+ cflags = append(cflags, "-DART_BASE_ADDRESS="+ctx.AConfig().LibartImgDeviceBaseAddress())
+ if envTrue(ctx, "ART_TARGET_LINUX") {
+ cflags = append(cflags, "-DART_TARGET_LINUX")
+ } else {
+ cflags = append(cflags, "-DART_TARGET_ANDROID")
+ }
+ minDelta := envDefault(ctx, "LIBART_IMG_TARGET_MIN_BASE_ADDRESS_DELTA", "-0x1000000")
+ maxDelta := envDefault(ctx, "LIBART_IMG_TARGET_MAX_BASE_ADDRESS_DELTA", "0x1000000")
+ cflags = append(cflags, "-DART_BASE_ADDRESS_MIN_DELTA="+minDelta)
+ cflags = append(cflags, "-DART_BASE_ADDRESS_MAX_DELTA="+maxDelta)
+
+ return cflags
+}
+
+func hostFlags(ctx android.BaseContext) []string {
+ var cflags []string
+ hostFrameSizeLimit := 1736
+ cflags = append(cflags,
+ fmt.Sprintf("-Wframe-larger-than=%d", hostFrameSizeLimit),
+ fmt.Sprintf("-DART_FRAME_SIZE_LIMIT=%d", hostFrameSizeLimit),
+ )
+
+ cflags = append(cflags, "-DART_BASE_ADDRESS="+ctx.AConfig().LibartImgHostBaseAddress())
+ minDelta := envDefault(ctx, "LIBART_IMG_HOST_MIN_BASE_ADDRESS_DELTA", "-0x1000000")
+ maxDelta := envDefault(ctx, "LIBART_IMG_HOST_MAX_BASE_ADDRESS_DELTA", "0x1000000")
+ cflags = append(cflags, "-DART_BASE_ADDRESS_MIN_DELTA="+minDelta)
+ cflags = append(cflags, "-DART_BASE_ADDRESS_MAX_DELTA="+maxDelta)
+
+ return cflags
+}
+
+func (a *artGlobalDefaults) CustomizeProperties(ctx android.CustomizePropertiesContext) {
+ type props struct {
+ Target struct {
+ Android struct {
+ Cflags []string
+ }
+ Host struct {
+ Cflags []string
+ }
+ }
+ Cflags []string
+ Asflags []string
+ }
+
+ p := &props{}
+ p.Cflags, p.Asflags = globalFlags(ctx)
+ p.Target.Android.Cflags = deviceFlags(ctx)
+ p.Target.Host.Cflags = hostFlags(ctx)
+ ctx.AppendProperties(p)
+}
+
+type artGlobalDefaults struct{}
+
+func init() {
+ soong.RegisterModuleType("art_cc_library", artLibrary)
+ soong.RegisterModuleType("art_cc_defaults", artDefaultsFactory)
+ soong.RegisterModuleType("art_global_defaults", artGlobalDefaultsFactory)
+}
+
+func artGlobalDefaultsFactory() (blueprint.Module, []interface{}) {
+ c := &artGlobalDefaults{}
+ module, props := artDefaultsFactory()
+ android.AddCustomizer(module.(android.Module), c)
+
+ return module, props
+}
+
+func artDefaultsFactory() (blueprint.Module, []interface{}) {
+ c := &codegenCustomizer{}
+ module, props := cc.DefaultsFactory(&c.codegenProperties)
+ android.AddCustomizer(module.(android.Module), c)
+
+ return module, props
+}
+
+func artLibrary() (blueprint.Module, []interface{}) {
+ library, _ := cc.NewLibrary(android.HostAndDeviceSupported, true, true)
+ module, props := library.Init()
+
+ c := &codegenCustomizer{}
+ android.AddCustomizer(library, c)
+ props = append(props, &c.codegenProperties)
+ return module, props
+}
+
+func envDefault(ctx android.BaseContext, key string, defaultValue string) string {
+ ret := ctx.AConfig().Getenv(key)
+ if ret == "" {
+ return defaultValue
+ }
+ return ret
+}
+
+func envTrue(ctx android.BaseContext, key string) bool {
+ return ctx.AConfig().Getenv(key) == "true"
+}
diff --git a/build/codegen.go b/build/codegen.go
new file mode 100644
index 0000000..eb2c37d
--- /dev/null
+++ b/build/codegen.go
@@ -0,0 +1,123 @@
+// Copyright (C) 2016 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package art
+
+// This file implements the "codegen" property to apply different properties based on the currently
+// selected codegen arches, which defaults to all arches on the host and the primary and secondary
+// arches on the device.
+
+import (
+ "android/soong/android"
+ "sort"
+ "strings"
+)
+
+func (a *codegenCustomizer) CustomizeProperties(ctx android.CustomizePropertiesContext) {
+ c := &a.codegenProperties.Codegen
+
+ var hostArches, deviceArches []string
+
+ e := envDefault(ctx, "ART_HOST_CODEGEN_ARCHS", "")
+ if e == "" {
+ hostArches = supportedArches
+ } else {
+ hostArches = strings.Split(e, " ")
+ }
+
+ e = envDefault(ctx, "ART_TARGET_CODEGEN_ARCHS", "")
+ if e == "" {
+ deviceArches = defaultDeviceCodegenArches(ctx)
+ } else {
+ deviceArches = strings.Split(e, " ")
+ }
+
+ type props struct {
+ Target struct {
+ Android *codegenArchProperties
+ Host *codegenArchProperties
+ }
+ }
+
+ addCodegenArchProperties := func(p *props, hod **codegenArchProperties, arch string) {
+ switch arch {
+ case "arm":
+ *hod = &c.Arm
+ case "arm64":
+ *hod = &c.Arm64
+ case "mips":
+ *hod = &c.Mips
+ case "mips64":
+ *hod = &c.Mips64
+ case "x86":
+ *hod = &c.X86
+ case "x86_64":
+ *hod = &c.X86_64
+ default:
+ ctx.ModuleErrorf("Unknown codegen architecture %q", arch)
+ return
+ }
+ ctx.AppendProperties(p)
+ }
+
+ for _, a := range deviceArches {
+ p := &props{}
+ addCodegenArchProperties(p, &p.Target.Android, a)
+ if ctx.Failed() {
+ return
+ }
+ }
+
+ for _, a := range hostArches {
+ p := &props{}
+ addCodegenArchProperties(p, &p.Target.Host, a)
+ if ctx.Failed() {
+ return
+ }
+ }
+}
+
+type codegenArchProperties struct {
+ Srcs []string
+ Cflags []string
+ Static struct {
+ Whole_static_libs []string
+ }
+ Shared struct {
+ Shared_libs []string
+ }
+}
+
+type codegenProperties struct {
+ Codegen struct {
+ Arm, Arm64, Mips, Mips64, X86, X86_64 codegenArchProperties
+ }
+}
+
+type codegenCustomizer struct {
+ codegenProperties codegenProperties
+}
+
+func defaultDeviceCodegenArches(ctx android.CustomizePropertiesContext) []string {
+ arches := make(map[string]bool)
+ for _, a := range ctx.DeviceConfig().Arches() {
+ arches[a.ArchType.String()] = true
+ }
+ ret := make([]string, 0, len(arches))
+ for a := range arches {
+ ret = append(ret, a)
+ }
+ sort.Strings(ret)
+ return ret
+}
diff --git a/build/makevars.go b/build/makevars.go
new file mode 100644
index 0000000..5655c55
--- /dev/null
+++ b/build/makevars.go
@@ -0,0 +1,30 @@
+// Copyright (C) 2016 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package art
+
+import "android/soong/android"
+
+var (
+ pctx = android.NewPackageContext("android/soong/art")
+)
+
+func init() {
+ android.RegisterMakeVarsProvider(pctx, makeVarsProvider)
+}
+
+func makeVarsProvider(ctx android.MakeVarsContext) {
+ ctx.Strict("LIBART_IMG_HOST_BASE_ADDRESS", ctx.Config().LibartImgHostBaseAddress())
+ ctx.Strict("LIBART_IMG_TARGET_BASE_ADDRESS", ctx.Config().LibartImgDeviceBaseAddress())
+}
diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h
index b57383b..b74e588 100644
--- a/cmdline/cmdline_types.h
+++ b/cmdline/cmdline_types.h
@@ -581,7 +581,7 @@
: background_collector_type_(gc::kCollectorTypeNone) {
if (kUseReadBarrier) {
- background_collector_type_ = gc::kCollectorTypeCC; // Disable background compaction for CC.
+ background_collector_type_ = gc::kCollectorTypeCCBackground; // Background compaction for CC.
}
}
diff --git a/compiler/Android.bp b/compiler/Android.bp
new file mode 100644
index 0000000..289adf8
--- /dev/null
+++ b/compiler/Android.bp
@@ -0,0 +1,280 @@
+//
+// Copyright (C) 2012 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// TODO We should really separate out those files that are actually needed for both variants of an
+// architecture into its own category. Currently we just include all of the 32bit variant in the
+// 64bit variant. It also might be good to allow one to compile only the 64bit variant without the
+// 32bit one.
+
+art_cc_defaults {
+ name: "libart-compiler-defaults",
+ defaults: ["art_defaults"],
+ host_supported: true,
+ clang: true,
+ srcs: [
+ "compiled_method.cc",
+ "debug/elf_debug_writer.cc",
+ "dex/dex_to_dex_compiler.cc",
+ "dex/verified_method.cc",
+ "dex/verification_results.cc",
+ "dex/quick_compiler_callbacks.cc",
+ "dex/quick/dex_file_method_inliner.cc",
+ "dex/quick/dex_file_to_method_inliner_map.cc",
+ "driver/compiled_method_storage.cc",
+ "driver/compiler_driver.cc",
+ "driver/compiler_options.cc",
+ "driver/dex_compilation_unit.cc",
+ "linker/buffered_output_stream.cc",
+ "linker/file_output_stream.cc",
+ "linker/multi_oat_relative_patcher.cc",
+ "linker/output_stream.cc",
+ "linker/vector_output_stream.cc",
+ "linker/relative_patcher.cc",
+ "jit/jit_compiler.cc",
+ "jni/quick/calling_convention.cc",
+ "jni/quick/jni_compiler.cc",
+ "optimizing/block_builder.cc",
+ "optimizing/bounds_check_elimination.cc",
+ "optimizing/builder.cc",
+ "optimizing/code_generator.cc",
+ "optimizing/code_generator_utils.cc",
+ "optimizing/constant_folding.cc",
+ "optimizing/dead_code_elimination.cc",
+ "optimizing/graph_checker.cc",
+ "optimizing/graph_visualizer.cc",
+ "optimizing/gvn.cc",
+ "optimizing/induction_var_analysis.cc",
+ "optimizing/induction_var_range.cc",
+ "optimizing/inliner.cc",
+ "optimizing/instruction_builder.cc",
+ "optimizing/instruction_simplifier.cc",
+ "optimizing/intrinsics.cc",
+ "optimizing/licm.cc",
+ "optimizing/load_store_elimination.cc",
+ "optimizing/locations.cc",
+ "optimizing/nodes.cc",
+ "optimizing/optimization.cc",
+ "optimizing/optimizing_compiler.cc",
+ "optimizing/parallel_move_resolver.cc",
+ "optimizing/prepare_for_register_allocation.cc",
+ "optimizing/reference_type_propagation.cc",
+ "optimizing/register_allocation_resolver.cc",
+ "optimizing/register_allocator.cc",
+ "optimizing/register_allocator_graph_color.cc",
+ "optimizing/register_allocator_linear_scan.cc",
+ "optimizing/select_generator.cc",
+ "optimizing/sharpening.cc",
+ "optimizing/side_effects_analysis.cc",
+ "optimizing/ssa_builder.cc",
+ "optimizing/ssa_liveness_analysis.cc",
+ "optimizing/ssa_phi_elimination.cc",
+ "optimizing/stack_map_stream.cc",
+ "trampolines/trampoline_compiler.cc",
+ "utils/assembler.cc",
+ "utils/jni_macro_assembler.cc",
+ "utils/swap_space.cc",
+ "compiler.cc",
+ "elf_writer.cc",
+ "elf_writer_quick.cc",
+ "image_writer.cc",
+ "oat_writer.cc",
+ ],
+
+ codegen: {
+ arm: {
+ srcs: [
+ "jni/quick/arm/calling_convention_arm.cc",
+ "linker/arm/relative_patcher_arm_base.cc",
+ "linker/arm/relative_patcher_thumb2.cc",
+ "optimizing/code_generator_arm.cc",
+ "optimizing/dex_cache_array_fixups_arm.cc",
+ "optimizing/instruction_simplifier_arm.cc",
+ "optimizing/instruction_simplifier_shared.cc",
+ "optimizing/intrinsics_arm.cc",
+ "utils/arm/assembler_arm.cc",
+ "utils/arm/assembler_arm_vixl.cc",
+ "utils/arm/assembler_thumb2.cc",
+ "utils/arm/jni_macro_assembler_arm.cc",
+ "utils/arm/jni_macro_assembler_arm_vixl.cc",
+ "utils/arm/managed_register_arm.cc",
+ ],
+ },
+ arm64: {
+ srcs: [
+ "jni/quick/arm64/calling_convention_arm64.cc",
+ "linker/arm64/relative_patcher_arm64.cc",
+ "optimizing/code_generator_arm64.cc",
+ "optimizing/instruction_simplifier_arm64.cc",
+ "optimizing/intrinsics_arm64.cc",
+ "optimizing/nodes_arm64.cc",
+ "utils/arm64/assembler_arm64.cc",
+ "utils/arm64/jni_macro_assembler_arm64.cc",
+ "utils/arm64/managed_register_arm64.cc",
+ ],
+ },
+ mips: {
+ srcs: [
+ "jni/quick/mips/calling_convention_mips.cc",
+ "linker/mips/relative_patcher_mips.cc",
+ "optimizing/code_generator_mips.cc",
+ "optimizing/dex_cache_array_fixups_mips.cc",
+ "optimizing/intrinsics_mips.cc",
+ "optimizing/pc_relative_fixups_mips.cc",
+ "utils/mips/assembler_mips.cc",
+ "utils/mips/managed_register_mips.cc",
+ ],
+ },
+ mips64: {
+ srcs: [
+ "jni/quick/mips64/calling_convention_mips64.cc",
+ "optimizing/code_generator_mips64.cc",
+ "optimizing/intrinsics_mips64.cc",
+ "utils/mips64/assembler_mips64.cc",
+ "utils/mips64/managed_register_mips64.cc",
+ ],
+ },
+ x86: {
+ srcs: [
+ "jni/quick/x86/calling_convention_x86.cc",
+ "linker/x86/relative_patcher_x86.cc",
+ "linker/x86/relative_patcher_x86_base.cc",
+ "optimizing/code_generator_x86.cc",
+ "optimizing/intrinsics_x86.cc",
+ "optimizing/pc_relative_fixups_x86.cc",
+ "optimizing/x86_memory_gen.cc",
+ "utils/x86/assembler_x86.cc",
+ "utils/x86/jni_macro_assembler_x86.cc",
+ "utils/x86/managed_register_x86.cc",
+ ],
+ },
+ x86_64: {
+ srcs: [
+ "jni/quick/x86_64/calling_convention_x86_64.cc",
+ "linker/x86_64/relative_patcher_x86_64.cc",
+ "optimizing/intrinsics_x86_64.cc",
+ "optimizing/code_generator_x86_64.cc",
+ "utils/x86_64/assembler_x86_64.cc",
+ "utils/x86_64/jni_macro_assembler_x86_64.cc",
+ "utils/x86_64/managed_register_x86_64.cc",
+ ],
+ },
+ },
+ target: {
+ host: {
+ // For compiler driver TLS.
+ host_ldlibs: ["-lpthread"],
+ },
+ android: {
+ // For atrace.
+ shared_libs: ["libcutils"],
+ },
+ },
+ generated_sources: ["art_compiler_operator_srcs"],
+ shared_libs: [
+ "liblz4",
+ "liblzma",
+ ],
+ include_dirs: ["art/disassembler"],
+}
+
+gensrcs {
+ name: "art_compiler_operator_srcs",
+ cmd: "art/tools/generate-operator-out.py art/compiler $in > $out",
+ srcs: [
+ "compiled_method.h",
+ "dex/dex_to_dex_compiler.h",
+ "driver/compiler_driver.h",
+ "driver/compiler_options.h",
+ "image_writer.h",
+ "optimizing/locations.h",
+
+ "utils/arm/constants_arm.h",
+ "utils/mips/assembler_mips.h",
+ "utils/mips64/assembler_mips64.h",
+ ],
+ output_extension: "operator_out.cc",
+}
+
+art_cc_library {
+ name: "libart-compiler",
+ defaults: ["libart-compiler-defaults"],
+ codegen: {
+ arm: {
+ // VIXL assembly support for ARM targets.
+ static: {
+ whole_static_libs: [
+ "libvixl-arm",
+ ],
+ },
+ shared: {
+ shared_libs: [
+ "libvixl-arm",
+ ],
+ },
+ },
+ arm64: {
+ // VIXL assembly support for ARM64 targets.
+ static: {
+ whole_static_libs: [
+ "libvixl-arm64",
+ ],
+ },
+ shared: {
+ shared_libs: [
+ "libvixl-arm64",
+ ],
+ },
+ },
+ },
+ shared_libs: ["libart"],
+}
+
+art_cc_library {
+ name: "libartd-compiler",
+ defaults: [
+ "libart-compiler-defaults",
+ "art_debug_defaults",
+ ],
+ codegen: {
+ arm: {
+ // VIXL assembly support for ARM targets.
+ static: {
+ whole_static_libs: [
+ "libvixld-arm",
+ ],
+ },
+ shared: {
+ shared_libs: [
+ "libvixld-arm",
+ ],
+ },
+ },
+ arm64: {
+ // VIXL assembly support for ARM64 targets.
+ static: {
+ whole_static_libs: [
+ "libvixld-arm64",
+ ],
+ },
+ shared: {
+ shared_libs: [
+ "libvixld-arm64",
+ ],
+ },
+ },
+ },
+ shared_libs: ["libartd"],
+}
diff --git a/compiler/Android.mk b/compiler/Android.mk
deleted file mode 100644
index 08fd309..0000000
--- a/compiler/Android.mk
+++ /dev/null
@@ -1,344 +0,0 @@
-#
-# Copyright (C) 2012 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-LOCAL_PATH := $(call my-dir)
-
-include art/build/Android.common_build.mk
-
-LIBART_COMPILER_SRC_FILES := \
- compiled_method.cc \
- debug/elf_debug_writer.cc \
- dex/dex_to_dex_compiler.cc \
- dex/verified_method.cc \
- dex/verification_results.cc \
- dex/quick_compiler_callbacks.cc \
- dex/quick/dex_file_method_inliner.cc \
- dex/quick/dex_file_to_method_inliner_map.cc \
- driver/compiled_method_storage.cc \
- driver/compiler_driver.cc \
- driver/compiler_options.cc \
- driver/dex_compilation_unit.cc \
- linker/buffered_output_stream.cc \
- linker/file_output_stream.cc \
- linker/multi_oat_relative_patcher.cc \
- linker/output_stream.cc \
- linker/vector_output_stream.cc \
- linker/relative_patcher.cc \
- jit/jit_compiler.cc \
- jni/quick/calling_convention.cc \
- jni/quick/jni_compiler.cc \
- optimizing/block_builder.cc \
- optimizing/bounds_check_elimination.cc \
- optimizing/builder.cc \
- optimizing/code_generator.cc \
- optimizing/code_generator_utils.cc \
- optimizing/constant_folding.cc \
- optimizing/dead_code_elimination.cc \
- optimizing/graph_checker.cc \
- optimizing/graph_visualizer.cc \
- optimizing/gvn.cc \
- optimizing/induction_var_analysis.cc \
- optimizing/induction_var_range.cc \
- optimizing/inliner.cc \
- optimizing/instruction_builder.cc \
- optimizing/instruction_simplifier.cc \
- optimizing/intrinsics.cc \
- optimizing/licm.cc \
- optimizing/load_store_elimination.cc \
- optimizing/locations.cc \
- optimizing/nodes.cc \
- optimizing/optimization.cc \
- optimizing/optimizing_compiler.cc \
- optimizing/parallel_move_resolver.cc \
- optimizing/prepare_for_register_allocation.cc \
- optimizing/reference_type_propagation.cc \
- optimizing/register_allocation_resolver.cc \
- optimizing/register_allocator.cc \
- optimizing/register_allocator_graph_color.cc \
- optimizing/register_allocator_linear_scan.cc \
- optimizing/select_generator.cc \
- optimizing/sharpening.cc \
- optimizing/side_effects_analysis.cc \
- optimizing/ssa_builder.cc \
- optimizing/ssa_liveness_analysis.cc \
- optimizing/ssa_phi_elimination.cc \
- optimizing/stack_map_stream.cc \
- trampolines/trampoline_compiler.cc \
- utils/assembler.cc \
- utils/jni_macro_assembler.cc \
- utils/swap_space.cc \
- compiler.cc \
- elf_writer.cc \
- elf_writer_quick.cc \
- image_writer.cc \
- oat_writer.cc
-
-LIBART_COMPILER_SRC_FILES_arm := \
- jni/quick/arm/calling_convention_arm.cc \
- linker/arm/relative_patcher_arm_base.cc \
- linker/arm/relative_patcher_thumb2.cc \
- optimizing/code_generator_arm.cc \
- optimizing/dex_cache_array_fixups_arm.cc \
- optimizing/instruction_simplifier_arm.cc \
- optimizing/instruction_simplifier_shared.cc \
- optimizing/intrinsics_arm.cc \
- utils/arm/assembler_arm.cc \
- utils/arm/assembler_arm_vixl.cc \
- utils/arm/assembler_thumb2.cc \
- utils/arm/jni_macro_assembler_arm.cc \
- utils/arm/jni_macro_assembler_arm_vixl.cc \
- utils/arm/managed_register_arm.cc \
-
-# TODO We should really separate out those files that are actually needed for both variants of an
-# architecture into its own category. Currently we just include all of the 32bit variant in the
-# 64bit variant. It also might be good to allow one to compile only the 64bit variant without the
-# 32bit one.
-LIBART_COMPILER_SRC_FILES_arm64 := \
- $(LIBART_COMPILER_SRC_FILES_arm) \
- jni/quick/arm64/calling_convention_arm64.cc \
- linker/arm64/relative_patcher_arm64.cc \
- optimizing/nodes_arm64.cc \
- optimizing/code_generator_arm64.cc \
- optimizing/instruction_simplifier_arm64.cc \
- optimizing/instruction_simplifier_shared.cc \
- optimizing/intrinsics_arm64.cc \
- utils/arm64/assembler_arm64.cc \
- utils/arm64/jni_macro_assembler_arm64.cc \
- utils/arm64/managed_register_arm64.cc \
-
-LIBART_COMPILER_SRC_FILES_mips := \
- jni/quick/mips/calling_convention_mips.cc \
- linker/mips/relative_patcher_mips.cc \
- optimizing/code_generator_mips.cc \
- optimizing/dex_cache_array_fixups_mips.cc \
- optimizing/intrinsics_mips.cc \
- optimizing/pc_relative_fixups_mips.cc \
- utils/mips/assembler_mips.cc \
- utils/mips/managed_register_mips.cc \
-
-LIBART_COMPILER_SRC_FILES_mips64 := \
- $(LIBART_COMPILER_SRC_FILES_mips) \
- jni/quick/mips64/calling_convention_mips64.cc \
- optimizing/code_generator_mips64.cc \
- optimizing/intrinsics_mips64.cc \
- utils/mips64/assembler_mips64.cc \
- utils/mips64/managed_register_mips64.cc \
-
-
-LIBART_COMPILER_SRC_FILES_x86 := \
- jni/quick/x86/calling_convention_x86.cc \
- linker/x86/relative_patcher_x86.cc \
- linker/x86/relative_patcher_x86_base.cc \
- optimizing/code_generator_x86.cc \
- optimizing/intrinsics_x86.cc \
- optimizing/pc_relative_fixups_x86.cc \
- optimizing/x86_memory_gen.cc \
- utils/x86/assembler_x86.cc \
- utils/x86/jni_macro_assembler_x86.cc \
- utils/x86/managed_register_x86.cc \
-
-LIBART_COMPILER_SRC_FILES_x86_64 := \
- $(LIBART_COMPILER_SRC_FILES_x86) \
- jni/quick/x86_64/calling_convention_x86_64.cc \
- linker/x86_64/relative_patcher_x86_64.cc \
- optimizing/intrinsics_x86_64.cc \
- optimizing/code_generator_x86_64.cc \
- utils/x86_64/assembler_x86_64.cc \
- utils/x86_64/jni_macro_assembler_x86_64.cc \
- utils/x86_64/managed_register_x86_64.cc \
-
-
-LIBART_COMPILER_CFLAGS :=
-
-LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES := \
- compiled_method.h \
- dex/dex_to_dex_compiler.h \
- driver/compiler_driver.h \
- driver/compiler_options.h \
- image_writer.h \
- optimizing/locations.h
-
-LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_arm := \
- utils/arm/constants_arm.h
-
-LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_arm64 := \
- $(LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_arm)
-
-LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_mips := \
- utils/mips/assembler_mips.h
-
-LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_mips64 := \
- $(LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_mips) \
- utils/mips64/assembler_mips64.h
-
-LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_x86 :=
-LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_x86_64 := \
- $(LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_x86)
-
-# $(1): target or host
-# $(2): ndebug or debug
-# $(3): static or shared (empty means shared, applies only for host)
-define build-libart-compiler
- ifneq ($(1),target)
- ifneq ($(1),host)
- $$(error expected target or host for argument 1, received $(1))
- endif
- endif
- ifneq ($(2),ndebug)
- ifneq ($(2),debug)
- $$(error expected ndebug or debug for argument 2, received $(2))
- endif
- endif
-
- art_target_or_host := $(1)
- art_ndebug_or_debug := $(2)
- art_static_or_shared := $(3)
-
- include $(CLEAR_VARS)
- ifeq ($$(art_target_or_host),host)
- LOCAL_IS_HOST_MODULE := true
- art_codegen_targets := $(ART_HOST_CODEGEN_ARCHS)
- else
- art_codegen_targets := $(ART_TARGET_CODEGEN_ARCHS)
- endif
- LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
- ifeq ($$(art_ndebug_or_debug),ndebug)
- LOCAL_MODULE := libart-compiler
- ifeq ($$(art_static_or_shared), static)
- LOCAL_STATIC_LIBRARIES += libart liblz4 liblzma
- else
- LOCAL_SHARED_LIBRARIES += libart liblz4 liblzma
- endif
- ifeq ($$(art_target_or_host),target)
- LOCAL_FDO_SUPPORT := true
- endif
- else # debug
- LOCAL_MODULE := libartd-compiler
- ifeq ($$(art_static_or_shared), static)
- LOCAL_STATIC_LIBRARIES += libartd liblz4 liblzma
- else
- LOCAL_SHARED_LIBRARIES += libartd liblz4 liblzma
- endif
- endif
-
- LOCAL_MODULE_TAGS := optional
- ifeq ($$(art_static_or_shared), static)
- LOCAL_MODULE_CLASS := STATIC_LIBRARIES
- else
- LOCAL_MODULE_CLASS := SHARED_LIBRARIES
- endif
-
- # Sort removes duplicates.
- LOCAL_SRC_FILES := $$(LIBART_COMPILER_SRC_FILES) \
- $$(sort $$(foreach arch,$$(art_codegen_targets), $$(LIBART_COMPILER_SRC_FILES_$$(arch))))
-
- GENERATED_SRC_DIR := $$(call local-generated-sources-dir)
- ENUM_OPERATOR_OUT_CC_FILES := $$(patsubst %.h,%_operator_out.cc,\
- $$(LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES) \
- $$(sort $$(foreach arch,$$(art_codegen_targets), $$(LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_$$(arch)))))
- ENUM_OPERATOR_OUT_GEN := $$(addprefix $$(GENERATED_SRC_DIR)/,$$(ENUM_OPERATOR_OUT_CC_FILES))
-
-$$(ENUM_OPERATOR_OUT_GEN): art/tools/generate-operator-out.py
-$$(ENUM_OPERATOR_OUT_GEN): PRIVATE_CUSTOM_TOOL = art/tools/generate-operator-out.py $(LOCAL_PATH) $$< > $$@
-$$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PATH)/%.h
- $$(transform-generated-source)
-
- LOCAL_GENERATED_SOURCES += $$(ENUM_OPERATOR_OUT_GEN)
-
- LOCAL_CFLAGS := $$(LIBART_COMPILER_CFLAGS)
- ifeq ($$(art_target_or_host),target)
- LOCAL_CLANG := $(ART_TARGET_CLANG)
- $(call set-target-local-cflags-vars,$(2))
- else # host
- LOCAL_CLANG := $(ART_HOST_CLANG)
- LOCAL_CFLAGS += $(ART_HOST_CFLAGS)
- LOCAL_ASFLAGS += $(ART_HOST_ASFLAGS)
- ifeq ($$(art_static_or_shared),static)
- LOCAL_LDFLAGS += -static
- endif
- ifeq ($$(art_ndebug_or_debug),debug)
- LOCAL_CFLAGS += $(ART_HOST_DEBUG_CFLAGS)
- LOCAL_ASFLAGS += $(ART_HOST_DEBUG_ASFLAGS)
- else
- LOCAL_CFLAGS += $(ART_HOST_NON_DEBUG_CFLAGS)
- LOCAL_ASFLAGS += $(ART_HOST_NON_DEBUG_ASFLAGS)
- endif
- endif
-
- LOCAL_C_INCLUDES += $(ART_C_INCLUDES) art/runtime art/disassembler
-
- ifeq ($$(art_target_or_host),host)
- # For compiler driver TLS.
- LOCAL_LDLIBS += -lpthread
- endif
- LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk
- LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
- # VIXL assembly support for ARM64 targets.
- ifeq ($$(art_ndebug_or_debug),debug)
- ifeq ($$(art_static_or_shared), static)
- LOCAL_WHOLESTATIC_LIBRARIES += libvixld-arm libvixld-arm64
- else
- LOCAL_SHARED_LIBRARIES += libvixld-arm libvixld-arm64
- endif
- else
- ifeq ($$(art_static_or_shared), static)
- LOCAL_WHOLE_STATIC_LIBRARIES += libvixl-arm libvixl-arm64
- else
- LOCAL_SHARED_LIBRARIES += libvixl-arm libvixl-arm64
- endif
- endif
-
- LOCAL_NATIVE_COVERAGE := $(ART_COVERAGE)
-
- ifeq ($$(art_target_or_host),target)
- # For atrace.
- LOCAL_SHARED_LIBRARIES += libcutils
- include $(BUILD_SHARED_LIBRARY)
- else # host
- LOCAL_MULTILIB := both
- ifeq ($$(art_static_or_shared), static)
- include $(BUILD_HOST_STATIC_LIBRARY)
- else
- include $(BUILD_HOST_SHARED_LIBRARY)
- endif
- endif
-
- # Clear locally defined variables.
- art_target_or_host :=
- art_ndebug_or_debug :=
- art_static_or_shared :=
- art_codegen_targets :=
-endef
-
-# We always build dex2oat and dependencies, even if the host build is otherwise disabled, since they are used to cross compile for the target.
-ifeq ($(ART_BUILD_HOST_NDEBUG),true)
- $(eval $(call build-libart-compiler,host,ndebug))
- ifeq ($(ART_BUILD_HOST_STATIC),true)
- $(eval $(call build-libart-compiler,host,ndebug,static))
- endif
-endif
-ifeq ($(ART_BUILD_HOST_DEBUG),true)
- $(eval $(call build-libart-compiler,host,debug))
- ifeq ($(ART_BUILD_HOST_STATIC),true)
- $(eval $(call build-libart-compiler,host,debug,static))
- endif
-endif
-ifeq ($(ART_BUILD_TARGET_NDEBUG),true)
- $(eval $(call build-libart-compiler,target,ndebug))
-endif
-ifeq ($(ART_BUILD_TARGET_DEBUG),true)
- $(eval $(call build-libart-compiler,target,debug))
-endif
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index c532e72..0d3f849 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -1224,6 +1224,7 @@
DCHECK(instruction->IsInstanceFieldGet() ||
instruction->IsStaticFieldGet() ||
instruction->IsArrayGet() ||
+ instruction->IsArraySet() ||
instruction->IsLoadClass() ||
instruction->IsLoadString() ||
instruction->IsInstanceOf() ||
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 6d9c55c..5ac7d17 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -74,8 +74,10 @@
// Live registers will be restored in the catch block if caught.
SaveLiveRegisters(codegen, instruction_->GetLocations());
}
- arm_codegen->InvokeRuntime(
- QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc(), this);
+ arm_codegen->InvokeRuntime(kQuickThrowNullPointer,
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
}
@@ -98,8 +100,7 @@
// Live registers will be restored in the catch block if caught.
SaveLiveRegisters(codegen, instruction_->GetLocations());
}
- arm_codegen->InvokeRuntime(
- QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc(), this);
+ arm_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
}
@@ -119,8 +120,7 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
- arm_codegen->InvokeRuntime(
- QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc(), this);
+ arm_codegen->InvokeRuntime(kQuickTestSuspend, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickTestSuspend, void, void>();
if (successor_ == nullptr) {
__ b(GetReturnLabel());
@@ -174,10 +174,10 @@
locations->InAt(1),
Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
Primitive::kPrimInt);
- uint32_t entry_point_offset = instruction_->AsBoundsCheck()->IsStringCharAt()
- ? QUICK_ENTRY_POINT(pThrowStringBounds)
- : QUICK_ENTRY_POINT(pThrowArrayBounds);
- arm_codegen->InvokeRuntime(entry_point_offset, instruction_, instruction_->GetDexPc(), this);
+ QuickEntrypointEnum entrypoint = instruction_->AsBoundsCheck()->IsStringCharAt()
+ ? kQuickThrowStringBounds
+ : kQuickThrowArrayBounds;
+ arm_codegen->InvokeRuntime(entrypoint, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowStringBounds, void, int32_t, int32_t>();
CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
}
@@ -209,10 +209,9 @@
InvokeRuntimeCallingConvention calling_convention;
__ LoadImmediate(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
- int32_t entry_point_offset = do_clinit_
- ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
- : QUICK_ENTRY_POINT(pInitializeType);
- arm_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);
+ QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
+ : kQuickInitializeType;
+ arm_codegen->InvokeRuntime(entrypoint, at_, dex_pc_, this);
if (do_clinit_) {
CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
} else {
@@ -263,8 +262,7 @@
InvokeRuntimeCallingConvention calling_convention;
const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex();
__ LoadImmediate(calling_convention.GetRegisterAt(0), string_index);
- arm_codegen->InvokeRuntime(
- QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc(), this);
+ arm_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
@@ -309,7 +307,7 @@
Primitive::kPrimNot);
if (instruction_->IsInstanceOf()) {
- arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
+ arm_codegen->InvokeRuntime(kQuickInstanceofNonTrivial,
instruction_,
instruction_->GetDexPc(),
this);
@@ -318,10 +316,7 @@
arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
} else {
DCHECK(instruction_->IsCheckCast());
- arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
- instruction_,
- instruction_->GetDexPc(),
- this);
+ arm_codegen->InvokeRuntime(kQuickCheckCast, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
}
@@ -350,10 +345,7 @@
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, instruction_->GetLocations());
- arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize),
- instruction_,
- instruction_->GetDexPc(),
- this);
+ arm_codegen->InvokeRuntime(kQuickDeoptimize, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickDeoptimize, void, void>();
}
@@ -392,10 +384,7 @@
codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
- arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
- instruction_,
- instruction_->GetDexPc(),
- this);
+ arm_codegen->InvokeRuntime(kQuickAputObject, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
RestoreLiveRegisters(codegen, locations);
__ b(GetExitLabel());
@@ -425,6 +414,7 @@
DCHECK(instruction_->IsInstanceFieldGet() ||
instruction_->IsStaticFieldGet() ||
instruction_->IsArrayGet() ||
+ instruction_->IsArraySet() ||
instruction_->IsLoadClass() ||
instruction_->IsLoadString() ||
instruction_->IsInstanceOf() ||
@@ -612,10 +602,7 @@
codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
__ LoadImmediate(calling_convention.GetRegisterAt(2), offset_);
}
- arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pReadBarrierSlow),
- instruction_,
- instruction_->GetDexPc(),
- this);
+ arm_codegen->InvokeRuntime(kQuickReadBarrierSlow, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<
kQuickReadBarrierSlow, mirror::Object*, mirror::Object*, mirror::Object*, uint32_t>();
arm_codegen->Move32(out_, Location::RegisterLocation(R0));
@@ -679,7 +666,7 @@
InvokeRuntimeCallingConvention calling_convention;
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
arm_codegen->Move32(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), root_);
- arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pReadBarrierForRootSlow),
+ arm_codegen->InvokeRuntime(kQuickReadBarrierForRootSlow,
instruction_,
instruction_->GetDexPc(),
this);
@@ -1220,19 +1207,8 @@
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
- InvokeRuntime(GetThreadOffset<kArmPointerSize>(entrypoint).Int32Value(),
- instruction,
- dex_pc,
- slow_path);
-}
-
-void CodeGeneratorARM::InvokeRuntime(int32_t entry_point_offset,
- HInstruction* instruction,
- uint32_t dex_pc,
- SlowPathCode* slow_path) {
ValidateInvokeRuntime(instruction, slow_path);
- __ LoadFromOffset(kLoadWord, LR, TR, entry_point_offset);
- __ blx(LR);
+ GenerateInvokeRuntime(GetThreadOffset<kArmPointerSize>(entrypoint).Int32Value());
RecordPcInfo(instruction, dex_pc, slow_path);
}
@@ -1240,6 +1216,10 @@
HInstruction* instruction,
SlowPathCode* slow_path) {
ValidateInvokeRuntimeWithoutRecordingPcInfo(instruction, slow_path);
+ GenerateInvokeRuntime(entry_point_offset);
+}
+
+void CodeGeneratorARM::GenerateInvokeRuntime(int32_t entry_point_offset) {
__ LoadFromOffset(kLoadWord, LR, TR, entry_point_offset);
__ blx(LR);
}
@@ -2377,19 +2357,13 @@
case Primitive::kPrimFloat:
// Processing a Dex `float-to-long' instruction.
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pF2l),
- conversion,
- conversion->GetDexPc(),
- nullptr);
+ codegen_->InvokeRuntime(kQuickF2l, conversion, conversion->GetDexPc());
CheckEntrypointTypes<kQuickF2l, int64_t, float>();
break;
case Primitive::kPrimDouble:
// Processing a Dex `double-to-long' instruction.
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pD2l),
- conversion,
- conversion->GetDexPc(),
- nullptr);
+ codegen_->InvokeRuntime(kQuickD2l, conversion, conversion->GetDexPc());
CheckEntrypointTypes<kQuickD2l, int64_t, double>();
break;
@@ -2436,10 +2410,7 @@
case Primitive::kPrimLong:
// Processing a Dex `long-to-float' instruction.
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pL2f),
- conversion,
- conversion->GetDexPc(),
- nullptr);
+ codegen_->InvokeRuntime(kQuickL2f, conversion, conversion->GetDexPc());
CheckEntrypointTypes<kQuickL2f, float, int64_t>();
break;
@@ -2971,7 +2942,7 @@
DCHECK_EQ(calling_convention.GetRegisterAt(1), second.AsRegister<Register>());
DCHECK_EQ(R0, out.AsRegister<Register>());
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pIdivmod), div, div->GetDexPc(), nullptr);
+ codegen_->InvokeRuntime(kQuickIdivmod, div, div->GetDexPc());
CheckEntrypointTypes<kQuickIdivmod, int32_t, int32_t, int32_t>();
}
break;
@@ -2986,7 +2957,7 @@
DCHECK_EQ(R0, out.AsRegisterPairLow<Register>());
DCHECK_EQ(R1, out.AsRegisterPairHigh<Register>());
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLdiv), div, div->GetDexPc(), nullptr);
+ codegen_->InvokeRuntime(kQuickLdiv, div, div->GetDexPc());
CheckEntrypointTypes<kQuickLdiv, int64_t, int64_t, int64_t>();
break;
}
@@ -3115,26 +3086,26 @@
DCHECK_EQ(calling_convention.GetRegisterAt(1), second.AsRegister<Register>());
DCHECK_EQ(R1, out.AsRegister<Register>());
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pIdivmod), rem, rem->GetDexPc(), nullptr);
+ codegen_->InvokeRuntime(kQuickIdivmod, rem, rem->GetDexPc());
CheckEntrypointTypes<kQuickIdivmod, int32_t, int32_t, int32_t>();
}
break;
}
case Primitive::kPrimLong: {
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLmod), rem, rem->GetDexPc(), nullptr);
+ codegen_->InvokeRuntime(kQuickLmod, rem, rem->GetDexPc());
CheckEntrypointTypes<kQuickLmod, int64_t, int64_t, int64_t>();
break;
}
case Primitive::kPrimFloat: {
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmodf), rem, rem->GetDexPc(), nullptr);
+ codegen_->InvokeRuntime(kQuickFmodf, rem, rem->GetDexPc());
CheckEntrypointTypes<kQuickFmodf, float, float, float>();
break;
}
case Primitive::kPrimDouble: {
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmod), rem, rem->GetDexPc(), nullptr);
+ codegen_->InvokeRuntime(kQuickFmod, rem, rem->GetDexPc());
CheckEntrypointTypes<kQuickFmod, double, double, double>();
break;
}
@@ -3574,10 +3545,7 @@
__ blx(LR);
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
- codegen_->InvokeRuntime(instruction->GetEntrypoint(),
- instruction,
- instruction->GetDexPc(),
- nullptr);
+ codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
}
}
@@ -3597,10 +3565,7 @@
__ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
// Note: if heap poisoning is enabled, the entry point takes cares
// of poisoning the reference.
- codegen_->InvokeRuntime(instruction->GetEntrypoint(),
- instruction,
- instruction->GetDexPc(),
- nullptr);
+ codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*>();
}
@@ -4642,12 +4607,10 @@
bool needs_write_barrier =
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
- bool object_array_set_with_read_barrier =
- kEmitCompilerReadBarrier && (value_type == Primitive::kPrimNot);
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
instruction,
- (may_need_runtime_call_for_type_check || object_array_set_with_read_barrier) ?
+ may_need_runtime_call_for_type_check ?
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall);
@@ -4660,6 +4623,7 @@
}
if (needs_write_barrier) {
// Temporary registers for the write barrier.
+ // These registers may be used for Baker read barriers too.
locations->AddTemp(Location::RequiresRegister()); // Possibly used for ref. poisoning too.
locations->AddTemp(Location::RequiresRegister());
}
@@ -4744,8 +4708,10 @@
}
DCHECK(needs_write_barrier);
- Register temp1 = locations->GetTemp(0).AsRegister<Register>();
- Register temp2 = locations->GetTemp(1).AsRegister<Register>();
+ Location temp1_loc = locations->GetTemp(0);
+ Register temp1 = temp1_loc.AsRegister<Register>();
+ Location temp2_loc = locations->GetTemp(1);
+ Register temp2 = temp2_loc.AsRegister<Register>();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
@@ -4776,33 +4742,97 @@
}
if (kEmitCompilerReadBarrier) {
- // When read barriers are enabled, the type checking
- // instrumentation requires two read barriers:
- //
- // __ Mov(temp2, temp1);
- // // /* HeapReference<Class> */ temp1 = temp1->component_type_
- // __ LoadFromOffset(kLoadWord, temp1, temp1, component_offset);
- // codegen_->GenerateReadBarrierSlow(
- // instruction, temp1_loc, temp1_loc, temp2_loc, component_offset);
- //
- // // /* HeapReference<Class> */ temp2 = value->klass_
- // __ LoadFromOffset(kLoadWord, temp2, value, class_offset);
- // codegen_->GenerateReadBarrierSlow(
- // instruction, temp2_loc, temp2_loc, value_loc, class_offset, temp1_loc);
- //
- // __ cmp(temp1, ShifterOperand(temp2));
- //
- // However, the second read barrier may trash `temp`, as it
- // is a temporary register, and as such would not be saved
- // along with live registers before calling the runtime (nor
- // restored afterwards). So in this case, we bail out and
- // delegate the work to the array set slow path.
- //
- // TODO: Extend the register allocator to support a new
- // "(locally) live temp" location so as to avoid always
- // going into the slow path when read barriers are enabled.
- __ b(slow_path->GetEntryLabel());
+ if (!kUseBakerReadBarrier) {
+ // When (non-Baker) read barriers are enabled, the type
+ // checking instrumentation requires two read barriers
+ // generated by CodeGeneratorARM::GenerateReadBarrierSlow:
+ //
+ // __ Mov(temp2, temp1);
+ // // /* HeapReference<Class> */ temp1 = temp1->component_type_
+ // __ LoadFromOffset(kLoadWord, temp1, temp1, component_offset);
+ // codegen_->GenerateReadBarrierSlow(
+ // instruction, temp1_loc, temp1_loc, temp2_loc, component_offset);
+ //
+ // // /* HeapReference<Class> */ temp2 = value->klass_
+ // __ LoadFromOffset(kLoadWord, temp2, value, class_offset);
+ // codegen_->GenerateReadBarrierSlow(
+ // instruction, temp2_loc, temp2_loc, value_loc, class_offset, temp1_loc);
+ //
+ // __ cmp(temp1, ShifterOperand(temp2));
+ //
+ // However, the second read barrier may trash `temp`, as it
+ // is a temporary register, and as such would not be saved
+ // along with live registers before calling the runtime (nor
+ // restored afterwards). So in this case, we bail out and
+ // delegate the work to the array set slow path.
+ //
+ // TODO: Extend the register allocator to support a new
+ // "(locally) live temp" location so as to avoid always
+ // going into the slow path when read barriers are enabled?
+ //
+ // There is no such problem with Baker read barriers (see below).
+ __ b(slow_path->GetEntryLabel());
+ } else {
+ Register temp3 = IP;
+ Location temp3_loc = Location::RegisterLocation(temp3);
+
+ // Note: `temp3` (scratch register IP) cannot be used as
+ // `ref` argument of GenerateFieldLoadWithBakerReadBarrier
+ // calls below (see ReadBarrierMarkSlowPathARM for more
+ // details).
+
+ // /* HeapReference<Class> */ temp1 = array->klass_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
+ temp1_loc,
+ array,
+ class_offset,
+ temp3_loc,
+ /* needs_null_check */ true);
+
+ // /* HeapReference<Class> */ temp1 = temp1->component_type_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
+ temp1_loc,
+ temp1,
+ component_offset,
+ temp3_loc,
+ /* needs_null_check */ false);
+ // Register `temp1` is not trashed by the read barrier
+ // emitted by GenerateFieldLoadWithBakerReadBarrier below,
+ // as that method produces a call to a ReadBarrierMarkRegX
+ // entry point, which saves all potentially live registers,
+ // including temporaries such as `temp1`.
+ // /* HeapReference<Class> */ temp2 = value->klass_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
+ temp2_loc,
+ value,
+ class_offset,
+ temp3_loc,
+ /* needs_null_check */ false);
+ // If heap poisoning is enabled, `temp1` and `temp2` have
+ // been unpoisoned by the previous calls to
+ // CodeGeneratorARM::GenerateFieldLoadWithBakerReadBarrier.
+ __ cmp(temp1, ShifterOperand(temp2));
+
+ if (instruction->StaticTypeOfArrayIsObjectArray()) {
+ Label do_put;
+ __ b(&do_put, EQ);
+ // We do not need to emit a read barrier for the
+ // following heap reference load, as `temp1` is only used
+ // in a comparison with null below, and this reference
+ // is not kept afterwards.
+ // /* HeapReference<Class> */ temp1 = temp1->super_class_
+ __ LoadFromOffset(kLoadWord, temp1, temp1, super_offset);
+ // If heap poisoning is enabled, no need to unpoison
+ // `temp`, as we are comparing against null below.
+ __ CompareAndBranchIfNonZero(temp1, slow_path->GetEntryLabel());
+ __ Bind(&do_put);
+ } else {
+ __ b(slow_path->GetEntryLabel(), NE);
+ }
+ }
} else {
+ // Non read barrier code.
+
// /* HeapReference<Class> */ temp1 = array->klass_
__ LoadFromOffset(kLoadWord, temp1, array, class_offset);
codegen_->MaybeRecordImplicitNullCheck(instruction);
@@ -5370,10 +5400,7 @@
LocationSummary* locations = cls->GetLocations();
if (cls->NeedsAccessCheck()) {
codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex());
- codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInitializeTypeAndVerifyAccess),
- cls,
- cls->GetDexPc(),
- nullptr);
+ codegen_->InvokeRuntime(kQuickInitializeTypeAndVerifyAccess, cls, cls->GetDexPc());
CheckEntrypointTypes<kQuickInitializeTypeAndVerifyAccess, void*, uint32_t>();
return;
}
@@ -5632,8 +5659,7 @@
}
void InstructionCodeGeneratorARM::VisitThrow(HThrow* instruction) {
- codegen_->InvokeRuntime(
- QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc(), nullptr);
+ codegen_->InvokeRuntime(kQuickDeliverException, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
}
@@ -6033,11 +6059,9 @@
}
void InstructionCodeGeneratorARM::VisitMonitorOperation(HMonitorOperation* instruction) {
- codegen_->InvokeRuntime(instruction->IsEnter()
- ? QUICK_ENTRY_POINT(pLockObject) : QUICK_ENTRY_POINT(pUnlockObject),
- instruction,
- instruction->GetDexPc(),
- nullptr);
+ codegen_->InvokeRuntime(instruction->IsEnter() ? kQuickLockObject : kQuickUnlockObject,
+ instruction,
+ instruction->GetDexPc());
if (instruction->IsEnter()) {
CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
} else {
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 5d9b2dc..a07dd6b 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -390,12 +390,7 @@
void InvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
uint32_t dex_pc,
- SlowPathCode* slow_path) OVERRIDE;
-
- void InvokeRuntime(int32_t offset,
- HInstruction* instruction,
- uint32_t dex_pc,
- SlowPathCode* slow_path);
+ SlowPathCode* slow_path = nullptr) OVERRIDE;
// Generate code to invoke a runtime entry point, but do not record
// PC-related information in a stack map.
@@ -403,6 +398,8 @@
HInstruction* instruction,
SlowPathCode* slow_path);
+ void GenerateInvokeRuntime(int32_t entry_point_offset);
+
// Emit a write barrier.
void MarkGCCard(Register temp, Register card, Register object, Register value, bool can_be_null);
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 1101edf..17fc13c 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -585,6 +585,7 @@
DCHECK(instruction_->IsInstanceFieldGet() ||
instruction_->IsStaticFieldGet() ||
instruction_->IsArrayGet() ||
+ instruction_->IsArraySet() ||
instruction_->IsLoadClass() ||
instruction_->IsLoadString() ||
instruction_->IsInstanceOf() ||
@@ -2156,11 +2157,9 @@
Primitive::Type value_type = instruction->GetComponentType();
bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
- bool object_array_set_with_read_barrier =
- kEmitCompilerReadBarrier && (value_type == Primitive::kPrimNot);
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
instruction,
- (may_need_runtime_call_for_type_check || object_array_set_with_read_barrier) ?
+ may_need_runtime_call_for_type_check ?
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
@@ -2172,6 +2171,11 @@
} else {
locations->SetInAt(2, Location::RequiresRegister());
}
+ if (kEmitCompilerReadBarrier && kUseBakerReadBarrier && (value_type == Primitive::kPrimNot)) {
+ // Additional temporary registers for a Baker read barrier.
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ }
}
void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
@@ -2221,7 +2225,6 @@
codegen_->Store(value_type, value, destination);
codegen_->MaybeRecordImplicitNullCheck(instruction);
} else {
- DCHECK(needs_write_barrier);
DCHECK(!instruction->GetArray()->IsIntermediateAddress());
vixl::aarch64::Label done;
SlowPathCodeARM64* slow_path = nullptr;
@@ -2260,33 +2263,112 @@
}
if (kEmitCompilerReadBarrier) {
- // When read barriers are enabled, the type checking
- // instrumentation requires two read barriers:
- //
- // __ Mov(temp2, temp);
- // // /* HeapReference<Class> */ temp = temp->component_type_
- // __ Ldr(temp, HeapOperand(temp, component_offset));
- // codegen_->GenerateReadBarrierSlow(
- // instruction, temp_loc, temp_loc, temp2_loc, component_offset);
- //
- // // /* HeapReference<Class> */ temp2 = value->klass_
- // __ Ldr(temp2, HeapOperand(Register(value), class_offset));
- // codegen_->GenerateReadBarrierSlow(
- // instruction, temp2_loc, temp2_loc, value_loc, class_offset, temp_loc);
- //
- // __ Cmp(temp, temp2);
- //
- // However, the second read barrier may trash `temp`, as it
- // is a temporary register, and as such would not be saved
- // along with live registers before calling the runtime (nor
- // restored afterwards). So in this case, we bail out and
- // delegate the work to the array set slow path.
- //
- // TODO: Extend the register allocator to support a new
- // "(locally) live temp" location so as to avoid always
- // going into the slow path when read barriers are enabled.
- __ B(slow_path->GetEntryLabel());
+ if (!kUseBakerReadBarrier) {
+ // When (non-Baker) read barriers are enabled, the type
+ // checking instrumentation requires two read barriers
+ // generated by CodeGeneratorARM64::GenerateReadBarrierSlow:
+ //
+ // __ Mov(temp2, temp);
+ // // /* HeapReference<Class> */ temp = temp->component_type_
+ // __ Ldr(temp, HeapOperand(temp, component_offset));
+ // codegen_->GenerateReadBarrierSlow(
+ // instruction, temp_loc, temp_loc, temp2_loc, component_offset);
+ //
+ // // /* HeapReference<Class> */ temp2 = value->klass_
+ // __ Ldr(temp2, HeapOperand(Register(value), class_offset));
+ // codegen_->GenerateReadBarrierSlow(
+ // instruction, temp2_loc, temp2_loc, value_loc, class_offset, temp_loc);
+ //
+ // __ Cmp(temp, temp2);
+ //
+ // However, the second read barrier may trash `temp`, as it
+ // is a temporary register, and as such would not be saved
+ // along with live registers before calling the runtime (nor
+ // restored afterwards). So in this case, we bail out and
+ // delegate the work to the array set slow path.
+ //
+ // TODO: Extend the register allocator to support a new
+ // "(locally) live temp" location so as to avoid always
+ // going into the slow path when read barriers are enabled?
+ //
+ // There is no such problem with Baker read barriers (see below).
+ __ B(slow_path->GetEntryLabel());
+ } else {
+ // Note that we cannot use `temps` (instance of VIXL's
+ // UseScratchRegisterScope) to allocate `temp2` because
+ // the Baker read barriers generated by
+ // GenerateFieldLoadWithBakerReadBarrier below also use
+ // that facility to allocate a temporary register, thus
+ // making VIXL's scratch register pool empty.
+ Location temp2_loc = locations->GetTemp(0);
+ Register temp2 = WRegisterFrom(temp2_loc);
+
+ // Note: Because it is acquired from VIXL's scratch register
+ // pool, `temp` might be IP0, and thus cannot be used as
+ // `ref` argument of GenerateFieldLoadWithBakerReadBarrier
+ // calls below (see ReadBarrierMarkSlowPathARM64 for more
+ // details).
+
+ // /* HeapReference<Class> */ temp2 = array->klass_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
+ temp2_loc,
+ array,
+ class_offset,
+ temp,
+ /* needs_null_check */ true,
+ /* use_load_acquire */ false);
+
+ // /* HeapReference<Class> */ temp2 = temp2->component_type_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
+ temp2_loc,
+ temp2,
+ component_offset,
+ temp,
+ /* needs_null_check */ false,
+ /* use_load_acquire */ false);
+ // For the same reason that we request `temp2` from the
+ // register allocator above, we cannot get `temp3` from
+ // VIXL's scratch register pool.
+ Location temp3_loc = locations->GetTemp(1);
+ Register temp3 = WRegisterFrom(temp3_loc);
+ // Register `temp2` is not trashed by the read barrier
+ // emitted by GenerateFieldLoadWithBakerReadBarrier below,
+ // as that method produces a call to a ReadBarrierMarkRegX
+ // entry point, which saves all potentially live registers,
+ // including temporaries such as `temp2`.
+ // /* HeapReference<Class> */ temp3 = register_value->klass_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
+ temp3_loc,
+ value.W(),
+ class_offset,
+ temp,
+ /* needs_null_check */ false,
+ /* use_load_acquire */ false);
+ // If heap poisoning is enabled, `temp2` and `temp3` have
+ // been unpoisoned by the the previous calls to
+ // CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier.
+ __ Cmp(temp2, temp3);
+
+ if (instruction->StaticTypeOfArrayIsObjectArray()) {
+ vixl::aarch64::Label do_put;
+ __ B(eq, &do_put);
+ // We do not need to emit a read barrier for the
+ // following heap reference load, as `temp2` is only used
+ // in a comparison with null below, and this reference
+ // is not kept afterwards.
+ // /* HeapReference<Class> */ temp = temp2->super_class_
+ __ Ldr(temp, HeapOperand(temp2, super_offset));
+ // If heap poisoning is enabled, no need to unpoison
+ // `temp`, as we are comparing against null below.
+ __ Cbnz(temp, slow_path->GetEntryLabel());
+ __ Bind(&do_put);
+ } else {
+ __ B(ne, slow_path->GetEntryLabel());
+ }
+ }
} else {
+ // Non read barrier code.
+
Register temp2 = temps.AcquireSameSizeAs(array);
// /* HeapReference<Class> */ temp = array->klass_
__ Ldr(temp, HeapOperand(array, class_offset));
@@ -2300,6 +2382,7 @@
// If heap poisoning is enabled, no need to unpoison `temp`
// nor `temp2`, as we are comparing two poisoned references.
__ Cmp(temp, temp2);
+ temps.Release(temp2);
if (instruction->StaticTypeOfArrayIsObjectArray()) {
vixl::aarch64::Label do_put;
@@ -2317,7 +2400,6 @@
} else {
__ B(ne, slow_path->GetEntryLabel());
}
- temps.Release(temp2);
}
}
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index f50eb5c..675c5e0 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -460,6 +460,7 @@
DCHECK(instruction_->IsInstanceFieldGet() ||
instruction_->IsStaticFieldGet() ||
instruction_->IsArrayGet() ||
+ instruction_->IsArraySet() ||
instruction_->IsLoadClass() ||
instruction_->IsLoadString() ||
instruction_->IsInstanceOf() ||
@@ -5253,12 +5254,10 @@
bool needs_write_barrier =
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
- bool object_array_set_with_read_barrier =
- kEmitCompilerReadBarrier && (value_type == Primitive::kPrimNot);
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
instruction,
- (may_need_runtime_call_for_type_check || object_array_set_with_read_barrier) ?
+ may_need_runtime_call_for_type_check ?
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall);
@@ -5279,6 +5278,7 @@
}
if (needs_write_barrier) {
// Temporary registers for the write barrier.
+ // These registers may be used for Baker read barriers too.
locations->AddTemp(Location::RequiresRegister()); // Possibly used for ref. poisoning too.
// Ensure the card is in a byte register.
locations->AddTemp(Location::RegisterLocation(ECX));
@@ -5349,9 +5349,13 @@
DCHECK(needs_write_barrier);
Register register_value = value.AsRegister<Register>();
- NearLabel done, not_null, do_put;
+ // We cannot use a NearLabel for `done`, as its range may be too
+ // short when Baker read barriers are enabled.
+ Label done;
+ NearLabel not_null, do_put;
SlowPathCode* slow_path = nullptr;
- Register temp = locations->GetTemp(0).AsRegister<Register>();
+ Location temp_loc = locations->GetTemp(0);
+ Register temp = temp_loc.AsRegister<Register>();
if (may_need_runtime_call_for_type_check) {
slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathX86(instruction);
codegen_->AddSlowPath(slow_path);
@@ -5365,33 +5369,77 @@
}
if (kEmitCompilerReadBarrier) {
- // When read barriers are enabled, the type checking
- // instrumentation requires two read barriers:
- //
- // __ movl(temp2, temp);
- // // /* HeapReference<Class> */ temp = temp->component_type_
- // __ movl(temp, Address(temp, component_offset));
- // codegen_->GenerateReadBarrierSlow(
- // instruction, temp_loc, temp_loc, temp2_loc, component_offset);
- //
- // // /* HeapReference<Class> */ temp2 = register_value->klass_
- // __ movl(temp2, Address(register_value, class_offset));
- // codegen_->GenerateReadBarrierSlow(
- // instruction, temp2_loc, temp2_loc, value, class_offset, temp_loc);
- //
- // __ cmpl(temp, temp2);
- //
- // However, the second read barrier may trash `temp`, as it
- // is a temporary register, and as such would not be saved
- // along with live registers before calling the runtime (nor
- // restored afterwards). So in this case, we bail out and
- // delegate the work to the array set slow path.
- //
- // TODO: Extend the register allocator to support a new
- // "(locally) live temp" location so as to avoid always
- // going into the slow path when read barriers are enabled.
- __ jmp(slow_path->GetEntryLabel());
+ if (!kUseBakerReadBarrier) {
+ // When (non-Baker) read barriers are enabled, the type
+ // checking instrumentation requires two read barriers
+ // generated by CodeGeneratorX86::GenerateReadBarrierSlow:
+ //
+ // __ movl(temp2, temp);
+ // // /* HeapReference<Class> */ temp = temp->component_type_
+ // __ movl(temp, Address(temp, component_offset));
+ // codegen_->GenerateReadBarrierSlow(
+ // instruction, temp_loc, temp_loc, temp2_loc, component_offset);
+ //
+ // // /* HeapReference<Class> */ temp2 = register_value->klass_
+ // __ movl(temp2, Address(register_value, class_offset));
+ // codegen_->GenerateReadBarrierSlow(
+ // instruction, temp2_loc, temp2_loc, value, class_offset, temp_loc);
+ //
+ // __ cmpl(temp, temp2);
+ //
+ // However, the second read barrier may trash `temp`, as it
+ // is a temporary register, and as such would not be saved
+ // along with live registers before calling the runtime (nor
+ // restored afterwards). So in this case, we bail out and
+ // delegate the work to the array set slow path.
+ //
+ // TODO: Extend the register allocator to support a new
+ // "(locally) live temp" location so as to avoid always
+ // going into the slow path when read barriers are enabled?
+ //
+ // There is no such problem with Baker read barriers (see below).
+ __ jmp(slow_path->GetEntryLabel());
+ } else {
+ Location temp2_loc = locations->GetTemp(1);
+ Register temp2 = temp2_loc.AsRegister<Register>();
+ // /* HeapReference<Class> */ temp = array->klass_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(
+ instruction, temp_loc, array, class_offset, /* needs_null_check */ true);
+
+ // /* HeapReference<Class> */ temp = temp->component_type_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(
+ instruction, temp_loc, temp, component_offset, /* needs_null_check */ false);
+ // Register `temp` is not trashed by the read barrier
+ // emitted by GenerateFieldLoadWithBakerReadBarrier below,
+ // as that method produces a call to a ReadBarrierMarkRegX
+ // entry point, which saves all potentially live registers,
+ // including temporaries such as `temp`.
+ // /* HeapReference<Class> */ temp2 = register_value->klass_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(
+ instruction, temp2_loc, register_value, class_offset, /* needs_null_check */ false);
+ // If heap poisoning is enabled, `temp` and `temp2` have
+ // been unpoisoned by the previous calls to
+ // CodeGeneratorX86::GenerateFieldLoadWithBakerReadBarrier.
+ __ cmpl(temp, temp2);
+
+ if (instruction->StaticTypeOfArrayIsObjectArray()) {
+ __ j(kEqual, &do_put);
+ // We do not need to emit a read barrier for the
+ // following heap reference load, as `temp` is only used
+ // in a comparison with null below, and this reference
+ // is not kept afterwards. Also, if heap poisoning is
+ // enabled, there is no need to unpoison that heap
+ // reference for the same reason (comparison with null).
+ __ cmpl(Address(temp, super_offset), Immediate(0));
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ Bind(&do_put);
+ } else {
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ }
+ }
} else {
+ // Non read barrier code.
+
// /* HeapReference<Class> */ temp = array->klass_
__ movl(temp, Address(array, class_offset));
codegen_->MaybeRecordImplicitNullCheck(instruction);
@@ -5410,11 +5458,10 @@
// not been unpoisoned yet; unpoison it now.
__ MaybeUnpoisonHeapReference(temp);
- // /* HeapReference<Class> */ temp = temp->super_class_
- __ movl(temp, Address(temp, super_offset));
- // If heap poisoning is enabled, no need to unpoison
- // `temp`, as we are comparing against null below.
- __ testl(temp, temp);
+ // If heap poisoning is enabled, no need to unpoison the
+ // heap reference loaded below, as it is only used for a
+ // comparison with null.
+ __ cmpl(Address(temp, super_offset), Immediate(0));
__ j(kNotEqual, slow_path->GetEntryLabel());
__ Bind(&do_put);
} else {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index ec37e5d..87b6de3 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -481,6 +481,7 @@
DCHECK(instruction_->IsInstanceFieldGet() ||
instruction_->IsStaticFieldGet() ||
instruction_->IsArrayGet() ||
+ instruction_->IsArraySet() ||
instruction_->IsLoadClass() ||
instruction_->IsLoadString() ||
instruction_->IsInstanceOf() ||
@@ -4741,12 +4742,10 @@
bool needs_write_barrier =
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
- bool object_array_set_with_read_barrier =
- kEmitCompilerReadBarrier && (value_type == Primitive::kPrimNot);
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
instruction,
- (may_need_runtime_call_for_type_check || object_array_set_with_read_barrier) ?
+ may_need_runtime_call_for_type_check ?
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall);
@@ -4760,10 +4759,8 @@
if (needs_write_barrier) {
// Temporary registers for the write barrier.
-
- // This first temporary register is possibly used for heap
- // reference poisoning and/or read barrier emission too.
- locations->AddTemp(Location::RequiresRegister());
+ // These registers may be used for Baker read barriers too.
+ locations->AddTemp(Location::RequiresRegister()); // Possibly used for ref. poisoning too.
locations->AddTemp(Location::RequiresRegister());
}
}
@@ -4833,9 +4830,13 @@
DCHECK(needs_write_barrier);
CpuRegister register_value = value.AsRegister<CpuRegister>();
- NearLabel done, not_null, do_put;
+ // We cannot use a NearLabel for `done`, as its range may be too
+ // short when Baker read barriers are enabled.
+ Label done;
+ NearLabel not_null, do_put;
SlowPathCode* slow_path = nullptr;
- CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>();
+ Location temp_loc = locations->GetTemp(0);
+ CpuRegister temp = temp_loc.AsRegister<CpuRegister>();
if (may_need_runtime_call_for_type_check) {
slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathX86_64(instruction);
codegen_->AddSlowPath(slow_path);
@@ -4849,33 +4850,77 @@
}
if (kEmitCompilerReadBarrier) {
- // When read barriers are enabled, the type checking
- // instrumentation requires two read barriers:
- //
- // __ movl(temp2, temp);
- // // /* HeapReference<Class> */ temp = temp->component_type_
- // __ movl(temp, Address(temp, component_offset));
- // codegen_->GenerateReadBarrierSlow(
- // instruction, temp_loc, temp_loc, temp2_loc, component_offset);
- //
- // // /* HeapReference<Class> */ temp2 = register_value->klass_
- // __ movl(temp2, Address(register_value, class_offset));
- // codegen_->GenerateReadBarrierSlow(
- // instruction, temp2_loc, temp2_loc, value, class_offset, temp_loc);
- //
- // __ cmpl(temp, temp2);
- //
- // However, the second read barrier may trash `temp`, as it
- // is a temporary register, and as such would not be saved
- // along with live registers before calling the runtime (nor
- // restored afterwards). So in this case, we bail out and
- // delegate the work to the array set slow path.
- //
- // TODO: Extend the register allocator to support a new
- // "(locally) live temp" location so as to avoid always
- // going into the slow path when read barriers are enabled.
- __ jmp(slow_path->GetEntryLabel());
+ if (!kUseBakerReadBarrier) {
+ // When (non-Baker) read barriers are enabled, the type
+ // checking instrumentation requires two read barriers
+ // generated by CodeGeneratorX86_64::GenerateReadBarrierSlow:
+ //
+ // __ movl(temp2, temp);
+ // // /* HeapReference<Class> */ temp = temp->component_type_
+ // __ movl(temp, Address(temp, component_offset));
+ // codegen_->GenerateReadBarrierSlow(
+ // instruction, temp_loc, temp_loc, temp2_loc, component_offset);
+ //
+ // // /* HeapReference<Class> */ temp2 = register_value->klass_
+ // __ movl(temp2, Address(register_value, class_offset));
+ // codegen_->GenerateReadBarrierSlow(
+ // instruction, temp2_loc, temp2_loc, value, class_offset, temp_loc);
+ //
+ // __ cmpl(temp, temp2);
+ //
+ // However, the second read barrier may trash `temp`, as it
+ // is a temporary register, and as such would not be saved
+ // along with live registers before calling the runtime (nor
+ // restored afterwards). So in this case, we bail out and
+ // delegate the work to the array set slow path.
+ //
+ // TODO: Extend the register allocator to support a new
+ // "(locally) live temp" location so as to avoid always
+ // going into the slow path when read barriers are enabled?
+ //
+ // There is no such problem with Baker read barriers (see below).
+ __ jmp(slow_path->GetEntryLabel());
+ } else {
+ Location temp2_loc = locations->GetTemp(1);
+ CpuRegister temp2 = temp2_loc.AsRegister<CpuRegister>();
+ // /* HeapReference<Class> */ temp = array->klass_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(
+ instruction, temp_loc, array, class_offset, /* needs_null_check */ true);
+
+ // /* HeapReference<Class> */ temp = temp->component_type_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(
+ instruction, temp_loc, temp, component_offset, /* needs_null_check */ false);
+ // Register `temp` is not trashed by the read barrier
+ // emitted by GenerateFieldLoadWithBakerReadBarrier below,
+ // as that method produces a call to a ReadBarrierMarkRegX
+ // entry point, which saves all potentially live registers,
+        // including temporaries such as `temp`.
+ // /* HeapReference<Class> */ temp2 = register_value->klass_
+ codegen_->GenerateFieldLoadWithBakerReadBarrier(
+ instruction, temp2_loc, register_value, class_offset, /* needs_null_check */ false);
+ // If heap poisoning is enabled, `temp` and `temp2` have
+        // been unpoisoned by the previous calls to
+ // CodeGeneratorX86_64::GenerateFieldLoadWithBakerReadBarrier.
+ __ cmpl(temp, temp2);
+
+ if (instruction->StaticTypeOfArrayIsObjectArray()) {
+ __ j(kEqual, &do_put);
+ // We do not need to emit a read barrier for the
+ // following heap reference load, as `temp` is only used
+ // in a comparison with null below, and this reference
+ // is not kept afterwards. Also, if heap poisoning is
+ // enabled, there is no need to unpoison that heap
+ // reference for the same reason (comparison with null).
+ __ cmpl(Address(temp, super_offset), Immediate(0));
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ Bind(&do_put);
+ } else {
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ }
+ }
} else {
+ // Non read barrier code.
+
// /* HeapReference<Class> */ temp = array->klass_
__ movl(temp, Address(array, class_offset));
codegen_->MaybeRecordImplicitNullCheck(instruction);
@@ -4894,11 +4939,10 @@
// not been unpoisoned yet; unpoison it now.
__ MaybeUnpoisonHeapReference(temp);
- // /* HeapReference<Class> */ temp = temp->super_class_
- __ movl(temp, Address(temp, super_offset));
- // If heap poisoning is enabled, no need to unpoison
- // `temp`, as we are comparing against null below.
- __ testl(temp, temp);
+ // If heap poisoning is enabled, no need to unpoison the
+ // heap reference loaded below, as it is only used for a
+ // comparison with null.
+ __ cmpl(Address(temp, super_offset), Immediate(0));
__ j(kNotEqual, slow_path->GetEntryLabel());
__ Bind(&do_put);
} else {
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 0bbc0e5..eefc642 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -1286,10 +1286,8 @@
__ LoadImmediate(tmp_reg, 0);
}
- __ LoadFromOffset(kLoadWord, LR, TR,
- QUICK_ENTRYPOINT_OFFSET(kArmPointerSize, pIndexOf).Int32Value());
+ codegen->InvokeRuntime(kQuickIndexOf, invoke, invoke->GetDexPc(), slow_path);
CheckEntrypointTypes<kQuickIndexOf, int32_t, void*, uint32_t, uint32_t>();
- __ blx(LR);
if (slow_path != nullptr) {
__ Bind(slow_path->GetExitLabel());
@@ -1356,13 +1354,8 @@
codegen_->AddSlowPath(slow_path);
__ b(slow_path->GetEntryLabel(), EQ);
- __ LoadFromOffset(kLoadWord,
- LR,
- TR,
- QUICK_ENTRYPOINT_OFFSET(kArmPointerSize, pAllocStringFromBytes).Int32Value());
+ codegen_->InvokeRuntime(kQuickAllocStringFromBytes, invoke, invoke->GetDexPc(), slow_path);
CheckEntrypointTypes<kQuickAllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t>();
- __ blx(LR);
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
__ Bind(slow_path->GetExitLabel());
}
@@ -1378,21 +1371,14 @@
}
void IntrinsicCodeGeneratorARM::VisitStringNewStringFromChars(HInvoke* invoke) {
- ArmAssembler* assembler = GetAssembler();
-
// No need to emit code checking whether `locations->InAt(2)` is a null
// pointer, as callers of the native method
//
// java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
//
// all include a null check on `data` before calling that method.
- __ LoadFromOffset(kLoadWord,
- LR,
- TR,
- QUICK_ENTRYPOINT_OFFSET(kArmPointerSize, pAllocStringFromChars).Int32Value());
+ codegen_->InvokeRuntime(kQuickAllocStringFromChars, invoke, invoke->GetDexPc());
CheckEntrypointTypes<kQuickAllocStringFromChars, void*, int32_t, int32_t, void*>();
- __ blx(LR);
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
void IntrinsicLocationsBuilderARM::VisitStringNewStringFromString(HInvoke* invoke) {
@@ -1414,11 +1400,9 @@
codegen_->AddSlowPath(slow_path);
__ b(slow_path->GetEntryLabel(), EQ);
- __ LoadFromOffset(kLoadWord,
- LR, TR, QUICK_ENTRYPOINT_OFFSET(kArmPointerSize, pAllocStringFromString).Int32Value());
+ codegen_->InvokeRuntime(kQuickAllocStringFromString, invoke, invoke->GetDexPc(), slow_path);
CheckEntrypointTypes<kQuickAllocStringFromString, void*, void*>();
- __ blx(LR);
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+
__ Bind(slow_path->GetExitLabel());
}
@@ -1450,7 +1434,7 @@
}
if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
// Temporary register IP cannot be used in
- // ReadBarrierSystemArrayCopySlowPathARM64 (because that register
+ // ReadBarrierSystemArrayCopySlowPathARM (because that register
// is clobbered by ReadBarrierMarkRegX entry points). Get an extra
// temporary register from the register allocator.
locations->AddTemp(Location::RequiresRegister());
@@ -1994,13 +1978,11 @@
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(calling_convention.GetRegisterAt(0)));
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(calling_convention.GetRegisterAt(1)));
- __ LoadFromOffset(kLoadWord, LR, TR, GetThreadOffset<kArmPointerSize>(entry).Int32Value());
// Native code uses the soft float ABI.
__ vmovrrd(calling_convention.GetRegisterAt(0),
calling_convention.GetRegisterAt(1),
FromLowSToD(locations->InAt(0).AsFpuRegisterPairLow<SRegister>()));
- __ blx(LR);
- codegen->RecordPcInfo(invoke, invoke->GetDexPc());
+ codegen->InvokeRuntime(entry, invoke, invoke->GetDexPc());
__ vmovdrr(FromLowSToD(locations->Out().AsFpuRegisterPairLow<SRegister>()),
calling_convention.GetRegisterAt(0),
calling_convention.GetRegisterAt(1));
@@ -2020,7 +2002,6 @@
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(calling_convention.GetRegisterAt(2)));
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(calling_convention.GetRegisterAt(3)));
- __ LoadFromOffset(kLoadWord, LR, TR, GetThreadOffset<kArmPointerSize>(entry).Int32Value());
// Native code uses the soft float ABI.
__ vmovrrd(calling_convention.GetRegisterAt(0),
calling_convention.GetRegisterAt(1),
@@ -2028,8 +2009,7 @@
__ vmovrrd(calling_convention.GetRegisterAt(2),
calling_convention.GetRegisterAt(3),
FromLowSToD(locations->InAt(1).AsFpuRegisterPairLow<SRegister>()));
- __ blx(LR);
- codegen->RecordPcInfo(invoke, invoke->GetDexPc());
+ codegen->InvokeRuntime(entry, invoke, invoke->GetDexPc());
__ vmovdrr(FromLowSToD(locations->Out().AsFpuRegisterPairLow<SRegister>()),
calling_convention.GetRegisterAt(0),
calling_convention.GetRegisterAt(1));
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
index dfc35b7..9fc683d 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
@@ -215,8 +215,8 @@
// List of exception blocks to generate at the end of the code cache.
ArenaVector<std::unique_ptr<ArmVIXLJNIMacroAssembler::ArmException>> exception_blocks_;
// Used for testing.
- friend class ArmVIXAssemblerTest_VixlLoadFromOffset_Test;
- friend class ArmVIXAssemblerTest_VixlStoreToOffset_Test;
+ friend class ArmVIXLAssemblerTest_VixlLoadFromOffset_Test;
+ friend class ArmVIXLAssemblerTest_VixlStoreToOffset_Test;
};
} // namespace arm
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc
index 41cb04b..367ed97 100644
--- a/compiler/utils/assembler_thumb_test.cc
+++ b/compiler/utils/assembler_thumb_test.cc
@@ -1624,9 +1624,9 @@
typedef arm::Thumb2Assembler AssemblerType;
#endif
-class ArmVIXAssemblerTest : public ::testing::Test {
+class ArmVIXLAssemblerTest : public ::testing::Test {
public:
- ArmVIXAssemblerTest() : pool(), arena(&pool), assembler(&arena) { }
+ ArmVIXLAssemblerTest() : pool(), arena(&pool), assembler(&arena) { }
ArenaPool pool;
ArenaAllocator arena;
@@ -1658,7 +1658,7 @@
#undef __
#define __ assembler.
-TEST_F(ArmVIXAssemblerTest, VixlJniHelpers) {
+TEST_F(ArmVIXLAssemblerTest, VixlJniHelpers) {
const bool is_static = true;
const bool is_synchronized = false;
const char* shorty = "IIFII";
@@ -1689,7 +1689,7 @@
__ Load(scratch_register, FrameOffset(4092), 4);
__ Load(scratch_register, FrameOffset(4096), 4);
__ LoadRawPtrFromThread(scratch_register, ThreadOffset32(512));
- __ LoadRef(method_register, scratch_register, MemberOffset(128), true);
+ __ LoadRef(method_register, scratch_register, MemberOffset(128), /* unpoison_reference */ false);
// Stores
__ Store(FrameOffset(32), method_register, 4);
@@ -1743,7 +1743,7 @@
#define __ assembler.asm_.
#endif
-TEST_F(ArmVIXAssemblerTest, VixlLoadFromOffset) {
+TEST_F(ArmVIXLAssemblerTest, VixlLoadFromOffset) {
__ LoadFromOffset(kLoadWord, R2, R4, 12);
__ LoadFromOffset(kLoadWord, R2, R4, 0xfff);
__ LoadFromOffset(kLoadWord, R2, R4, 0x1000);
@@ -1773,7 +1773,7 @@
EmitAndCheck(&assembler, "VixlLoadFromOffset");
}
-TEST_F(ArmVIXAssemblerTest, VixlStoreToOffset) {
+TEST_F(ArmVIXLAssemblerTest, VixlStoreToOffset) {
__ StoreToOffset(kStoreWord, R2, R4, 12);
__ StoreToOffset(kStoreWord, R2, R4, 0xfff);
__ StoreToOffset(kStoreWord, R2, R4, 0x1000);
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index a2621cb..1a21df9 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -1038,129 +1038,15 @@
}
void Mips64Assembler::LoadConst32(GpuRegister rd, int32_t value) {
- if (IsUint<16>(value)) {
- // Use OR with (unsigned) immediate to encode 16b unsigned int.
- Ori(rd, ZERO, value);
- } else if (IsInt<16>(value)) {
- // Use ADD with (signed) immediate to encode 16b signed int.
- Addiu(rd, ZERO, value);
- } else {
- Lui(rd, value >> 16);
- if (value & 0xFFFF)
- Ori(rd, rd, value);
- }
+ TemplateLoadConst32(this, rd, value);
+}
+
+// This function is only used for testing purposes.
+void Mips64Assembler::RecordLoadConst64Path(int value ATTRIBUTE_UNUSED) {
}
void Mips64Assembler::LoadConst64(GpuRegister rd, int64_t value) {
- int bit31 = (value & UINT64_C(0x80000000)) != 0;
-
- // Loads with 1 instruction.
- if (IsUint<16>(value)) {
- Ori(rd, ZERO, value);
- } else if (IsInt<16>(value)) {
- Daddiu(rd, ZERO, value);
- } else if ((value & 0xFFFF) == 0 && IsInt<16>(value >> 16)) {
- Lui(rd, value >> 16);
- } else if (IsInt<32>(value)) {
- // Loads with 2 instructions.
- Lui(rd, value >> 16);
- Ori(rd, rd, value);
- } else if ((value & 0xFFFF0000) == 0 && IsInt<16>(value >> 32)) {
- Ori(rd, ZERO, value);
- Dahi(rd, value >> 32);
- } else if ((value & UINT64_C(0xFFFFFFFF0000)) == 0) {
- Ori(rd, ZERO, value);
- Dati(rd, value >> 48);
- } else if ((value & 0xFFFF) == 0 &&
- (-32768 - bit31) <= (value >> 32) && (value >> 32) <= (32767 - bit31)) {
- Lui(rd, value >> 16);
- Dahi(rd, (value >> 32) + bit31);
- } else if ((value & 0xFFFF) == 0 && ((value >> 31) & 0x1FFFF) == ((0x20000 - bit31) & 0x1FFFF)) {
- Lui(rd, value >> 16);
- Dati(rd, (value >> 48) + bit31);
- } else if (IsPowerOfTwo(value + UINT64_C(1))) {
- int shift_cnt = 64 - CTZ(value + UINT64_C(1));
- Daddiu(rd, ZERO, -1);
- if (shift_cnt < 32) {
- Dsrl(rd, rd, shift_cnt);
- } else {
- Dsrl32(rd, rd, shift_cnt & 31);
- }
- } else {
- int shift_cnt = CTZ(value);
- int64_t tmp = value >> shift_cnt;
- if (IsUint<16>(tmp)) {
- Ori(rd, ZERO, tmp);
- if (shift_cnt < 32) {
- Dsll(rd, rd, shift_cnt);
- } else {
- Dsll32(rd, rd, shift_cnt & 31);
- }
- } else if (IsInt<16>(tmp)) {
- Daddiu(rd, ZERO, tmp);
- if (shift_cnt < 32) {
- Dsll(rd, rd, shift_cnt);
- } else {
- Dsll32(rd, rd, shift_cnt & 31);
- }
- } else if (IsInt<32>(tmp)) {
- // Loads with 3 instructions.
- Lui(rd, tmp >> 16);
- Ori(rd, rd, tmp);
- if (shift_cnt < 32) {
- Dsll(rd, rd, shift_cnt);
- } else {
- Dsll32(rd, rd, shift_cnt & 31);
- }
- } else {
- shift_cnt = 16 + CTZ(value >> 16);
- tmp = value >> shift_cnt;
- if (IsUint<16>(tmp)) {
- Ori(rd, ZERO, tmp);
- if (shift_cnt < 32) {
- Dsll(rd, rd, shift_cnt);
- } else {
- Dsll32(rd, rd, shift_cnt & 31);
- }
- Ori(rd, rd, value);
- } else if (IsInt<16>(tmp)) {
- Daddiu(rd, ZERO, tmp);
- if (shift_cnt < 32) {
- Dsll(rd, rd, shift_cnt);
- } else {
- Dsll32(rd, rd, shift_cnt & 31);
- }
- Ori(rd, rd, value);
- } else {
- // Loads with 3-4 instructions.
- uint64_t tmp2 = value;
- bool used_lui = false;
- if (((tmp2 >> 16) & 0xFFFF) != 0 || (tmp2 & 0xFFFFFFFF) == 0) {
- Lui(rd, tmp2 >> 16);
- used_lui = true;
- }
- if ((tmp2 & 0xFFFF) != 0) {
- if (used_lui) {
- Ori(rd, rd, tmp2);
- } else {
- Ori(rd, ZERO, tmp2);
- }
- }
- if (bit31) {
- tmp2 += UINT64_C(0x100000000);
- }
- if (((tmp2 >> 32) & 0xFFFF) != 0) {
- Dahi(rd, tmp2 >> 32);
- }
- if (tmp2 & UINT64_C(0x800000000000)) {
- tmp2 += UINT64_C(0x1000000000000);
- }
- if ((tmp2 >> 48) != 0) {
- Dati(rd, tmp2 >> 48);
- }
- }
- }
- }
+ TemplateLoadConst64(this, rd, value);
}
void Mips64Assembler::Daddiu64(GpuRegister rt, GpuRegister rs, int64_t value, GpuRegister rtmp) {
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index a7d350c..6277b5d 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -33,6 +33,237 @@
namespace art {
namespace mips64 {
+enum LoadConst64Path {
+ kLoadConst64PathZero = 0x0,
+ kLoadConst64PathOri = 0x1,
+ kLoadConst64PathDaddiu = 0x2,
+ kLoadConst64PathLui = 0x4,
+ kLoadConst64PathLuiOri = 0x8,
+ kLoadConst64PathOriDahi = 0x10,
+ kLoadConst64PathOriDati = 0x20,
+ kLoadConst64PathLuiDahi = 0x40,
+ kLoadConst64PathLuiDati = 0x80,
+ kLoadConst64PathDaddiuDsrlX = 0x100,
+ kLoadConst64PathOriDsllX = 0x200,
+ kLoadConst64PathDaddiuDsllX = 0x400,
+ kLoadConst64PathLuiOriDsllX = 0x800,
+ kLoadConst64PathOriDsllXOri = 0x1000,
+ kLoadConst64PathDaddiuDsllXOri = 0x2000,
+ kLoadConst64PathDaddiuDahi = 0x4000,
+ kLoadConst64PathDaddiuDati = 0x8000,
+ kLoadConst64PathDinsu1 = 0x10000,
+ kLoadConst64PathDinsu2 = 0x20000,
+ kLoadConst64PathCatchAll = 0x40000,
+ kLoadConst64PathAllPaths = 0x7ffff,
+};
+
+template <typename Asm>
+void TemplateLoadConst32(Asm* a, GpuRegister rd, int32_t value) {
+ if (IsUint<16>(value)) {
+ // Use OR with (unsigned) immediate to encode 16b unsigned int.
+ a->Ori(rd, ZERO, value);
+ } else if (IsInt<16>(value)) {
+ // Use ADD with (signed) immediate to encode 16b signed int.
+ a->Addiu(rd, ZERO, value);
+ } else {
+ // Set 16 most significant bits of value. The "lui" instruction
+ // also clears the 16 least significant bits to zero.
+ a->Lui(rd, value >> 16);
+ if (value & 0xFFFF) {
+ // If the 16 least significant bits are non-zero, set them
+ // here.
+ a->Ori(rd, rd, value);
+ }
+ }
+}
+
+static inline int InstrCountForLoadReplicatedConst32(int64_t value) {
+ int32_t x = Low32Bits(value);
+ int32_t y = High32Bits(value);
+
+ if (x == y) {
+ return (IsUint<16>(x) || IsInt<16>(x) || ((x & 0xFFFF) == 0 && IsInt<16>(value >> 16))) ? 2 : 3;
+ }
+
+ return INT_MAX;
+}
+
+template <typename Asm, typename Rtype, typename Vtype>
+void TemplateLoadConst64(Asm* a, Rtype rd, Vtype value) {
+ int bit31 = (value & UINT64_C(0x80000000)) != 0;
+ int rep32_count = InstrCountForLoadReplicatedConst32(value);
+
+ // Loads with 1 instruction.
+ if (IsUint<16>(value)) {
+ // 64-bit value can be loaded as an unsigned 16-bit number.
+ a->RecordLoadConst64Path(kLoadConst64PathOri);
+ a->Ori(rd, ZERO, value);
+ } else if (IsInt<16>(value)) {
+    // 64-bit value can be loaded as a signed 16-bit number.
+ a->RecordLoadConst64Path(kLoadConst64PathDaddiu);
+ a->Daddiu(rd, ZERO, value);
+ } else if ((value & 0xFFFF) == 0 && IsInt<16>(value >> 16)) {
+    // 64-bit value can be loaded as a signed 32-bit number which has all
+ // of its 16 least significant bits set to zero.
+ a->RecordLoadConst64Path(kLoadConst64PathLui);
+ a->Lui(rd, value >> 16);
+ } else if (IsInt<32>(value)) {
+ // Loads with 2 instructions.
+    // 64-bit value can be loaded as a signed 32-bit number which has some
+ // or all of its 16 least significant bits set to one.
+ a->RecordLoadConst64Path(kLoadConst64PathLuiOri);
+ a->Lui(rd, value >> 16);
+ a->Ori(rd, rd, value);
+ } else if ((value & 0xFFFF0000) == 0 && IsInt<16>(value >> 32)) {
+ // 64-bit value which consists of an unsigned 16-bit value in its
+ // least significant 32-bits, and a signed 16-bit value in its
+ // most significant 32-bits.
+ a->RecordLoadConst64Path(kLoadConst64PathOriDahi);
+ a->Ori(rd, ZERO, value);
+ a->Dahi(rd, value >> 32);
+ } else if ((value & UINT64_C(0xFFFFFFFF0000)) == 0) {
+ // 64-bit value which consists of an unsigned 16-bit value in its
+ // least significant 48-bits, and a signed 16-bit value in its
+ // most significant 16-bits.
+ a->RecordLoadConst64Path(kLoadConst64PathOriDati);
+ a->Ori(rd, ZERO, value);
+ a->Dati(rd, value >> 48);
+ } else if ((value & 0xFFFF) == 0 &&
+ (-32768 - bit31) <= (value >> 32) && (value >> 32) <= (32767 - bit31)) {
+ // 16 LSBs (Least Significant Bits) all set to zero.
+ // 48 MSBs (Most Significant Bits) hold a signed 32-bit value.
+ a->RecordLoadConst64Path(kLoadConst64PathLuiDahi);
+ a->Lui(rd, value >> 16);
+ a->Dahi(rd, (value >> 32) + bit31);
+ } else if ((value & 0xFFFF) == 0 && ((value >> 31) & 0x1FFFF) == ((0x20000 - bit31) & 0x1FFFF)) {
+ // 16 LSBs all set to zero.
+ // 48 MSBs hold a signed value which can't be represented by signed
+ // 32-bit number, and the middle 16 bits are all zero, or all one.
+ a->RecordLoadConst64Path(kLoadConst64PathLuiDati);
+ a->Lui(rd, value >> 16);
+ a->Dati(rd, (value >> 48) + bit31);
+ } else if (IsInt<16>(static_cast<int32_t>(value)) &&
+ (-32768 - bit31) <= (value >> 32) && (value >> 32) <= (32767 - bit31)) {
+ // 32 LSBs contain an unsigned 16-bit number.
+ // 32 MSBs contain a signed 16-bit number.
+ a->RecordLoadConst64Path(kLoadConst64PathDaddiuDahi);
+ a->Daddiu(rd, ZERO, value);
+ a->Dahi(rd, (value >> 32) + bit31);
+ } else if (IsInt<16>(static_cast<int32_t>(value)) &&
+ ((value >> 31) & 0x1FFFF) == ((0x20000 - bit31) & 0x1FFFF)) {
+ // 48 LSBs contain an unsigned 16-bit number.
+ // 16 MSBs contain a signed 16-bit number.
+ a->RecordLoadConst64Path(kLoadConst64PathDaddiuDati);
+ a->Daddiu(rd, ZERO, value);
+ a->Dati(rd, (value >> 48) + bit31);
+ } else if (IsPowerOfTwo(value + UINT64_C(1))) {
+ // 64-bit values which have their "n" MSBs set to one, and their
+ // "64-n" LSBs set to zero. "n" must meet the restrictions 0 < n < 64.
+ int shift_cnt = 64 - CTZ(value + UINT64_C(1));
+ a->RecordLoadConst64Path(kLoadConst64PathDaddiuDsrlX);
+ a->Daddiu(rd, ZERO, -1);
+ if (shift_cnt < 32) {
+ a->Dsrl(rd, rd, shift_cnt);
+ } else {
+ a->Dsrl32(rd, rd, shift_cnt & 31);
+ }
+ } else {
+ int shift_cnt = CTZ(value);
+ int64_t tmp = value >> shift_cnt;
+ a->RecordLoadConst64Path(kLoadConst64PathOriDsllX);
+ if (IsUint<16>(tmp)) {
+ // Value can be computed by loading a 16-bit unsigned value, and
+ // then shifting left.
+ a->Ori(rd, ZERO, tmp);
+ if (shift_cnt < 32) {
+ a->Dsll(rd, rd, shift_cnt);
+ } else {
+ a->Dsll32(rd, rd, shift_cnt & 31);
+ }
+ } else if (IsInt<16>(tmp)) {
+ // Value can be computed by loading a 16-bit signed value, and
+ // then shifting left.
+ a->RecordLoadConst64Path(kLoadConst64PathDaddiuDsllX);
+ a->Daddiu(rd, ZERO, tmp);
+ if (shift_cnt < 32) {
+ a->Dsll(rd, rd, shift_cnt);
+ } else {
+ a->Dsll32(rd, rd, shift_cnt & 31);
+ }
+ } else if (rep32_count < 3) {
+ // Value being loaded has 32 LSBs equal to the 32 MSBs, and the
+ // value loaded into the 32 LSBs can be loaded with a single
+ // MIPS instruction.
+ a->LoadConst32(rd, value);
+ a->Dinsu(rd, rd, 32, 32);
+ a->RecordLoadConst64Path(kLoadConst64PathDinsu1);
+ } else if (IsInt<32>(tmp)) {
+ // Loads with 3 instructions.
+ // Value can be computed by loading a 32-bit signed value, and
+ // then shifting left.
+ a->RecordLoadConst64Path(kLoadConst64PathLuiOriDsllX);
+ a->Lui(rd, tmp >> 16);
+ a->Ori(rd, rd, tmp);
+ if (shift_cnt < 32) {
+ a->Dsll(rd, rd, shift_cnt);
+ } else {
+ a->Dsll32(rd, rd, shift_cnt & 31);
+ }
+ } else {
+ shift_cnt = 16 + CTZ(value >> 16);
+ tmp = value >> shift_cnt;
+ if (IsUint<16>(tmp)) {
+ // Value can be computed by loading a 16-bit unsigned value,
+ // shifting left, and "or"ing in another 16-bit unsigned value.
+ a->RecordLoadConst64Path(kLoadConst64PathOriDsllXOri);
+ a->Ori(rd, ZERO, tmp);
+ if (shift_cnt < 32) {
+ a->Dsll(rd, rd, shift_cnt);
+ } else {
+ a->Dsll32(rd, rd, shift_cnt & 31);
+ }
+ a->Ori(rd, rd, value);
+ } else if (IsInt<16>(tmp)) {
+ // Value can be computed by loading a 16-bit signed value,
+ // shifting left, and "or"ing in a 16-bit unsigned value.
+ a->RecordLoadConst64Path(kLoadConst64PathDaddiuDsllXOri);
+ a->Daddiu(rd, ZERO, tmp);
+ if (shift_cnt < 32) {
+ a->Dsll(rd, rd, shift_cnt);
+ } else {
+ a->Dsll32(rd, rd, shift_cnt & 31);
+ }
+ a->Ori(rd, rd, value);
+ } else if (rep32_count < 4) {
+ // Value being loaded has 32 LSBs equal to the 32 MSBs, and the
+ // value in the 32 LSBs requires 2 MIPS instructions to load.
+ a->LoadConst32(rd, value);
+ a->Dinsu(rd, rd, 32, 32);
+ a->RecordLoadConst64Path(kLoadConst64PathDinsu2);
+ } else {
+ // Loads with 3-4 instructions.
+ // Catch-all case to get any other 64-bit values which aren't
+ // handled by special cases above.
+ uint64_t tmp2 = value;
+ a->RecordLoadConst64Path(kLoadConst64PathCatchAll);
+ a->LoadConst32(rd, value);
+ if (bit31) {
+ tmp2 += UINT64_C(0x100000000);
+ }
+ if (((tmp2 >> 32) & 0xFFFF) != 0) {
+ a->Dahi(rd, tmp2 >> 32);
+ }
+ if (tmp2 & UINT64_C(0x800000000000)) {
+ tmp2 += UINT64_C(0x1000000000000);
+ }
+ if ((tmp2 >> 48) != 0) {
+ a->Dati(rd, tmp2 >> 48);
+ }
+ }
+ }
+ }
+}
+
static constexpr size_t kMips64WordSize = 4;
static constexpr size_t kMips64DoublewordSize = 8;
@@ -326,9 +557,13 @@
void Not(GpuRegister rd, GpuRegister rs);
// Higher level composite instructions.
+ int InstrCountForLoadReplicatedConst32(int64_t);
void LoadConst32(GpuRegister rd, int32_t value);
void LoadConst64(GpuRegister rd, int64_t value); // MIPS64
+ // This function is only used for testing purposes.
+ void RecordLoadConst64Path(int value);
+
void Daddiu64(GpuRegister rt, GpuRegister rs, int64_t value, GpuRegister rtmp = AT); // MIPS64
void Bind(Label* label) OVERRIDE {
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index b758d64..1fdef96 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -1636,6 +1636,177 @@
DriverStr(expected, "StoreFpuToOffset");
}
+///////////////////////
+// Loading Constants //
+///////////////////////
+
+TEST_F(AssemblerMIPS64Test, LoadConst32) {
+ // IsUint<16>(value)
+ __ LoadConst32(mips64::V0, 0);
+ __ LoadConst32(mips64::V0, 65535);
+ // IsInt<16>(value)
+ __ LoadConst32(mips64::V0, -1);
+ __ LoadConst32(mips64::V0, -32768);
+ // Everything else
+ __ LoadConst32(mips64::V0, 65536);
+ __ LoadConst32(mips64::V0, 65537);
+ __ LoadConst32(mips64::V0, 2147483647);
+ __ LoadConst32(mips64::V0, -32769);
+ __ LoadConst32(mips64::V0, -65536);
+ __ LoadConst32(mips64::V0, -65537);
+ __ LoadConst32(mips64::V0, -2147483647);
+ __ LoadConst32(mips64::V0, -2147483648);
+
+ const char* expected =
+ // IsUint<16>(value)
+ "ori $v0, $zero, 0\n" // __ LoadConst32(mips64::V0, 0);
+ "ori $v0, $zero, 65535\n" // __ LoadConst32(mips64::V0, 65535);
+ // IsInt<16>(value)
+ "addiu $v0, $zero, -1\n" // __ LoadConst32(mips64::V0, -1);
+ "addiu $v0, $zero, -32768\n" // __ LoadConst32(mips64::V0, -32768);
+ // Everything else
+ "lui $v0, 1\n" // __ LoadConst32(mips64::V0, 65536);
+ "lui $v0, 1\n" // __ LoadConst32(mips64::V0, 65537);
+ "ori $v0, 1\n" // "
+ "lui $v0, 32767\n" // __ LoadConst32(mips64::V0, 2147483647);
+ "ori $v0, 65535\n" // "
+ "lui $v0, 65535\n" // __ LoadConst32(mips64::V0, -32769);
+ "ori $v0, 32767\n" // "
+ "lui $v0, 65535\n" // __ LoadConst32(mips64::V0, -65536);
+ "lui $v0, 65534\n" // __ LoadConst32(mips64::V0, -65537);
+ "ori $v0, 65535\n" // "
+ "lui $v0, 32768\n" // __ LoadConst32(mips64::V0, -2147483647);
+ "ori $v0, 1\n" // "
+ "lui $v0, 32768\n"; // __ LoadConst32(mips64::V0, -2147483648);
+ DriverStr(expected, "LoadConst32");
+}
+
+static uint64_t SignExtend16To64(uint16_t n) {
+ return static_cast<int16_t>(n);
+}
+
+// The art::mips64::Mips64Assembler::LoadConst64() method uses a template
+// to minimize the number of instructions needed to load a 64-bit constant
+// value into a register. The template calls various methods which emit
+// MIPS machine instructions. This struct (class) uses the same template
+// but overrides the definitions of the methods which emit MIPS instructions
+// to use methods which simulate the operation of the corresponding MIPS
+// instructions. After invoking LoadConst64() the target register should
+// contain the same 64-bit value as was input to LoadConst64(). If the
+// simulated register doesn't contain the correct value then there is probably
+// an error in the template function.
+struct LoadConst64Tester {
+ LoadConst64Tester() {
+ // Initialize all of the registers for simulation to zero.
+ for (int r = 0; r < 32; r++) {
+ regs_[r] = 0;
+ }
+ // Clear all of the path flags.
+ loadconst64_paths_ = art::mips64::kLoadConst64PathZero;
+ }
+ void Addiu(mips64::GpuRegister rd, mips64::GpuRegister rs, uint16_t c) {
+ regs_[rd] = static_cast<int32_t>(regs_[rs] + SignExtend16To64(c));
+ }
+ void Daddiu(mips64::GpuRegister rd, mips64::GpuRegister rs, uint16_t c) {
+ regs_[rd] = regs_[rs] + SignExtend16To64(c);
+ }
+ void Dahi(mips64::GpuRegister rd, uint16_t c) {
+ regs_[rd] += SignExtend16To64(c) << 32;
+ }
+ void Dati(mips64::GpuRegister rd, uint16_t c) {
+ regs_[rd] += SignExtend16To64(c) << 48;
+ }
+ void Dinsu(mips64::GpuRegister rt, mips64::GpuRegister rs, int pos, int size) {
+ CHECK(IsUint<5>(pos - 32)) << pos;
+ CHECK(IsUint<5>(size - 1)) << size;
+ CHECK(IsUint<5>(pos + size - 33)) << pos << " + " << size;
+ uint64_t src_mask = (UINT64_C(1) << size) - 1;
+ uint64_t dsk_mask = ~(src_mask << pos);
+
+ regs_[rt] = (regs_[rt] & dsk_mask) | ((regs_[rs] & src_mask) << pos);
+ }
+ void Dsll(mips64::GpuRegister rd, mips64::GpuRegister rt, int shamt) {
+ regs_[rd] = regs_[rt] << (shamt & 0x1f);
+ }
+ void Dsll32(mips64::GpuRegister rd, mips64::GpuRegister rt, int shamt) {
+ regs_[rd] = regs_[rt] << (32 + (shamt & 0x1f));
+ }
+ void Dsrl(mips64::GpuRegister rd, mips64::GpuRegister rt, int shamt) {
+ regs_[rd] = regs_[rt] >> (shamt & 0x1f);
+ }
+ void Dsrl32(mips64::GpuRegister rd, mips64::GpuRegister rt, int shamt) {
+ regs_[rd] = regs_[rt] >> (32 + (shamt & 0x1f));
+ }
+ void Lui(mips64::GpuRegister rd, uint16_t c) {
+ regs_[rd] = SignExtend16To64(c) << 16;
+ }
+ void Ori(mips64::GpuRegister rd, mips64::GpuRegister rs, uint16_t c) {
+ regs_[rd] = regs_[rs] | c;
+ }
+ void LoadConst32(mips64::GpuRegister rd, int32_t c) {
+ CHECK_NE(rd, 0);
+ mips64::TemplateLoadConst32<LoadConst64Tester>(this, rd, c);
+ CHECK_EQ(regs_[rd], static_cast<uint64_t>(c));
+ }
+ void LoadConst64(mips64::GpuRegister rd, int64_t c) {
+ CHECK_NE(rd, 0);
+ mips64::TemplateLoadConst64<LoadConst64Tester>(this, rd, c);
+ CHECK_EQ(regs_[rd], static_cast<uint64_t>(c));
+ }
+ uint64_t regs_[32];
+
+ // Getter function for loadconst64_paths_.
+ int GetPathsCovered() {
+ return loadconst64_paths_;
+ }
+
+ void RecordLoadConst64Path(int value) {
+ loadconst64_paths_ |= value;
+ }
+
+ private:
+ // This variable holds a bitmask to tell us which paths were taken
+ // through the template function which loads 64-bit values.
+ int loadconst64_paths_;
+};
+
+TEST_F(AssemblerMIPS64Test, LoadConst64) {
+ const uint16_t imms[] = {
+ 0, 1, 2, 3, 4, 0x33, 0x66, 0x55, 0x99, 0xaa, 0xcc, 0xff, 0x5500, 0x5555,
+ 0x7ffc, 0x7ffd, 0x7ffe, 0x7fff, 0x8000, 0x8001, 0x8002, 0x8003, 0x8004,
+ 0xaaaa, 0xfffc, 0xfffd, 0xfffe, 0xffff
+ };
+ unsigned d0, d1, d2, d3;
+ LoadConst64Tester tester;
+
+ union {
+ int64_t v64;
+ uint16_t v16[4];
+ } u;
+
+ for (d3 = 0; d3 < sizeof imms / sizeof imms[0]; d3++) {
+ u.v16[3] = imms[d3];
+
+ for (d2 = 0; d2 < sizeof imms / sizeof imms[0]; d2++) {
+ u.v16[2] = imms[d2];
+
+ for (d1 = 0; d1 < sizeof imms / sizeof imms[0]; d1++) {
+ u.v16[1] = imms[d1];
+
+ for (d0 = 0; d0 < sizeof imms / sizeof imms[0]; d0++) {
+ u.v16[0] = imms[d0];
+
+ tester.LoadConst64(mips64::V0, u.v64);
+ }
+ }
+ }
+ }
+
+ // Verify that we tested all paths through the "load 64-bit value"
+ // function template.
+ EXPECT_EQ(tester.GetPathsCovered(), art::mips64::kLoadConst64PathAllPaths);
+}
+
#undef __
} // namespace art
diff --git a/dex2oat/Android.mk b/dex2oat/Android.mk
index d38cc91..a9d00d3 100644
--- a/dex2oat/Android.mk
+++ b/dex2oat/Android.mk
@@ -65,12 +65,6 @@
liblog \
libz \
libbacktrace \
- libLLVMObject \
- libLLVMBitReader \
- libLLVMMC \
- libLLVMMCParser \
- libLLVMCore \
- libLLVMSupport \
libcutils \
libunwindbacktrace \
libutils \
diff --git a/runtime/Android.bp b/runtime/Android.bp
new file mode 100644
index 0000000..012256e
--- /dev/null
+++ b/runtime/Android.bp
@@ -0,0 +1,461 @@
+//
+// Copyright (C) 2011 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Keep the __jit_debug_register_code symbol as a unique symbol during ICF for architectures where
+// we use gold as the linker (arm, x86, x86_64). The symbol is used by the debuggers to detect when
+// new jit code is generated. We don't want it to be called when a different function with the same
+// (empty) body is called.
+JIT_DEBUG_REGISTER_CODE_LDFLAGS = ["-Wl,--keep-unique,__jit_debug_register_code"]
+
+cc_defaults {
+ name: "libart_defaults",
+ defaults: ["art_defaults"],
+ host_supported: true,
+ srcs: [
+ "art_field.cc",
+ "art_method.cc",
+ "atomic.cc",
+ "barrier.cc",
+ "base/allocator.cc",
+ "base/arena_allocator.cc",
+ "base/arena_bit_vector.cc",
+ "base/bit_vector.cc",
+ "base/file_magic.cc",
+ "base/hex_dump.cc",
+ "base/logging.cc",
+ "base/mutex.cc",
+ "base/scoped_arena_allocator.cc",
+ "base/scoped_flock.cc",
+ "base/stringpiece.cc",
+ "base/stringprintf.cc",
+ "base/time_utils.cc",
+ "base/timing_logger.cc",
+ "base/unix_file/fd_file.cc",
+ "base/unix_file/random_access_file_utils.cc",
+ "check_jni.cc",
+ "class_linker.cc",
+ "class_table.cc",
+ "code_simulator_container.cc",
+ "common_throws.cc",
+ "compiler_filter.cc",
+ "debugger.cc",
+ "dex_file.cc",
+ "dex_file_verifier.cc",
+ "dex_instruction.cc",
+ "elf_file.cc",
+ "fault_handler.cc",
+ "gc/allocation_record.cc",
+ "gc/allocator/dlmalloc.cc",
+ "gc/allocator/rosalloc.cc",
+ "gc/accounting/bitmap.cc",
+ "gc/accounting/card_table.cc",
+ "gc/accounting/heap_bitmap.cc",
+ "gc/accounting/mod_union_table.cc",
+ "gc/accounting/remembered_set.cc",
+ "gc/accounting/space_bitmap.cc",
+ "gc/collector/concurrent_copying.cc",
+ "gc/collector/garbage_collector.cc",
+ "gc/collector/immune_region.cc",
+ "gc/collector/immune_spaces.cc",
+ "gc/collector/mark_compact.cc",
+ "gc/collector/mark_sweep.cc",
+ "gc/collector/partial_mark_sweep.cc",
+ "gc/collector/semi_space.cc",
+ "gc/collector/sticky_mark_sweep.cc",
+ "gc/gc_cause.cc",
+ "gc/heap.cc",
+ "gc/reference_processor.cc",
+ "gc/reference_queue.cc",
+ "gc/scoped_gc_critical_section.cc",
+ "gc/space/bump_pointer_space.cc",
+ "gc/space/dlmalloc_space.cc",
+ "gc/space/image_space.cc",
+ "gc/space/large_object_space.cc",
+ "gc/space/malloc_space.cc",
+ "gc/space/region_space.cc",
+ "gc/space/rosalloc_space.cc",
+ "gc/space/space.cc",
+ "gc/space/zygote_space.cc",
+ "gc/task_processor.cc",
+ "hprof/hprof.cc",
+ "image.cc",
+ "indirect_reference_table.cc",
+ "instrumentation.cc",
+ "intern_table.cc",
+ "interpreter/interpreter.cc",
+ "interpreter/interpreter_common.cc",
+ "interpreter/interpreter_goto_table_impl.cc",
+ "interpreter/interpreter_switch_impl.cc",
+ "interpreter/unstarted_runtime.cc",
+ "java_vm_ext.cc",
+ "jdwp/jdwp_event.cc",
+ "jdwp/jdwp_expand_buf.cc",
+ "jdwp/jdwp_handler.cc",
+ "jdwp/jdwp_main.cc",
+ "jdwp/jdwp_request.cc",
+ "jdwp/jdwp_socket.cc",
+ "jdwp/object_registry.cc",
+ "jni_env_ext.cc",
+ "jit/debugger_interface.cc",
+ "jit/jit.cc",
+ "jit/jit_code_cache.cc",
+ "jit/offline_profiling_info.cc",
+ "jit/profiling_info.cc",
+ "jit/profile_saver.cc",
+ "jni_internal.cc",
+ "jobject_comparator.cc",
+ "linear_alloc.cc",
+ "mem_map.cc",
+ "memory_region.cc",
+ "mirror/abstract_method.cc",
+ "mirror/array.cc",
+ "mirror/class.cc",
+ "mirror/dex_cache.cc",
+ "mirror/field.cc",
+ "mirror/method.cc",
+ "mirror/object.cc",
+ "mirror/reference.cc",
+ "mirror/stack_trace_element.cc",
+ "mirror/string.cc",
+ "mirror/throwable.cc",
+ "monitor.cc",
+ "native_bridge_art_interface.cc",
+ "native_stack_dump.cc",
+ "native/dalvik_system_DexFile.cc",
+ "native/dalvik_system_VMDebug.cc",
+ "native/dalvik_system_VMRuntime.cc",
+ "native/dalvik_system_VMStack.cc",
+ "native/dalvik_system_ZygoteHooks.cc",
+ "native/java_lang_Class.cc",
+ "native/java_lang_DexCache.cc",
+ "native/java_lang_Object.cc",
+ "native/java_lang_String.cc",
+ "native/java_lang_StringFactory.cc",
+ "native/java_lang_System.cc",
+ "native/java_lang_Thread.cc",
+ "native/java_lang_Throwable.cc",
+ "native/java_lang_VMClassLoader.cc",
+ "native/java_lang_ref_FinalizerReference.cc",
+ "native/java_lang_ref_Reference.cc",
+ "native/java_lang_reflect_AbstractMethod.cc",
+ "native/java_lang_reflect_Array.cc",
+ "native/java_lang_reflect_Constructor.cc",
+ "native/java_lang_reflect_Field.cc",
+ "native/java_lang_reflect_Method.cc",
+ "native/java_lang_reflect_Proxy.cc",
+ "native/java_util_concurrent_atomic_AtomicLong.cc",
+ "native/libcore_util_CharsetUtils.cc",
+ "native/org_apache_harmony_dalvik_ddmc_DdmServer.cc",
+ "native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc",
+ "native/sun_misc_Unsafe.cc",
+ "oat.cc",
+ "oat_file.cc",
+ "oat_file_assistant.cc",
+ "oat_file_manager.cc",
+ "oat_quick_method_header.cc",
+ "object_lock.cc",
+ "offsets.cc",
+ "os_linux.cc",
+ "parsed_options.cc",
+ "plugin.cc",
+ "primitive.cc",
+ "quick_exception_handler.cc",
+ "quick/inline_method_analyser.cc",
+ "reference_table.cc",
+ "reflection.cc",
+ "runtime.cc",
+ "runtime_options.cc",
+ "signal_catcher.cc",
+ "stack.cc",
+ "stack_map.cc",
+ "thread.cc",
+ "thread_list.cc",
+ "thread_pool.cc",
+ "ti/agent.cc",
+ "trace.cc",
+ "transaction.cc",
+ "type_lookup_table.cc",
+ "utf.cc",
+ "utils.cc",
+ "verifier/instruction_flags.cc",
+ "verifier/method_verifier.cc",
+ "verifier/reg_type.cc",
+ "verifier/reg_type_cache.cc",
+ "verifier/register_line.cc",
+ "well_known_classes.cc",
+ "zip_archive.cc",
+
+ "arch/context.cc",
+ "arch/instruction_set.cc",
+ "arch/instruction_set_features.cc",
+ "arch/memcmp16.cc",
+ "arch/arm/instruction_set_features_arm.cc",
+ "arch/arm/registers_arm.cc",
+ "arch/arm64/instruction_set_features_arm64.cc",
+ "arch/arm64/registers_arm64.cc",
+ "arch/mips/instruction_set_features_mips.cc",
+ "arch/mips/registers_mips.cc",
+ "arch/mips64/instruction_set_features_mips64.cc",
+ "arch/mips64/registers_mips64.cc",
+ "arch/x86/instruction_set_features_x86.cc",
+ "arch/x86/registers_x86.cc",
+ "arch/x86_64/registers_x86_64.cc",
+ "entrypoints/entrypoint_utils.cc",
+ "entrypoints/jni/jni_entrypoints.cc",
+ "entrypoints/math_entrypoints.cc",
+ "entrypoints/quick/quick_alloc_entrypoints.cc",
+ "entrypoints/quick/quick_cast_entrypoints.cc",
+ "entrypoints/quick/quick_deoptimization_entrypoints.cc",
+ "entrypoints/quick/quick_dexcache_entrypoints.cc",
+ "entrypoints/quick/quick_field_entrypoints.cc",
+ "entrypoints/quick/quick_fillarray_entrypoints.cc",
+ "entrypoints/quick/quick_instrumentation_entrypoints.cc",
+ "entrypoints/quick/quick_jni_entrypoints.cc",
+ "entrypoints/quick/quick_lock_entrypoints.cc",
+ "entrypoints/quick/quick_math_entrypoints.cc",
+ "entrypoints/quick/quick_thread_entrypoints.cc",
+ "entrypoints/quick/quick_throw_entrypoints.cc",
+ "entrypoints/quick/quick_trampoline_entrypoints.cc",
+ ],
+
+ arch: {
+ arm: {
+ clang_asflags: ["-no-integrated-as"],
+ srcs: [
+ "interpreter/mterp/mterp.cc",
+ "interpreter/mterp/out/mterp_arm.S",
+ "arch/arm/context_arm.cc",
+ "arch/arm/entrypoints_init_arm.cc",
+ "arch/arm/instruction_set_features_assembly_tests.S",
+ "arch/arm/jni_entrypoints_arm.S",
+ "arch/arm/memcmp16_arm.S",
+ "arch/arm/quick_entrypoints_arm.S",
+ "arch/arm/quick_entrypoints_cc_arm.cc",
+ "arch/arm/thread_arm.cc",
+ "arch/arm/fault_handler_arm.cc",
+ ],
+ },
+ arm64: {
+ srcs: [
+ "interpreter/mterp/mterp.cc",
+ "interpreter/mterp/out/mterp_arm64.S",
+ "arch/arm64/context_arm64.cc",
+ "arch/arm64/entrypoints_init_arm64.cc",
+ "arch/arm64/jni_entrypoints_arm64.S",
+ "arch/arm64/memcmp16_arm64.S",
+ "arch/arm64/quick_entrypoints_arm64.S",
+ "arch/arm64/thread_arm64.cc",
+ "monitor_pool.cc",
+ "arch/arm64/fault_handler_arm64.cc",
+ ],
+ },
+ x86: {
+ srcs: [
+ "interpreter/mterp/mterp.cc",
+ "interpreter/mterp/out/mterp_x86.S",
+ "arch/x86/context_x86.cc",
+ "arch/x86/entrypoints_init_x86.cc",
+ "arch/x86/jni_entrypoints_x86.S",
+ "arch/x86/memcmp16_x86.S",
+ "arch/x86/quick_entrypoints_x86.S",
+ "arch/x86/thread_x86.cc",
+ "arch/x86/fault_handler_x86.cc",
+ ],
+ },
+ x86_64: {
+ srcs: [
+ // Note that the fault_handler_x86.cc is not a mistake. This file is
+ // shared between the x86 and x86_64 architectures.
+ "interpreter/mterp/mterp.cc",
+ "interpreter/mterp/out/mterp_x86_64.S",
+ "arch/x86_64/context_x86_64.cc",
+ "arch/x86_64/entrypoints_init_x86_64.cc",
+ "arch/x86_64/jni_entrypoints_x86_64.S",
+ "arch/x86_64/memcmp16_x86_64.S",
+ "arch/x86_64/quick_entrypoints_x86_64.S",
+ "arch/x86_64/thread_x86_64.cc",
+ "monitor_pool.cc",
+ "arch/x86/fault_handler_x86.cc",
+ ],
+ },
+ mips: {
+ srcs: [
+ "interpreter/mterp/mterp.cc",
+ "interpreter/mterp/out/mterp_mips.S",
+ "arch/mips/context_mips.cc",
+ "arch/mips/entrypoints_init_mips.cc",
+ "arch/mips/jni_entrypoints_mips.S",
+ "arch/mips/memcmp16_mips.S",
+ "arch/mips/quick_entrypoints_mips.S",
+ "arch/mips/thread_mips.cc",
+ "arch/mips/fault_handler_mips.cc",
+ ],
+ },
+ mips64: {
+ srcs: [
+ "interpreter/mterp/mterp.cc",
+ "interpreter/mterp/out/mterp_mips64.S",
+ "arch/mips64/context_mips64.cc",
+ "arch/mips64/entrypoints_init_mips64.cc",
+ "arch/mips64/jni_entrypoints_mips64.S",
+ "arch/mips64/memcmp16_mips64.S",
+ "arch/mips64/quick_entrypoints_mips64.S",
+ "arch/mips64/thread_mips64.cc",
+ "monitor_pool.cc",
+ "arch/mips64/fault_handler_mips64.cc",
+ ],
+ },
+ },
+ target: {
+ android: {
+ srcs: [
+ "jdwp/jdwp_adb.cc",
+ "monitor_android.cc",
+ "runtime_android.cc",
+ "thread_android.cc",
+ ],
+ shared_libs: [
+ "libdl",
+ // For android::FileMap used by libziparchive.
+ "libutils",
+ ],
+ static_libs: [
+ // ZipArchive support, the order matters here to get all symbols.
+ "libziparchive",
+ "libz",
+ "libbase",
+ ],
+ },
+ android_arm: {
+ ldflags: JIT_DEBUG_REGISTER_CODE_LDFLAGS,
+ },
+ android_arm64: {
+ ldflags: JIT_DEBUG_REGISTER_CODE_LDFLAGS,
+ },
+ android_x86: {
+ ldflags: JIT_DEBUG_REGISTER_CODE_LDFLAGS,
+ },
+ android_x86_64: {
+ ldflags: JIT_DEBUG_REGISTER_CODE_LDFLAGS,
+ },
+ host: {
+ srcs: [
+ "monitor_linux.cc",
+ "runtime_linux.cc",
+ "thread_linux.cc",
+ ],
+ shared_libs: [
+ "libziparchive",
+ "libz-host",
+ ],
+ },
+ },
+ cflags: ["-DBUILDING_LIBART=1"],
+ generated_sources: ["art_operator_srcs"],
+ clang: true,
+ include_dirs: [
+ "art/cmdline",
+ "art/sigchainlib",
+ "art",
+ ],
+ shared_libs: [
+ "libnativehelper",
+ "libnativebridge",
+ "libnativeloader",
+ "libbacktrace",
+ "liblz4",
+ // For liblog, atrace, properties, ashmem, set_sched_policy and socket_peer_is_trusted.
+ "libcutils",
+ ],
+ static: {
+ static_libs: ["libsigchain_dummy"],
+ },
+ shared: {
+ shared_libs: ["libsigchain"],
+ },
+ export_include_dirs: ["."],
+}
+
+gensrcs {
+ name: "art_operator_srcs",
+ cmd: "art/tools/generate-operator-out.py art/runtime $in > $out",
+ srcs: [
+ "arch/instruction_set.h",
+ "base/allocator.h",
+ "base/enums.h",
+ "base/mutex.h",
+ "debugger.h",
+ "base/unix_file/fd_file.h",
+ "dex_file.h",
+ "dex_instruction.h",
+ "dex_instruction_utils.h",
+ "gc_root.h",
+ "gc/allocator_type.h",
+ "gc/allocator/rosalloc.h",
+ "gc/collector_type.h",
+ "gc/collector/gc_type.h",
+ "gc/heap.h",
+ "gc/space/region_space.h",
+ "gc/space/space.h",
+ "gc/weak_root_state.h",
+ "image.h",
+ "instrumentation.h",
+ "indirect_reference_table.h",
+ "invoke_type.h",
+ "jdwp/jdwp.h",
+ "jdwp/jdwp_constants.h",
+ "lock_word.h",
+ "mirror/class.h",
+ "oat.h",
+ "object_callbacks.h",
+ "process_state.h",
+ "quick/inline_method_analyser.h",
+ "runtime.h",
+ "stack.h",
+ "thread.h",
+ "thread_state.h",
+ "ti/agent.h",
+ "verifier/method_verifier.h",
+ ],
+ output_extension: "operator_out.cc",
+}
+
+// We always build dex2oat and dependencies, even if the host build is otherwise disabled, since
+// they are used to cross compile for the target.
+
+art_cc_library {
+ name: "libart",
+ defaults: ["libart_defaults"],
+}
+
+art_cc_library {
+ name: "libartd",
+ defaults: [
+ "libart_defaults",
+ "art_debug_defaults",
+ ],
+ // Leave the symbols in the shared library so that stack unwinders can
+ // produce meaningful name resolution.
+ strip: {
+ keep_symbols: true,
+ },
+}
+
+subdirs = [
+ "openjdkjvm",
+ "openjdkjvmti",
+ "simulator",
+]
diff --git a/runtime/Android.mk b/runtime/Android.mk
deleted file mode 100644
index 0e50eeb..0000000
--- a/runtime/Android.mk
+++ /dev/null
@@ -1,657 +0,0 @@
-#
-# Copyright (C) 2011 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-LOCAL_PATH := $(call my-dir)
-
-include art/build/Android.common_build.mk
-
-LIBART_COMMON_SRC_FILES := \
- art_field.cc \
- art_method.cc \
- atomic.cc.arm \
- barrier.cc \
- base/allocator.cc \
- base/arena_allocator.cc \
- base/arena_bit_vector.cc \
- base/bit_vector.cc \
- base/file_magic.cc \
- base/hex_dump.cc \
- base/logging.cc \
- base/mutex.cc \
- base/scoped_arena_allocator.cc \
- base/scoped_flock.cc \
- base/stringpiece.cc \
- base/stringprintf.cc \
- base/time_utils.cc \
- base/timing_logger.cc \
- base/unix_file/fd_file.cc \
- base/unix_file/random_access_file_utils.cc \
- check_jni.cc \
- class_linker.cc \
- class_table.cc \
- code_simulator_container.cc \
- common_throws.cc \
- compiler_filter.cc \
- debugger.cc \
- dex_file.cc \
- dex_file_verifier.cc \
- dex_instruction.cc \
- elf_file.cc \
- fault_handler.cc \
- gc/allocation_record.cc \
- gc/allocator/dlmalloc.cc \
- gc/allocator/rosalloc.cc \
- gc/accounting/bitmap.cc \
- gc/accounting/card_table.cc \
- gc/accounting/heap_bitmap.cc \
- gc/accounting/mod_union_table.cc \
- gc/accounting/remembered_set.cc \
- gc/accounting/space_bitmap.cc \
- gc/collector/concurrent_copying.cc \
- gc/collector/garbage_collector.cc \
- gc/collector/immune_region.cc \
- gc/collector/immune_spaces.cc \
- gc/collector/mark_compact.cc \
- gc/collector/mark_sweep.cc \
- gc/collector/partial_mark_sweep.cc \
- gc/collector/semi_space.cc \
- gc/collector/sticky_mark_sweep.cc \
- gc/gc_cause.cc \
- gc/heap.cc \
- gc/reference_processor.cc \
- gc/reference_queue.cc \
- gc/scoped_gc_critical_section.cc \
- gc/space/bump_pointer_space.cc \
- gc/space/dlmalloc_space.cc \
- gc/space/image_space.cc \
- gc/space/large_object_space.cc \
- gc/space/malloc_space.cc \
- gc/space/region_space.cc \
- gc/space/rosalloc_space.cc \
- gc/space/space.cc \
- gc/space/zygote_space.cc \
- gc/task_processor.cc \
- hprof/hprof.cc \
- image.cc \
- indirect_reference_table.cc \
- instrumentation.cc \
- intern_table.cc \
- interpreter/interpreter.cc \
- interpreter/interpreter_common.cc \
- interpreter/interpreter_goto_table_impl.cc \
- interpreter/interpreter_switch_impl.cc \
- interpreter/unstarted_runtime.cc \
- java_vm_ext.cc \
- jdwp/jdwp_event.cc \
- jdwp/jdwp_expand_buf.cc \
- jdwp/jdwp_handler.cc \
- jdwp/jdwp_main.cc \
- jdwp/jdwp_request.cc \
- jdwp/jdwp_socket.cc \
- jdwp/object_registry.cc \
- jni_env_ext.cc \
- jit/debugger_interface.cc \
- jit/jit.cc \
- jit/jit_code_cache.cc \
- jit/offline_profiling_info.cc \
- jit/profiling_info.cc \
- jit/profile_saver.cc \
- jni_internal.cc \
- jobject_comparator.cc \
- linear_alloc.cc \
- mem_map.cc \
- memory_region.cc \
- mirror/abstract_method.cc \
- mirror/array.cc \
- mirror/class.cc \
- mirror/dex_cache.cc \
- mirror/field.cc \
- mirror/method.cc \
- mirror/object.cc \
- mirror/reference.cc \
- mirror/stack_trace_element.cc \
- mirror/string.cc \
- mirror/throwable.cc \
- monitor.cc \
- native_bridge_art_interface.cc \
- native_stack_dump.cc \
- native/dalvik_system_DexFile.cc \
- native/dalvik_system_VMDebug.cc \
- native/dalvik_system_VMRuntime.cc \
- native/dalvik_system_VMStack.cc \
- native/dalvik_system_ZygoteHooks.cc \
- native/java_lang_Class.cc \
- native/java_lang_DexCache.cc \
- native/java_lang_Object.cc \
- native/java_lang_String.cc \
- native/java_lang_StringFactory.cc \
- native/java_lang_System.cc \
- native/java_lang_Thread.cc \
- native/java_lang_Throwable.cc \
- native/java_lang_VMClassLoader.cc \
- native/java_lang_ref_FinalizerReference.cc \
- native/java_lang_ref_Reference.cc \
- native/java_lang_reflect_AbstractMethod.cc \
- native/java_lang_reflect_Array.cc \
- native/java_lang_reflect_Constructor.cc \
- native/java_lang_reflect_Field.cc \
- native/java_lang_reflect_Method.cc \
- native/java_lang_reflect_Proxy.cc \
- native/java_util_concurrent_atomic_AtomicLong.cc \
- native/libcore_util_CharsetUtils.cc \
- native/org_apache_harmony_dalvik_ddmc_DdmServer.cc \
- native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc \
- native/sun_misc_Unsafe.cc \
- oat.cc \
- oat_file.cc \
- oat_file_assistant.cc \
- oat_file_manager.cc \
- oat_quick_method_header.cc \
- object_lock.cc \
- offsets.cc \
- os_linux.cc \
- parsed_options.cc \
- plugin.cc \
- primitive.cc \
- quick_exception_handler.cc \
- quick/inline_method_analyser.cc \
- reference_table.cc \
- reflection.cc \
- runtime.cc \
- runtime_options.cc \
- signal_catcher.cc \
- stack.cc \
- stack_map.cc \
- thread.cc \
- thread_list.cc \
- thread_pool.cc \
- ti/agent.cc \
- trace.cc \
- transaction.cc \
- type_lookup_table.cc \
- utf.cc \
- utils.cc \
- verifier/instruction_flags.cc \
- verifier/method_verifier.cc \
- verifier/reg_type.cc \
- verifier/reg_type_cache.cc \
- verifier/register_line.cc \
- well_known_classes.cc \
- zip_archive.cc
-
-LIBART_COMMON_SRC_FILES += \
- arch/context.cc \
- arch/instruction_set.cc \
- arch/instruction_set_features.cc \
- arch/memcmp16.cc \
- arch/arm/instruction_set_features_arm.cc \
- arch/arm/registers_arm.cc \
- arch/arm64/instruction_set_features_arm64.cc \
- arch/arm64/registers_arm64.cc \
- arch/mips/instruction_set_features_mips.cc \
- arch/mips/registers_mips.cc \
- arch/mips64/instruction_set_features_mips64.cc \
- arch/mips64/registers_mips64.cc \
- arch/x86/instruction_set_features_x86.cc \
- arch/x86/registers_x86.cc \
- arch/x86_64/registers_x86_64.cc \
- entrypoints/entrypoint_utils.cc \
- entrypoints/jni/jni_entrypoints.cc \
- entrypoints/math_entrypoints.cc \
- entrypoints/quick/quick_alloc_entrypoints.cc \
- entrypoints/quick/quick_cast_entrypoints.cc \
- entrypoints/quick/quick_deoptimization_entrypoints.cc \
- entrypoints/quick/quick_dexcache_entrypoints.cc \
- entrypoints/quick/quick_field_entrypoints.cc \
- entrypoints/quick/quick_fillarray_entrypoints.cc \
- entrypoints/quick/quick_instrumentation_entrypoints.cc \
- entrypoints/quick/quick_jni_entrypoints.cc \
- entrypoints/quick/quick_lock_entrypoints.cc \
- entrypoints/quick/quick_math_entrypoints.cc \
- entrypoints/quick/quick_thread_entrypoints.cc \
- entrypoints/quick/quick_throw_entrypoints.cc \
- entrypoints/quick/quick_trampoline_entrypoints.cc
-
-LIBART_TARGET_LDFLAGS :=
-LIBART_HOST_LDFLAGS :=
-
-# Keep the __jit_debug_register_code symbol as a unique symbol during ICF for architectures where
-# we use gold as the linker (arm, x86, x86_64). The symbol is used by the debuggers to detect when
-# new jit code is generated. We don't want it to be called when a different function with the same
-# (empty) body is called.
-JIT_DEBUG_REGISTER_CODE_LDFLAGS := -Wl,--keep-unique,__jit_debug_register_code
-LIBART_TARGET_LDFLAGS_arm := $(JIT_DEBUG_REGISTER_CODE_LDFLAGS)
-LIBART_TARGET_LDFLAGS_arm64 := $(JIT_DEBUG_REGISTER_CODE_LDFLAGS)
-LIBART_TARGET_LDFLAGS_x86 := $(JIT_DEBUG_REGISTER_CODE_LDFLAGS)
-LIBART_TARGET_LDFLAGS_x86_64 := $(JIT_DEBUG_REGISTER_CODE_LDFLAGS)
-JIT_DEBUG_REGISTER_CODE_LDFLAGS :=
-
-LIBART_TARGET_SRC_FILES := \
- $(LIBART_COMMON_SRC_FILES) \
- jdwp/jdwp_adb.cc \
- monitor_android.cc \
- runtime_android.cc \
- thread_android.cc
-
-LIBART_TARGET_SRC_FILES_arm := \
- interpreter/mterp/mterp.cc \
- interpreter/mterp/out/mterp_arm.S \
- arch/arm/context_arm.cc.arm \
- arch/arm/entrypoints_init_arm.cc \
- arch/arm/instruction_set_features_assembly_tests.S \
- arch/arm/jni_entrypoints_arm.S \
- arch/arm/memcmp16_arm.S \
- arch/arm/quick_entrypoints_arm.S \
- arch/arm/quick_entrypoints_cc_arm.cc \
- arch/arm/thread_arm.cc \
- arch/arm/fault_handler_arm.cc
-
-LIBART_TARGET_SRC_FILES_arm64 := \
- interpreter/mterp/mterp.cc \
- interpreter/mterp/out/mterp_arm64.S \
- arch/arm64/context_arm64.cc \
- arch/arm64/entrypoints_init_arm64.cc \
- arch/arm64/jni_entrypoints_arm64.S \
- arch/arm64/memcmp16_arm64.S \
- arch/arm64/quick_entrypoints_arm64.S \
- arch/arm64/thread_arm64.cc \
- monitor_pool.cc \
- arch/arm64/fault_handler_arm64.cc
-
-LIBART_SRC_FILES_x86 := \
- interpreter/mterp/mterp.cc \
- interpreter/mterp/out/mterp_x86.S \
- arch/x86/context_x86.cc \
- arch/x86/entrypoints_init_x86.cc \
- arch/x86/jni_entrypoints_x86.S \
- arch/x86/memcmp16_x86.S \
- arch/x86/quick_entrypoints_x86.S \
- arch/x86/thread_x86.cc \
- arch/x86/fault_handler_x86.cc
-
-LIBART_TARGET_SRC_FILES_x86 := \
- $(LIBART_SRC_FILES_x86)
-
-# Note that the fault_handler_x86.cc is not a mistake. This file is
-# shared between the x86 and x86_64 architectures.
-LIBART_SRC_FILES_x86_64 := \
- interpreter/mterp/mterp.cc \
- interpreter/mterp/out/mterp_x86_64.S \
- arch/x86_64/context_x86_64.cc \
- arch/x86_64/entrypoints_init_x86_64.cc \
- arch/x86_64/jni_entrypoints_x86_64.S \
- arch/x86_64/memcmp16_x86_64.S \
- arch/x86_64/quick_entrypoints_x86_64.S \
- arch/x86_64/thread_x86_64.cc \
- monitor_pool.cc \
- arch/x86/fault_handler_x86.cc
-
-LIBART_TARGET_SRC_FILES_x86_64 := \
- $(LIBART_SRC_FILES_x86_64) \
-
-LIBART_TARGET_SRC_FILES_mips := \
- interpreter/mterp/mterp.cc \
- interpreter/mterp/out/mterp_mips.S \
- arch/mips/context_mips.cc \
- arch/mips/entrypoints_init_mips.cc \
- arch/mips/jni_entrypoints_mips.S \
- arch/mips/memcmp16_mips.S \
- arch/mips/quick_entrypoints_mips.S \
- arch/mips/thread_mips.cc \
- arch/mips/fault_handler_mips.cc
-
-LIBART_TARGET_SRC_FILES_mips64 := \
- interpreter/mterp/mterp.cc \
- interpreter/mterp/out/mterp_mips64.S \
- arch/mips64/context_mips64.cc \
- arch/mips64/entrypoints_init_mips64.cc \
- arch/mips64/jni_entrypoints_mips64.S \
- arch/mips64/memcmp16_mips64.S \
- arch/mips64/quick_entrypoints_mips64.S \
- arch/mips64/thread_mips64.cc \
- monitor_pool.cc \
- arch/mips64/fault_handler_mips64.cc
-
-LIBART_HOST_SRC_FILES := \
- $(LIBART_COMMON_SRC_FILES) \
- monitor_linux.cc \
- runtime_linux.cc \
- thread_linux.cc
-
-LIBART_HOST_SRC_FILES_32 := \
- $(LIBART_SRC_FILES_x86)
-
-LIBART_HOST_SRC_FILES_64 := \
- $(LIBART_SRC_FILES_x86_64)
-
-LIBART_ENUM_OPERATOR_OUT_HEADER_FILES := \
- arch/instruction_set.h \
- base/allocator.h \
- base/enums.h \
- base/mutex.h \
- debugger.h \
- base/unix_file/fd_file.h \
- dex_file.h \
- dex_instruction.h \
- dex_instruction_utils.h \
- gc_root.h \
- gc/allocator_type.h \
- gc/allocator/rosalloc.h \
- gc/collector_type.h \
- gc/collector/gc_type.h \
- gc/heap.h \
- gc/space/region_space.h \
- gc/space/space.h \
- gc/weak_root_state.h \
- image.h \
- instrumentation.h \
- indirect_reference_table.h \
- invoke_type.h \
- jdwp/jdwp.h \
- jdwp/jdwp_constants.h \
- lock_word.h \
- mirror/class.h \
- oat.h \
- object_callbacks.h \
- process_state.h \
- quick/inline_method_analyser.h \
- runtime.h \
- stack.h \
- thread.h \
- thread_state.h \
- ti/agent.h \
- verifier/method_verifier.h
-
-LIBOPENJDKJVM_SRC_FILES := openjdkjvm/OpenjdkJvm.cc
-LIBOPENJDKJVMTI_SRC_FILES := openjdkjvmti/OpenjdkJvmTi.cc
-
-LIBART_CFLAGS := -DBUILDING_LIBART=1
-
-LIBART_TARGET_CFLAGS :=
-LIBART_HOST_CFLAGS :=
-
-# $(1): target or host
-# $(2): ndebug or debug
-# $(3): static or shared (note that static only applies for host)
-# $(4): module name : either libart, libopenjdkjvm, or libopenjdkjvmti
-define build-runtime-library
- ifneq ($(1),target)
- ifneq ($(1),host)
- $$(error expected target or host for argument 1, received $(1))
- endif
- endif
- ifneq ($(2),ndebug)
- ifneq ($(2),debug)
- $$(error expected ndebug or debug for argument 2, received $(2))
- endif
- endif
- ifneq ($(4),libart)
- ifneq ($(4),libopenjdkjvm)
- ifneq ($(4),libopenjdkjvmti)
- $$(error expected libart, libopenjdkjvmti, or libopenjdkjvm for argument 4, received $(4))
- endif
- endif
- endif
-
- art_target_or_host := $(1)
- art_ndebug_or_debug := $(2)
- art_static_or_shared := $(3)
-
- include $$(CLEAR_VARS)
- LOCAL_CPP_EXTENSION := $$(ART_CPP_EXTENSION)
- ifeq ($$(art_ndebug_or_debug),ndebug)
- LOCAL_MODULE := $(4)
- ifeq ($$(art_target_or_host),target)
- LOCAL_FDO_SUPPORT := true
- endif
- else # debug
- LOCAL_MODULE := $(4)d
- endif
-
- LOCAL_MODULE_TAGS := optional
-
- ifeq ($$(art_static_or_shared),static)
- LOCAL_MODULE_CLASS := STATIC_LIBRARIES
- else
- LOCAL_MODULE_CLASS := SHARED_LIBRARIES
- endif
-
- ifeq ($(4),libart)
- ifeq ($$(art_target_or_host),target)
- LOCAL_SRC_FILES := $$(LIBART_TARGET_SRC_FILES)
- $$(foreach arch,$$(ART_TARGET_SUPPORTED_ARCH), \
- $$(eval LOCAL_SRC_FILES_$$(arch) := $$$$(LIBART_TARGET_SRC_FILES_$$(arch))))
- else # host
- LOCAL_SRC_FILES := $$(LIBART_HOST_SRC_FILES)
- LOCAL_SRC_FILES_32 := $$(LIBART_HOST_SRC_FILES_32)
- LOCAL_SRC_FILES_64 := $$(LIBART_HOST_SRC_FILES_64)
- LOCAL_IS_HOST_MODULE := true
- endif
- else
- ifeq ($(4),libopenjdkjvmti)
- LOCAL_SRC_FILES := $$(LIBOPENJDKJVMTI_SRC_FILES)
- else # libopenjdkjvm
- LOCAL_SRC_FILES := $$(LIBOPENJDKJVM_SRC_FILES)
- endif
- ifeq ($$(art_target_or_host),host)
- LOCAL_IS_HOST_MODULE := true
- endif
- endif
-
-ifeq ($(4),libart)
- GENERATED_SRC_DIR := $$(call local-generated-sources-dir)
- ENUM_OPERATOR_OUT_CC_FILES := $$(patsubst %.h,%_operator_out.cc,$$(LIBART_ENUM_OPERATOR_OUT_HEADER_FILES))
- ENUM_OPERATOR_OUT_GEN := $$(addprefix $$(GENERATED_SRC_DIR)/,$$(ENUM_OPERATOR_OUT_CC_FILES))
-
-$$(ENUM_OPERATOR_OUT_GEN): art/tools/generate-operator-out.py
-$$(ENUM_OPERATOR_OUT_GEN): PRIVATE_CUSTOM_TOOL = art/tools/generate-operator-out.py $(LOCAL_PATH) $$< > $$@
-$$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PATH)/%.h
- $$(transform-generated-source)
-
- LOCAL_GENERATED_SOURCES += $$(ENUM_OPERATOR_OUT_GEN)
-endif
-
- LOCAL_CFLAGS := $$(LIBART_CFLAGS)
- LOCAL_LDFLAGS := $$(LIBART_LDFLAGS)
- ifeq ($$(art_target_or_host),target)
- LOCAL_CFLAGS += $$(LIBART_TARGET_CFLAGS)
- LOCAL_LDFLAGS += $$(LIBART_TARGET_LDFLAGS)
- $$(foreach arch,$$(ART_TARGET_SUPPORTED_ARCH), \
- $$(eval LOCAL_LDFLAGS_$$(arch) := $$(LIBART_TARGET_LDFLAGS_$$(arch))))
- else #host
- LOCAL_CFLAGS += $$(LIBART_HOST_CFLAGS)
- LOCAL_LDFLAGS += $$(LIBART_HOST_LDFLAGS)
- ifeq ($$(art_static_or_shared),static)
- LOCAL_LDFLAGS += -static
- endif
- endif
-
- # Clang usage
- ifeq ($$(art_target_or_host),target)
- $$(eval LOCAL_CLANG := $$(ART_TARGET_CLANG))
- $$(eval $$(call set-target-local-cflags-vars,$(2)))
- LOCAL_ASFLAGS_arm += -no-integrated-as
- else # host
- LOCAL_CLANG := $$(ART_HOST_CLANG)
- LOCAL_LDLIBS += -ldl -lpthread
- ifeq ($$(HOST_OS),linux)
- LOCAL_LDLIBS += -lrt
- endif
- LOCAL_CFLAGS += $$(ART_HOST_CFLAGS)
- LOCAL_ASFLAGS += $$(ART_HOST_ASFLAGS)
-
- ifeq ($$(art_ndebug_or_debug),debug)
- LOCAL_CFLAGS += $$(ART_HOST_DEBUG_CFLAGS)
- LOCAL_ASFLAGS += $$(ART_HOST_DEBUG_ASFLAGS)
- else
- LOCAL_CFLAGS += $$(ART_HOST_NON_DEBUG_CFLAGS)
- LOCAL_ASFLAGS += $$(ART_HOST_NON_DEBUG_ASFLAGS)
- endif
- LOCAL_MULTILIB := both
- endif
-
- LOCAL_C_INCLUDES += $$(ART_C_INCLUDES)
- LOCAL_C_INCLUDES += art/cmdline
- LOCAL_C_INCLUDES += art/sigchainlib
- LOCAL_C_INCLUDES += art
-
- ifeq ($$(art_static_or_shared),static)
- LOCAL_STATIC_LIBRARIES := libnativehelper
- LOCAL_STATIC_LIBRARIES += libnativebridge
- LOCAL_STATIC_LIBRARIES += libnativeloader
- LOCAL_STATIC_LIBRARIES += libsigchain_dummy
- LOCAL_STATIC_LIBRARIES += libbacktrace
- LOCAL_STATIC_LIBRARIES += liblz4
- else
- LOCAL_SHARED_LIBRARIES := libnativehelper
- LOCAL_SHARED_LIBRARIES += libnativebridge
- LOCAL_SHARED_LIBRARIES += libnativeloader
- LOCAL_SHARED_LIBRARIES += libsigchain
- LOCAL_SHARED_LIBRARIES += libbacktrace
- LOCAL_SHARED_LIBRARIES += liblz4
- endif
-
- ifeq ($$(art_target_or_host),target)
- LOCAL_SHARED_LIBRARIES += libdl
- # ZipArchive support, the order matters here to get all symbols.
- LOCAL_STATIC_LIBRARIES := libziparchive libz libbase
- # For android::FileMap used by libziparchive.
- LOCAL_SHARED_LIBRARIES += libutils
- # For liblog, atrace, properties, ashmem, set_sched_policy and socket_peer_is_trusted.
- LOCAL_SHARED_LIBRARIES += libcutils
- else # host
- ifeq ($$(art_static_or_shared),static)
- LOCAL_STATIC_LIBRARIES += libziparchive-host libz
- # For ashmem_create_region.
- LOCAL_STATIC_LIBRARIES += libcutils
- else
- LOCAL_SHARED_LIBRARIES += libziparchive-host libz-host
- # For ashmem_create_region.
- LOCAL_SHARED_LIBRARIES += libcutils
- endif
- endif
-
- ifeq ($(4),libopenjdkjvm)
- ifeq ($$(art_ndebug_or_debug),ndebug)
- LOCAL_SHARED_LIBRARIES += libart
- else
- LOCAL_SHARED_LIBRARIES += libartd
- endif
- LOCAL_NOTICE_FILE := $(LOCAL_PATH)/openjdkjvm/NOTICE
- else
- ifeq ($(4),libopenjdkjvmti)
- ifeq ($$(art_ndebug_or_debug),ndebug)
- LOCAL_SHARED_LIBRARIES += libart
- else
- LOCAL_SHARED_LIBRARIES += libartd
- endif
- LOCAL_NOTICE_FILE := $(LOCAL_PATH)/openjdkjvmti/NOTICE
- endif
- endif
- LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk
- LOCAL_ADDITIONAL_DEPENDENCIES += $$(LOCAL_PATH)/Android.mk
-
- ifeq ($$(art_target_or_host),target)
- LOCAL_MODULE_TARGET_ARCH := $$(ART_TARGET_SUPPORTED_ARCH)
- endif
-
- LOCAL_NATIVE_COVERAGE := $(ART_COVERAGE)
-
- ifeq ($$(art_target_or_host),target)
- ifneq ($$(art_ndebug_or_debug),debug)
- # Leave the symbols in the shared library so that stack unwinders can
- # produce meaningful name resolution.
- LOCAL_STRIP_MODULE := keep_symbols
- endif
- include $$(BUILD_SHARED_LIBRARY)
- else # host
- ifeq ($$(art_static_or_shared),static)
- include $$(BUILD_HOST_STATIC_LIBRARY)
- else
- include $$(BUILD_HOST_SHARED_LIBRARY)
- endif
- endif
-
- # Clear locally defined variables.
- GENERATED_SRC_DIR :=
- ENUM_OPERATOR_OUT_CC_FILES :=
- ENUM_OPERATOR_OUT_GEN :=
- art_target_or_host :=
- art_ndebug_or_debug :=
- art_static_or_shared :=
-endef
-
-# We always build dex2oat and dependencies, even if the host build is otherwise disabled, since
-# they are used to cross compile for the target.
-ifeq ($(ART_BUILD_HOST_NDEBUG),true)
- $(eval $(call build-runtime-library,host,ndebug,shared,libart))
- $(eval $(call build-runtime-library,host,ndebug,shared,libopenjdkjvm))
- $(eval $(call build-runtime-library,host,ndebug,shared,libopenjdkjvmti))
- ifeq ($(ART_BUILD_HOST_STATIC),true)
- $(eval $(call build-runtime-library,host,ndebug,static,libart))
- $(eval $(call build-runtime-library,host,ndebug,static,libopenjdkjvm))
- $(eval $(call build-runtime-library,host,ndebug,static,libopenjdkjvmti))
- endif
-endif
-ifeq ($(ART_BUILD_HOST_DEBUG),true)
- $(eval $(call build-runtime-library,host,debug,shared,libart))
- $(eval $(call build-runtime-library,host,debug,shared,libopenjdkjvm))
- $(eval $(call build-runtime-library,host,debug,shared,libopenjdkjvmti))
- ifeq ($(ART_BUILD_HOST_STATIC),true)
- $(eval $(call build-runtime-library,host,debug,static,libart))
- $(eval $(call build-runtime-library,host,debug,static,libopenjdkjvm))
- $(eval $(call build-runtime-library,host,debug,static,libopenjdkjvmti))
- endif
-endif
-
-ifeq ($(ART_BUILD_TARGET_NDEBUG),true)
-# $(error $(call build-runtime-library,target,ndebug))
- $(eval $(call build-runtime-library,target,ndebug,shared,libart))
- $(eval $(call build-runtime-library,target,ndebug,shared,libopenjdkjvm))
- $(eval $(call build-runtime-library,target,ndebug,shared,libopenjdkjvmti))
-endif
-ifeq ($(ART_BUILD_TARGET_DEBUG),true)
- $(eval $(call build-runtime-library,target,debug,shared,libart))
- $(eval $(call build-runtime-library,target,debug,shared,libopenjdkjvm))
- $(eval $(call build-runtime-library,target,debug,shared,libopenjdkjvmti))
-endif
-
-# Clear locally defined variables.
-LOCAL_PATH :=
-LIBART_COMMON_SRC_FILES :=
-LIBART_HOST_LDFLAGS :=
-LIBART_TARGET_LDFLAGS :=
-LIBART_TARGET_LDFLAGS_arm :=
-LIBART_TARGET_LDFLAGS_arm64 :=
-LIBART_TARGET_LDFLAGS_x86 :=
-LIBART_TARGET_LDFLAGS_x86_64 :=
-LIBART_TARGET_LDFLAGS_mips :=
-LIBART_TARGET_LDFLAGS_mips64 :=
-LIBART_TARGET_SRC_FILES :=
-LIBART_TARGET_SRC_FILES_arm :=
-LIBART_TARGET_SRC_FILES_arm64 :=
-LIBART_TARGET_SRC_FILES_x86 :=
-LIBART_TARGET_SRC_FILES_x86_64 :=
-LIBART_TARGET_SRC_FILES_mips :=
-LIBART_TARGET_SRC_FILES_mips64 :=
-LIBART_HOST_SRC_FILES :=
-LIBART_HOST_SRC_FILES_32 :=
-LIBART_HOST_SRC_FILES_64 :=
-LIBART_ENUM_OPERATOR_OUT_HEADER_FILES :=
-LIBART_CFLAGS :=
-LIBART_TARGET_CFLAGS :=
-LIBART_HOST_CFLAGS :=
-build-runtime-library :=
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 651669e..85d307b 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -202,8 +202,10 @@
immune_spaces_.Reset();
bytes_moved_.StoreRelaxed(0);
objects_moved_.StoreRelaxed(0);
- if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
- GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
+ GcCause gc_cause = GetCurrentIteration()->GetGcCause();
+ if (gc_cause == kGcCauseExplicit ||
+ gc_cause == kGcCauseForNativeAlloc ||
+ gc_cause == kGcCauseCollectorTransition ||
GetCurrentIteration()->GetClearSoftReferences()) {
force_evacuate_all_ = true;
} else {
diff --git a/runtime/gc/collector_type.h b/runtime/gc/collector_type.h
index 7899a7c..b342cc7 100644
--- a/runtime/gc/collector_type.h
+++ b/runtime/gc/collector_type.h
@@ -40,6 +40,8 @@
kCollectorTypeHeapTrim,
// A (mostly) concurrent copying collector.
kCollectorTypeCC,
+ // The background compaction of the concurrent copying collector.
+ kCollectorTypeCCBackground,
// Instrumentation critical section fake collector.
kCollectorTypeInstrumentation,
// Fake collector for adding or removing application image spaces.
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 638c1d8..9e454ca 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -731,6 +731,7 @@
}
void Heap::DisableMovingGc() {
+ CHECK(!kUseReadBarrier);
if (IsMovingGc(foreground_collector_type_)) {
foreground_collector_type_ = kCollectorTypeCMS;
}
@@ -970,7 +971,8 @@
// Don't delay for debug builds since we may want to stress test the GC.
// If background_collector_type_ is kCollectorTypeHomogeneousSpaceCompact then we have
// special handling which does a homogenous space compaction once but then doesn't transition
- // the collector.
+ // the collector. Similarly, we invoke a full compaction for kCollectorTypeCC but don't
+ // transition the collector.
RequestCollectorTransition(background_collector_type_,
kIsDebugBuild ? 0 : kCollectorTransitionWait);
}
@@ -1384,6 +1386,16 @@
} else {
VLOG(gc) << "Homogeneous compaction ignored due to jank perceptible process state";
}
+ } else if (desired_collector_type == kCollectorTypeCCBackground) {
+ DCHECK(kUseReadBarrier);
+ if (!CareAboutPauseTimes()) {
+ // Invoke CC full compaction.
+ CollectGarbageInternal(collector::kGcTypeFull,
+ kGcCauseCollectorTransition,
+ /*clear_soft_references*/false);
+ } else {
+ VLOG(gc) << "CC background compaction ignored due to jank perceptible process state";
+ }
} else {
TransitionCollector(desired_collector_type);
}
@@ -1841,6 +1853,10 @@
break;
}
case kAllocatorTypeNonMoving: {
+ if (kUseReadBarrier) {
+ // DisableMovingGc() isn't compatible with CC.
+ break;
+ }
// Try to transition the heap if the allocation failure was due to the space being full.
if (!IsOutOfMemoryOnAllocation<false>(allocator, alloc_size)) {
// If we aren't out of memory then the OOM was probably from the non moving space being
@@ -2109,6 +2125,8 @@
}
void Heap::TransitionCollector(CollectorType collector_type) {
+ // Collector transition must not happen with CC
+ CHECK(!kUseReadBarrier);
if (collector_type == collector_type_) {
return;
}
@@ -3798,6 +3816,12 @@
if (desired_collector_type_ == collector_type_ || !CanAddHeapTask(self)) {
return;
}
+ if (collector_type_ == kCollectorTypeCC) {
+ // For CC, we invoke a full compaction when going to the background, but the collector type
+ // doesn't change.
+ DCHECK_EQ(desired_collector_type_, kCollectorTypeCCBackground);
+ }
+ DCHECK_NE(collector_type_, kCollectorTypeCCBackground);
CollectorTransitionTask* added_task = nullptr;
const uint64_t target_time = NanoTime() + delta_time;
{
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index be8ed40..b357b87 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -537,7 +537,7 @@
void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_, !native_histogram_lock_);
// Do a pending collector transition.
- void DoPendingCollectorTransition() REQUIRES(!*gc_complete_lock_);
+ void DoPendingCollectorTransition() REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
// Deflate monitors, ... and trim the spaces.
void Trim(Thread* self) REQUIRES(!*gc_complete_lock_);
@@ -708,8 +708,6 @@
if (IsGcConcurrent() && IsMovingGc(collector_type_)) {
// Assume no transition when a concurrent moving collector is used.
DCHECK_EQ(collector_type_, foreground_collector_type_);
- DCHECK_EQ(foreground_collector_type_, background_collector_type_)
- << "Assume no transition such that collector_type_ won't change";
return true;
}
return false;
@@ -828,6 +826,7 @@
collector_type == kCollectorTypeSS ||
collector_type == kCollectorTypeGSS ||
collector_type == kCollectorTypeCC ||
+ collector_type == kCollectorTypeCCBackground ||
collector_type == kCollectorTypeMC ||
collector_type == kCollectorTypeHomogeneousSpaceCompact;
}
@@ -997,7 +996,9 @@
// What kind of concurrency behavior is the runtime after? Currently true for concurrent mark
// sweep GC, false for other GC types.
bool IsGcConcurrent() const ALWAYS_INLINE {
- return collector_type_ == kCollectorTypeCMS || collector_type_ == kCollectorTypeCC;
+ return collector_type_ == kCollectorTypeCMS ||
+ collector_type_ == kCollectorTypeCC ||
+ collector_type_ == kCollectorTypeCCBackground;
}
// Trim the managed and native spaces by releasing unused memory back to the OS.
diff --git a/runtime/openjdkjvm/Android.bp b/runtime/openjdkjvm/Android.bp
new file mode 100644
index 0000000..3e8dc8c
--- /dev/null
+++ b/runtime/openjdkjvm/Android.bp
@@ -0,0 +1,38 @@
+//
+// Copyright (C) 2016 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+cc_defaults {
+ name: "libopenjdkjvm_defaults",
+ defaults: ["art_defaults"],
+ host_supported: true,
+ srcs: ["OpenjdkJvm.cc"],
+ include_dirs: ["art/runtime"],
+ shared_libs: ["libnativehelper"],
+}
+
+art_cc_library {
+ name: "libopenjdkjvm",
+ defaults: ["libopenjdkjvm_defaults"],
+ shared_libs: ["libart"],
+}
+
+art_cc_library {
+ name: "libopenjdkjvmd",
+ defaults: [
+ "libopenjdkjvm_defaults",
+ "art_debug_defaults",
+ ],
+ shared_libs: ["libartd"],
+}
diff --git a/runtime/openjdkjvm/Android.mk b/runtime/openjdkjvm/Android.mk
deleted file mode 100644
index 9b7404e..0000000
--- a/runtime/openjdkjvm/Android.mk
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Copyright (C) 2016 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-LOCAL_PATH := $(call my-dir)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := openjdkjvm-phony
-include $(BUILD_PHONY_PACKAGE)
diff --git a/runtime/openjdkjvmti/Android.bp b/runtime/openjdkjvmti/Android.bp
new file mode 100644
index 0000000..4430248
--- /dev/null
+++ b/runtime/openjdkjvmti/Android.bp
@@ -0,0 +1,38 @@
+//
+// Copyright (C) 2016 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+cc_defaults {
+ name: "libopenjdkjvmti_defaults",
+ defaults: ["art_defaults"],
+ host_supported: true,
+ srcs: ["OpenjdkJvmTi.cc"],
+ include_dirs: ["art/runtime"],
+ shared_libs: ["libnativehelper"],
+}
+
+art_cc_library {
+ name: "libopenjdkjvmti",
+ defaults: ["libopenjdkjvmti_defaults"],
+ shared_libs: ["libart"],
+}
+
+art_cc_library {
+ name: "libopenjdkjvmtid",
+ defaults: [
+ "libopenjdkjvmti_defaults",
+ "art_debug_defaults",
+ ],
+ shared_libs: ["libartd"],
+}
diff --git a/runtime/openjdkjvmti/Android.mk b/runtime/openjdkjvmti/Android.mk
deleted file mode 100644
index 1de20e8..0000000
--- a/runtime/openjdkjvmti/Android.mk
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Copyright (C) 2016 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-LOCAL_PATH := $(call my-dir)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := openjdkjvmti-phony
-include $(BUILD_PHONY_PACKAGE)
diff --git a/runtime/simulator/Android.bp b/runtime/simulator/Android.bp
new file mode 100644
index 0000000..05f44e3
--- /dev/null
+++ b/runtime/simulator/Android.bp
@@ -0,0 +1,51 @@
+//
+// Copyright (C) 2015 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+cc_defaults {
+ name: "libart_simulator_defaults",
+ defaults: ["art_defaults"],
+ srcs: [
+ "code_simulator.cc",
+ "code_simulator_arm64.cc",
+ ],
+ shared_libs: [
+ "liblog",
+ ],
+ cflags: ["-DVIXL_INCLUDE_SIMULATOR_AARCH64"],
+ export_include_dirs: ["."],
+ include_dirs: ["art/runtime"],
+}
+
+cc_library_host_shared {
+ name: "libart-simulator",
+ defaults: ["libart_simulator_defaults"],
+ shared_libs: [
+ "libart",
+ "libvixl-arm64",
+ ],
+}
+
+cc_library_host_shared {
+ name: "libart-simulatord",
+ defaults: [
+ "libart_simulator_defaults",
+ "art_debug_defaults",
+ ],
+ shared_libs: [
+ "libartd",
+ "libvixld-arm64",
+ ],
+}
diff --git a/runtime/simulator/Android.mk b/runtime/simulator/Android.mk
deleted file mode 100644
index e39af2d..0000000
--- a/runtime/simulator/Android.mk
+++ /dev/null
@@ -1,111 +0,0 @@
-#
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-LOCAL_PATH := $(call my-dir)
-
-include art/build/Android.common_build.mk
-
-LIBART_SIMULATOR_SRC_FILES := \
- code_simulator.cc \
- code_simulator_arm64.cc
-
-LIBART_SIMULATOR_CFLAGS := \
- -DVIXL_INCLUDE_SIMULATOR_AARCH64
-
-# $(1): target or host
-# $(2): ndebug or debug
-define build-libart-simulator
- ifneq ($(1),target)
- ifneq ($(1),host)
- $$(error expected target or host for argument 1, received $(1))
- endif
- endif
- ifneq ($(2),ndebug)
- ifneq ($(2),debug)
- $$(error expected ndebug or debug for argument 2, received $(2))
- endif
- endif
-
- art_target_or_host := $(1)
- art_ndebug_or_debug := $(2)
-
- include $(CLEAR_VARS)
- ifeq ($$(art_target_or_host),host)
- LOCAL_IS_HOST_MODULE := true
- endif
- LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
- ifeq ($$(art_ndebug_or_debug),ndebug)
- LOCAL_MODULE := libart-simulator
- else # debug
- LOCAL_MODULE := libartd-simulator
- endif
-
- LOCAL_MODULE_TAGS := optional
- LOCAL_MODULE_CLASS := SHARED_LIBRARIES
-
- LOCAL_SRC_FILES := $$(LIBART_SIMULATOR_SRC_FILES)
- LOCAL_CFLAGS := $$(LIBART_SIMULATOR_CFLAGS)
-
- ifeq ($$(art_target_or_host),target)
- $(call set-target-local-clang-vars)
- $(call set-target-local-cflags-vars,$(2))
- else # host
- LOCAL_CLANG := $(ART_HOST_CLANG)
- LOCAL_LDLIBS := $(ART_HOST_LDLIBS)
- LOCAL_CFLAGS += $(ART_HOST_CFLAGS)
- LOCAL_ASFLAGS += $(ART_HOST_ASFLAGS)
- ifeq ($$(art_ndebug_or_debug),debug)
- LOCAL_CFLAGS += $(ART_HOST_DEBUG_CFLAGS)
- LOCAL_ASFLAGS += $(ART_HOST_DEBUG_ASFLAGS)
- else
- LOCAL_CFLAGS += $(ART_HOST_NON_DEBUG_CFLAGS)
- LOCAL_ASFLAGS += $(ART_HOST_NON_DEBUG_ASFLAGS)
- endif
- endif
-
- LOCAL_SHARED_LIBRARIES += liblog
- ifeq ($$(art_ndebug_or_debug),debug)
- LOCAL_SHARED_LIBRARIES += libartd
- else
- LOCAL_SHARED_LIBRARIES += libart
- endif
-
- LOCAL_C_INCLUDES += $(ART_C_INCLUDES) art/runtime
- LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)
- LOCAL_MULTILIB := both
-
- LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk
- LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
- LOCAL_NATIVE_COVERAGE := $(ART_COVERAGE)
- # For simulator_arm64.
- ifeq ($$(art_ndebug_or_debug),debug)
- LOCAL_SHARED_LIBRARIES += libvixld-arm64
- else
- LOCAL_SHARED_LIBRARIES += libvixl-arm64
- endif
- ifeq ($$(art_target_or_host),target)
- include $(BUILD_SHARED_LIBRARY)
- else # host
- include $(BUILD_HOST_SHARED_LIBRARY)
- endif
-endef
-
-ifeq ($(ART_BUILD_HOST_NDEBUG),true)
- $(eval $(call build-libart-simulator,host,ndebug))
-endif
-ifeq ($(ART_BUILD_HOST_DEBUG),true)
- $(eval $(call build-libart-simulator,host,debug))
-endif
diff --git a/sigchainlib/Android.bp b/sigchainlib/Android.bp
new file mode 100644
index 0000000..08af254
--- /dev/null
+++ b/sigchainlib/Android.bp
@@ -0,0 +1,53 @@
+//
+// Copyright (C) 2014 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+cc_library {
+ name: "libsigchain",
+ host_supported: true,
+ defaults: ["art_defaults"],
+ shared: {
+ srcs: ["sigchain_dummy.cc"],
+ },
+ static: {
+ srcs: ["sigchain.cc"],
+ },
+ target: {
+ host: {
+ host_ldlibs: ["-ldl"],
+ },
+ android: {
+ shared_libs: ["liblog"],
+ },
+ },
+}
+
+// Create a dummy version of libsigchain which expose the necessary symbols
+// but throws when called. This can be used to get static binaries which don't
+// need the real functionality of the sig chain but need to please the linker.
+cc_library_static {
+ name: "libsigchain_dummy",
+ host_supported: true,
+ defaults: ["art_defaults"],
+ srcs: ["sigchain_dummy.cc"],
+ target: {
+ host: {
+ host_ldlibs: ["-ldl"],
+ },
+ android: {
+ shared_libs: ["liblog"],
+ },
+ },
+}
diff --git a/sigchainlib/Android.mk b/sigchainlib/Android.mk
deleted file mode 100644
index e1120e4..0000000
--- a/sigchainlib/Android.mk
+++ /dev/null
@@ -1,94 +0,0 @@
-#
-# Copyright (C) 2014 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-LOCAL_PATH:= $(call my-dir)
-
-include art/build/Android.common_build.mk
-
-include $(CLEAR_VARS)
-LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
-LOCAL_MODULE_TAGS := optional
-LOCAL_CFLAGS += $(ART_TARGET_CFLAGS)
-LOCAL_ASFLAGS += $(ART_TARGET_ASFLAGS)
-LOCAL_SRC_FILES := sigchain_dummy.cc
-LOCAL_CLANG := $(ART_TARGET_CLANG)
-LOCAL_MODULE:= libsigchain
-LOCAL_SHARED_LIBRARIES := liblog
-LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
-LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.common_build.mk
-LOCAL_NATIVE_COVERAGE := $(ART_COVERAGE)
-$(eval $(call set-target-local-clang-vars))
-include $(BUILD_SHARED_LIBRARY)
-
-include $(CLEAR_VARS)
-LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
-LOCAL_MODULE_TAGS := optional
-LOCAL_CFLAGS += $(ART_TARGET_CFLAGS)
-LOCAL_ASFLAGS += $(ART_TARGET_ASFLAGS)
-LOCAL_SRC_FILES := sigchain.cc
-LOCAL_CLANG := $(ART_TARGET_CLANG)
-LOCAL_MODULE:= libsigchain
-LOCAL_SHARED_LIBRARIES := liblog
-LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
-LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.common_build.mk
-$(eval $(call set-target-local-clang-vars))
-include $(BUILD_STATIC_LIBRARY)
-
-# Build host library.
-include $(CLEAR_VARS)
-LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
-LOCAL_MODULE_TAGS := optional
-LOCAL_IS_HOST_MODULE := true
-LOCAL_CFLAGS += $(ART_HOST_CFLAGS)
-LOCAL_ASFLAGS += $(ART_HOST_ASFLAGS)
-LOCAL_CLANG := $(ART_HOST_CLANG)
-LOCAL_SRC_FILES := sigchain_dummy.cc
-LOCAL_MODULE:= libsigchain
-LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
-LOCAL_LDLIBS := -ldl
-LOCAL_MULTILIB := both
-LOCAL_NATIVE_COVERAGE := $(ART_COVERAGE)
-include $(BUILD_HOST_SHARED_LIBRARY)
-
-include $(CLEAR_VARS)
-LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
-LOCAL_MODULE_TAGS := optional
-LOCAL_IS_HOST_MODULE := true
-LOCAL_CFLAGS += $(ART_HOST_CFLAGS)
-LOCAL_ASFLAGS += $(ART_HOST_ASFLAGS)
-LOCAL_CLANG := $(ART_HOST_CLANG)
-LOCAL_SRC_FILES := sigchain.cc
-LOCAL_MODULE:= libsigchain
-LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
-LOCAL_LDLIBS := -ldl
-LOCAL_MULTILIB := both
-include $(BUILD_HOST_STATIC_LIBRARY)
-
-# Create a dummy version of libsigchain which expose the necessary symbols
-# but throws when called. This can be used to get static binaries which don't
-# need the real functionality of the sig chain but need to please the linker.
-include $(CLEAR_VARS)
-LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
-LOCAL_MODULE_TAGS := optional
-LOCAL_IS_HOST_MODULE := true
-LOCAL_CFLAGS += $(ART_HOST_CFLAGS)
-LOCAL_CLANG := $(ART_HOST_CLANG)
-LOCAL_SRC_FILES := sigchain_dummy.cc
-LOCAL_MODULE:= libsigchain_dummy
-LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
-LOCAL_LDLIBS := -ldl
-LOCAL_MULTILIB := both
-include $(BUILD_HOST_STATIC_LIBRARY)
diff --git a/tools/bisection-search/README.md b/tools/bisection-search/README.md
new file mode 100644
index 0000000..857c930
--- /dev/null
+++ b/tools/bisection-search/README.md
@@ -0,0 +1,43 @@
+Bisection Bug Search
+====================
+
+Bisection Bug Search is a tool for finding compiler optimizations bugs. It
+accepts a program which exposes a bug by producing incorrect output and expected
+output for the program. It then attempts to narrow down the issue to a single
+method and optimization pass under the assumption that interpreter is correct.
+
+Given methods in order M0..Mn finds smallest i such that compiling Mi and
+interpreting all other methods produces incorrect output. Then, given ordered
+optimization passes P0..Pl, finds smallest j such that compiling Mi with passes
+P0..Pj-1 produces expected output and compiling Mi with passes P0..Pj produces
+incorrect output. Prints Mi and Pj.
+
+How to run Bisection Bug Search
+===============================
+
+ bisection_search.py [-h] -cp CLASSPATH
+ [--expected-output EXPECTED_OUTPUT] [--device]
+ [--lib LIB] [--64]
+ [--dalvikvm-option [OPTION [OPTION ...]]]
+ [--arg [TEST_ARGS [TEST_ARGS ...]]] [--image IMAGE]
+ [--verbose]
+ classname
+
+ positional arguments:
+ classname name of class to run
+
+ optional arguments:
+ -h, --help show this help message and exit
+ -cp CLASSPATH, --classpath CLASSPATH
+ classpath
+ --expected-output EXPECTED_OUTPUT
+ file containing expected output
+ --device run on device
+ --lib LIB lib to use, default: libart.so
+ --64 x64 mode
+ --dalvikvm-option [OPTION [OPTION ...]]
+ additional dalvikvm option
+ --arg [TEST_ARGS [TEST_ARGS ...]]
+ argument to pass to program
+ --image IMAGE path to image
+ --verbose enable verbose output
diff --git a/tools/bisection-search/bisection_search.py b/tools/bisection-search/bisection_search.py
new file mode 100755
index 0000000..d6c1749
--- /dev/null
+++ b/tools/bisection-search/bisection_search.py
@@ -0,0 +1,300 @@
+#!/usr/bin/env python3.4
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Performs bisection bug search on methods and optimizations.
+
+See README.md.
+
+Example usage:
+./bisection-search.py -cp classes.dex --expected-output output Test
+"""
+
+import argparse
+import re
+import sys
+
+from common import DeviceTestEnv
+from common import FatalError
+from common import GetEnvVariableOrError
+from common import HostTestEnv
+
+# Passes that are never disabled during search process because disabling them
+# would compromise correctness.
+MANDATORY_PASSES = ['dex_cache_array_fixups_arm',
+ 'dex_cache_array_fixups_mips',
+ 'instruction_simplifier$before_codegen',
+ 'pc_relative_fixups_x86',
+ 'pc_relative_fixups_mips',
+ 'x86_memory_operand_generation']
+
+# Passes that show up as optimizations in compiler verbose output but aren't
+# driven by run-passes mechanism. They are mandatory and will always run, we
+# never pass them to --run-passes.
+NON_PASSES = ['builder', 'prepare_for_register_allocation',
+ 'liveness', 'register']
+
+
+class Dex2OatWrapperTestable(object):
+ """Class representing a testable compilation.
+
+ Accepts filters on compiled methods and optimization passes.
+ """
+
+ def __init__(self, base_cmd, test_env, class_name, args,
+ expected_output=None, verbose=False):
+ """Constructor.
+
+ Args:
+ base_cmd: list of strings, base command to run.
+ test_env: ITestEnv.
+ class_name: string, name of class to run.
+ args: list of strings, program arguments to pass.
+ expected_output: string, expected output to compare against or None.
+ verbose: bool, enable verbose output.
+ """
+ self._base_cmd = base_cmd
+ self._test_env = test_env
+ self._class_name = class_name
+ self._args = args
+ self._expected_output = expected_output
+ self._compiled_methods_path = self._test_env.CreateFile('compiled_methods')
+ self._passes_to_run_path = self._test_env.CreateFile('run_passes')
+ self._verbose = verbose
+
+ def Test(self, compiled_methods, passes_to_run=None):
+ """Tests compilation with compiled_methods and run_passes switches active.
+
+ If compiled_methods is None then compiles all methods.
+ If passes_to_run is None then runs default passes.
+
+ Args:
+ compiled_methods: list of strings representing methods to compile or None.
+ passes_to_run: list of strings representing passes to run or None.
+
+ Returns:
+ True if test passes with given settings. False otherwise.
+ """
+ if self._verbose:
+ print('Testing methods: {0} passes:{1}.'.format(
+ compiled_methods, passes_to_run))
+ cmd = self._PrepareCmd(compiled_methods=compiled_methods,
+ passes_to_run=passes_to_run,
+ verbose_compiler=True)
+ (output, _, ret_code) = self._test_env.RunCommand(cmd)
+ res = ret_code == 0 and (self._expected_output is None
+ or output == self._expected_output)
+ if self._verbose:
+ print('Test passed: {0}.'.format(res))
+ return res
+
+ def GetAllMethods(self):
+ """Get methods compiled during the test.
+
+ Returns:
+ List of strings representing methods compiled during the test.
+
+ Raises:
+ FatalError: An error occurred when retrieving methods list.
+ """
+ cmd = self._PrepareCmd(verbose_compiler=True)
+ (_, err_output, _) = self._test_env.RunCommand(cmd)
+ match_methods = re.findall(r'Building ([^\n]+)\n', err_output)
+ if not match_methods:
+ raise FatalError('Failed to retrieve methods list. '
+ 'Not recognized output format.')
+ return match_methods
+
+ def GetAllPassesForMethod(self, compiled_method):
+ """Get all optimization passes ran for a method during the test.
+
+ Args:
+ compiled_method: string representing method to compile.
+
+ Returns:
+ List of strings representing passes ran for compiled_method during test.
+
+ Raises:
+ FatalError: An error occurred when retrieving passes list.
+ """
+ cmd = self._PrepareCmd(compiled_methods=[compiled_method],
+ verbose_compiler=True)
+ (_, err_output, _) = self._test_env.RunCommand(cmd)
+ match_passes = re.findall(r'Starting pass: ([^\n]+)\n', err_output)
+ if not match_passes:
+ raise FatalError('Failed to retrieve passes list. '
+ 'Not recognized output format.')
+ return [p for p in match_passes if p not in NON_PASSES]
+
+ def _PrepareCmd(self, compiled_methods=None, passes_to_run=None,
+ verbose_compiler=False):
+ """Prepare command to run."""
+ cmd = list(self._base_cmd)
+ if compiled_methods is not None:
+ self._test_env.WriteLines(self._compiled_methods_path, compiled_methods)
+ cmd += ['-Xcompiler-option', '--compiled-methods={0}'.format(
+ self._compiled_methods_path)]
+ if passes_to_run is not None:
+ self._test_env.WriteLines(self._passes_to_run_path, passes_to_run)
+ cmd += ['-Xcompiler-option', '--run-passes={0}'.format(
+ self._passes_to_run_path)]
+ if verbose_compiler:
+ cmd += ['-Xcompiler-option', '--runtime-arg', '-Xcompiler-option',
+ '-verbose:compiler']
+ cmd += ['-classpath', self._test_env.classpath, self._class_name]
+ cmd += self._args
+ return cmd
+
+
+def BinarySearch(start, end, test):
+ """Binary search integers using test function to guide the process."""
+ while start < end:
+ mid = (start + end) // 2
+ if test(mid):
+ start = mid + 1
+ else:
+ end = mid
+ return start
+
+
+def FilterPasses(passes, cutoff_idx):
+ """Filters passes list according to cutoff_idx but keeps mandatory passes."""
+ return [opt_pass for idx, opt_pass in enumerate(passes)
+ if opt_pass in MANDATORY_PASSES or idx < cutoff_idx]
+
+
+def BugSearch(testable):
+ """Find buggy (method, optimization pass) pair for a given testable.
+
+ Args:
+ testable: Dex2OatWrapperTestable.
+
+ Returns:
+ (string, string) tuple. First element is name of method which when compiled
+ exposes test failure. Second element is name of optimization pass such that
+ for aforementioned method running all passes up to and excluding the pass
+ results in test passing but running all passes up to and including the pass
+ results in test failing.
+
+ (None, None) if test passes when compiling all methods.
+ (string, None) if a method is found which exposes the failure, but the
+ failure happens even when running just mandatory passes.
+
+ Raises:
+ FatalError: Testable fails with no methods compiled.
+ AssertionError: Method failed for all passes when bisecting methods, but
+ passed when bisecting passes. Possible sporadic failure.
+ """
+ all_methods = testable.GetAllMethods()
+ faulty_method_idx = BinarySearch(
+ 0,
+ len(all_methods),
+ lambda mid: testable.Test(all_methods[0:mid]))
+ if faulty_method_idx == len(all_methods):
+ return (None, None)
+ if faulty_method_idx == 0:
+ raise FatalError('Testable fails with no methods compiled. '
+ 'Perhaps issue lies outside of compiler.')
+ faulty_method = all_methods[faulty_method_idx - 1]
+ all_passes = testable.GetAllPassesForMethod(faulty_method)
+ faulty_pass_idx = BinarySearch(
+ 0,
+ len(all_passes),
+ lambda mid: testable.Test([faulty_method],
+ FilterPasses(all_passes, mid)))
+ if faulty_pass_idx == 0:
+ return (faulty_method, None)
+ assert faulty_pass_idx != len(all_passes), 'Method must fail for some passes.'
+ faulty_pass = all_passes[faulty_pass_idx - 1]
+ return (faulty_method, faulty_pass)
+
+
+def PrepareParser():
+ """Prepares argument parser."""
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ '-cp', '--classpath', required=True, type=str, help='classpath')
+ parser.add_argument('--expected-output', type=str,
+ help='file containing expected output')
+ parser.add_argument(
+ '--device', action='store_true', default=False, help='run on device')
+ parser.add_argument('classname', type=str, help='name of class to run')
+ parser.add_argument('--lib', dest='lib', type=str, default='libart.so',
+ help='lib to use, default: libart.so')
+ parser.add_argument('--64', dest='x64', action='store_true',
+ default=False, help='x64 mode')
+ parser.add_argument('--dalvikvm-option', dest='dalvikvm_opts',
+ metavar='OPTION', nargs='*', default=[],
+ help='additional dalvikvm option')
+ parser.add_argument('--arg', dest='test_args', nargs='*', default=[],
+ help='argument to pass to program')
+ parser.add_argument('--image', type=str, help='path to image')
+ parser.add_argument('--verbose', action='store_true',
+ default=False, help='enable verbose output')
+ return parser
+
+
+def main():
+ # Parse arguments
+ parser = PrepareParser()
+ args = parser.parse_args()
+
+ # Prepare environment
+ if args.expected_output is not None:
+ with open(args.expected_output, 'r') as f:
+ expected_output = f.read()
+ else:
+ expected_output = None
+ if args.device:
+ run_cmd = ['dalvikvm64'] if args.x64 else ['dalvikvm32']
+ test_env = DeviceTestEnv(args.classpath)
+ else:
+ run_cmd = ['dalvikvm64'] if args.x64 else ['dalvikvm32']
+ run_cmd += ['-XXlib:{0}'.format(args.lib)]
+ if not args.image:
+ image_path = '{0}/framework/core-optimizing-pic.art'.format(
+ GetEnvVariableOrError('ANDROID_HOST_OUT'))
+ else:
+ image_path = args.image
+ run_cmd += ['-Ximage:{0}'.format(image_path)]
+ if args.dalvikvm_opts:
+ run_cmd += args.dalvikvm_opts
+ test_env = HostTestEnv(args.classpath, args.x64)
+
+ # Perform the search
+ try:
+ testable = Dex2OatWrapperTestable(run_cmd, test_env, args.classname,
+ args.test_args, expected_output,
+ args.verbose)
+ (method, opt_pass) = BugSearch(testable)
+ except Exception as e:
+ print('Error. Refer to logfile: {0}'.format(test_env.logfile.name))
+ test_env.logfile.write('Exception: {0}\n'.format(e))
+ raise
+
+ # Report results
+ if method is None:
+ print('Couldn\'t find any bugs.')
+ elif opt_pass is None:
+ print('Faulty method: {0}. Fails with just mandatory passes.'.format(
+ method))
+ else:
+ print('Faulty method and pass: {0}, {1}.'.format(method, opt_pass))
+ print('Logfile: {0}'.format(test_env.logfile.name))
+ sys.exit(0)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/tools/bisection-search/bisection_test.py b/tools/bisection-search/bisection_test.py
new file mode 100755
index 0000000..9aa08fb
--- /dev/null
+++ b/tools/bisection-search/bisection_test.py
@@ -0,0 +1,99 @@
+#!/usr/bin/env python3.4
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests for bisection-search module."""
+
+import unittest
+
+from unittest.mock import Mock
+
+from bisection_search import BugSearch
+from bisection_search import Dex2OatWrapperTestable
+from bisection_search import FatalError
+from bisection_search import MANDATORY_PASSES
+
+
+class BisectionTestCase(unittest.TestCase):
+  """BugSearch method test case.
+
+  Integer constants were chosen arbitrarily. They should be large enough and
+  random enough to ensure binary search does nontrivial work.
+
+  Attributes:
+    _METHODS: list of strings, methods compiled by testable
+    _PASSES: list of strings, passes run by testable
+    _FAILING_METHOD: string, name of method which fails in some tests
+    _FAILING_PASS: string, name of pass which fails in some tests
+    _MANDATORY_PASS: string, name of a mandatory pass
+  """
+  _METHODS_COUNT = 1293
+  _PASSES_COUNT = 573
+  _FAILING_METHOD_IDX = 237
+  _FAILING_PASS_IDX = 444
+  _METHODS = ['method_{0}'.format(i) for i in range(_METHODS_COUNT)]
+  _PASSES = ['pass_{0}'.format(i) for i in range(_PASSES_COUNT)]
+  _FAILING_METHOD = _METHODS[_FAILING_METHOD_IDX]
+  _FAILING_PASS = _PASSES[_FAILING_PASS_IDX]
+  _MANDATORY_PASS = MANDATORY_PASSES[0]
+
+  def setUp(self):
+    # Mocked testable: BugSearch only needs GetAllMethods,
+    # GetAllPassesForMethod and Test from the Dex2OatWrapperTestable API.
+    self.testable_mock = Mock(spec=Dex2OatWrapperTestable)
+    self.testable_mock.GetAllMethods.return_value = self._METHODS
+    self.testable_mock.GetAllPassesForMethod.return_value = self._PASSES
+
+  def MethodFailsForAllPasses(self, compiled_methods, run_passes=None):
+    # Test.side_effect helper: the run "succeeds" (returns True) only when
+    # the failing method was not compiled, regardless of the passes run.
+    return self._FAILING_METHOD not in compiled_methods
+
+  def MethodFailsForAPass(self, compiled_methods, run_passes=None):
+    # Test.side_effect helper: the run succeeds when the failing method was
+    # not compiled, or was compiled without the failing pass.
+    return (self._FAILING_METHOD not in compiled_methods or
+            (run_passes is not None and self._FAILING_PASS not in run_passes))
+
+  def testNeverFails(self):
+    self.testable_mock.Test.return_value = True
+    res = BugSearch(self.testable_mock)
+    self.assertEqual(res, (None, None))
+
+  def testAlwaysFails(self):
+    self.testable_mock.Test.return_value = False
+    with self.assertRaises(FatalError):
+      BugSearch(self.testable_mock)
+
+  def testAMethodFailsForAllPasses(self):
+    self.testable_mock.Test.side_effect = self.MethodFailsForAllPasses
+    res = BugSearch(self.testable_mock)
+    self.assertEqual(res, (self._FAILING_METHOD, None))
+
+  def testAMethodFailsForAPass(self):
+    self.testable_mock.Test.side_effect = self.MethodFailsForAPass
+    res = BugSearch(self.testable_mock)
+    self.assertEqual(res, (self._FAILING_METHOD, self._FAILING_PASS))
+
+  def testMandatoryPassPresent(self):
+    # Every Test invocation that specifies run_passes must include the
+    # mandatory pass, whether passed positionally or by keyword.
+    self.testable_mock.GetAllPassesForMethod.return_value += (
+        [self._MANDATORY_PASS])
+    self.testable_mock.Test.side_effect = self.MethodFailsForAPass
+    BugSearch(self.testable_mock)
+    for (ordered_args, keyword_args) in self.testable_mock.Test.call_args_list:
+      passes = None
+      if 'run_passes' in keyword_args:
+        passes = keyword_args['run_passes']
+      if len(ordered_args) > 1:  # run_passes passed as ordered argument
+        passes = ordered_args[1]
+      if passes is not None:
+        self.assertIn(self._MANDATORY_PASS, passes)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/tools/bisection-search/common.py b/tools/bisection-search/common.py
new file mode 100755
index 0000000..8361fc9
--- /dev/null
+++ b/tools/bisection-search/common.py
@@ -0,0 +1,318 @@
+#!/usr/bin/env python3.4
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module containing common logic from python testing tools."""
+
+import abc
+import os
+import shlex
+
+from subprocess import check_call
+from subprocess import PIPE
+from subprocess import Popen
+from subprocess import TimeoutExpired
+
+from tempfile import mkdtemp
+from tempfile import NamedTemporaryFile
+
+# Temporary directory path on device.
+DEVICE_TMP_PATH = '/data/local/tmp'
+
+# Architectures supported in dalvik cache.
+DALVIK_CACHE_ARCHS = ['arm', 'arm64', 'x86', 'x86_64']
+
+
+def GetEnvVariableOrError(variable_name):
+  """Gets value of an environmental variable.
+
+  If the variable is not set raises FatalError.
+
+  Args:
+    variable_name: string, name of variable to get.
+
+  Returns:
+    string, value of requested variable.
+
+  Raises:
+    FatalError: Requested variable is not set.
+  """
+  # Note: a variable that is set but empty is returned as-is; only an
+  # unset variable raises.
+  top = os.environ.get(variable_name)
+  if top is None:
+    raise FatalError('{0} environmental variable not set.'.format(
+        variable_name))
+  return top
+
+
+def _DexArchCachePaths(android_data_path):
+  """Returns paths to architecture specific caches.
+
+  Args:
+    android_data_path: string, path dalvik-cache resides in.
+
+  Returns:
+    Iterable paths to architecture specific caches (a generator, so it can
+    only be iterated once per call).
+  """
+  # One dalvik-cache subdirectory per architecture in DALVIK_CACHE_ARCHS.
+  return ('{0}/dalvik-cache/{1}'.format(android_data_path, arch)
+          for arch in DALVIK_CACHE_ARCHS)
+
+
+def _RunCommandForOutputAndLog(cmd, env, logfile, timeout=60):
+  """Runs command, logs its output and returns stdout, stderr and exit code.
+
+  Args:
+    cmd: list of strings, command to run.
+    env: shell environment to run the command with.
+    logfile: file handle to logfile.
+    timeout: int, timeout in seconds.
+
+  Returns:
+    tuple (string, string, int) stdout output, stderr output, return code.
+  """
+  proc = Popen(cmd, stderr=PIPE, stdout=PIPE, env=env, universal_newlines=True)
+  timeouted = False
+  try:
+    (output, err_output) = proc.communicate(timeout=timeout)
+  except TimeoutExpired:
+    # Kill the hung process, then collect whatever output it produced.
+    timeouted = True
+    proc.kill()
+    (output, err_output) = proc.communicate()
+  logfile.write('Command:\n{0}\n{1}{2}\nReturn code: {3}\n'.format(
+      _CommandListToCommandString(cmd), err_output, output,
+      'TIMEOUT' if timeouted else proc.returncode))
+  # NOTE(review): a timeout is reported as return code 1, which is
+  # indistinguishable from a genuine exit status of 1 — confirm callers do
+  # not need to tell these two cases apart.
+  ret_code = 1 if timeouted else proc.returncode
+  return (output, err_output, ret_code)
+
+
+def _CommandListToCommandString(cmd):
+  """Converts shell command represented as list of strings to a single string.
+
+  Each element of the list is wrapped in double quotes.
+
+  Args:
+    cmd: list of strings, shell command.
+
+  Returns:
+    string, shell command.
+  """
+  # NOTE(review): embedded double quotes inside a segment are not escaped;
+  # presumably arguments never contain '"' — verify against callers.
+  return ' '.join(['"{0}"'.format(segment) for segment in cmd])
+
+
+class FatalError(Exception):
+  """Fatal error in script; raised when the tool cannot proceed."""
+
+
+class ITestEnv(object):
+ """Test environment abstraction.
+
+ Provides unified interface for interacting with host and device test
+ environments. Creates a test directory and expose methods to modify test files
+ and run commands.
+ """
+ __meta_class__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def CreateFile(self, name=None):
+ """Creates a file in test directory.
+
+ Returned path to file can be used in commands run in the environment.
+
+ Args:
+ name: string, file name. If None file is named arbitrarily.
+
+ Returns:
+ string, environment specific path to file.
+ """
+
+ @abc.abstractmethod
+ def WriteLines(self, file_path, lines):
+ """Writes lines to a file in test directory.
+
+ If file exists it gets overwritten. If file doest not exist it is created.
+
+ Args:
+ file_path: string, environment specific path to file.
+ lines: list of strings to write.
+ """
+
+ @abc.abstractmethod
+ def RunCommand(self, cmd):
+ """Runs command in environment.
+
+ Args:
+ cmd: string, command to run.
+
+ Returns:
+ tuple (string, string, int) stdout output, stderr output, return code.
+ """
+
+ @abc.abstractproperty
+ def classpath(self):
+ """Gets environment specific classpath with test class."""
+
+ @abc.abstractproperty
+ def logfile(self):
+ """Gets file handle to logfile residing on host."""
+
+
+class HostTestEnv(ITestEnv):
+ """Host test environment. Concrete implementation of ITestEnv.
+
+ Maintains a test directory in /tmp/. Runs commands on the host in modified
+ shell environment. Mimics art script behavior.
+
+ For methods documentation see base class.
+ """
+
+ def __init__(self, classpath, x64):
+ """Constructor.
+
+ Args:
+ classpath: string, classpath with test class.
+ x64: boolean, whether to setup in x64 mode.
+ """
+ self._classpath = classpath
+ self._env_path = mkdtemp(dir='/tmp/', prefix='bisection_search_')
+ self._logfile = open('{0}/log'.format(self._env_path), 'w+')
+ os.mkdir('{0}/dalvik-cache'.format(self._env_path))
+ for arch_cache_path in _DexArchCachePaths(self._env_path):
+ os.mkdir(arch_cache_path)
+ lib = 'lib64' if x64 else 'lib'
+ android_root = GetEnvVariableOrError('ANDROID_HOST_OUT')
+ library_path = android_root + '/' + lib
+ path = android_root + '/bin'
+ self._shell_env = os.environ.copy()
+ self._shell_env['ANDROID_DATA'] = self._env_path
+ self._shell_env['ANDROID_ROOT'] = android_root
+ self._shell_env['LD_LIBRARY_PATH'] = library_path
+ self._shell_env['PATH'] = (path + ':' + self._shell_env['PATH'])
+ # Using dlopen requires load bias on the host.
+ self._shell_env['LD_USE_LOAD_BIAS'] = '1'
+
+ def CreateFile(self, name=None):
+ if name is None:
+ f = NamedTemporaryFile(dir=self._env_path, delete=False)
+ else:
+ f = open('{0}/{1}'.format(self._env_path, name), 'w+')
+ return f.name
+
+ def WriteLines(self, file_path, lines):
+ with open(file_path, 'w') as f:
+ f.writelines('{0}\n'.format(line) for line in lines)
+ return
+
+ def RunCommand(self, cmd):
+ self._EmptyDexCache()
+ return _RunCommandForOutputAndLog(cmd, self._shell_env, self._logfile)
+
+ @property
+ def classpath(self):
+ return self._classpath
+
+ @property
+ def logfile(self):
+ return self._logfile
+
+ def _EmptyDexCache(self):
+ """Empties dex cache.
+
+ Iterate over files in architecture specific cache directories and remove
+ them.
+ """
+ for arch_cache_path in _DexArchCachePaths(self._env_path):
+ for file_path in os.listdir(arch_cache_path):
+ file_path = '{0}/{1}'.format(arch_cache_path, file_path)
+ if os.path.isfile(file_path):
+ os.unlink(file_path)
+
+
+class DeviceTestEnv(ITestEnv):
+ """Device test environment. Concrete implementation of ITestEnv.
+
+ Makes use of HostTestEnv to maintain a test directory on host. Creates an
+ on device test directory which is kept in sync with the host one.
+
+ For methods documentation see base class.
+ """
+
+ def __init__(self, classpath):
+ """Constructor.
+
+ Args:
+ classpath: string, classpath with test class.
+ """
+ self._host_env_path = mkdtemp(dir='/tmp/', prefix='bisection_search_')
+ self._logfile = open('{0}/log'.format(self._host_env_path), 'w+')
+ self._device_env_path = '{0}/{1}'.format(
+ DEVICE_TMP_PATH, os.path.basename(self._host_env_path))
+ self._classpath = os.path.join(
+ self._device_env_path, os.path.basename(classpath))
+ self._shell_env = os.environ
+
+ self._AdbMkdir('{0}/dalvik-cache'.format(self._device_env_path))
+ for arch_cache_path in _DexArchCachePaths(self._device_env_path):
+ self._AdbMkdir(arch_cache_path)
+
+ paths = classpath.split(':')
+ device_paths = []
+ for path in paths:
+ device_paths.append('{0}/{1}'.format(
+ self._device_env_path, os.path.basename(path)))
+ self._AdbPush(path, self._device_env_path)
+ self._classpath = ':'.join(device_paths)
+
+ def CreateFile(self, name=None):
+ with NamedTemporaryFile(mode='w') as temp_file:
+ self._AdbPush(temp_file.name, self._device_env_path)
+ if name is None:
+ name = os.path.basename(temp_file.name)
+ return '{0}/{1}'.format(self._device_env_path, name)
+
+ def WriteLines(self, file_path, lines):
+ with NamedTemporaryFile(mode='w') as temp_file:
+ temp_file.writelines('{0}\n'.format(line) for line in lines)
+ self._AdbPush(temp_file.name, file_path)
+ return
+
+ def RunCommand(self, cmd):
+ self._EmptyDexCache()
+ cmd = _CommandListToCommandString(cmd)
+ cmd = ('adb shell "logcat -c && ANDROID_DATA={0} {1} && '
+ 'logcat -d dex2oat:* *:S 1>&2"').format(self._device_env_path, cmd)
+ return _RunCommandForOutputAndLog(shlex.split(cmd), self._shell_env,
+ self._logfile)
+
+ @property
+ def classpath(self):
+ return self._classpath
+
+ @property
+ def logfile(self):
+ return self._logfile
+
+ def _AdbPush(self, what, where):
+ check_call(shlex.split('adb push "{0}" "{1}"'.format(what, where)),
+ stdout=self._logfile, stderr=self._logfile)
+
+ def _AdbMkdir(self, path):
+ check_call(shlex.split('adb shell mkdir "{0}" -p'.format(path)),
+ stdout=self._logfile, stderr=self._logfile)
+
+ def _EmptyDexCache(self):
+ """Empties dex cache."""
+ for arch_cache_path in _DexArchCachePaths(self._device_env_path):
+ cmd = 'adb shell if [ -d "{0}" ]; then rm -f "{0}"/*; fi'.format(
+ arch_cache_path)
+ check_call(shlex.split(cmd), stdout=self._logfile, stderr=self._logfile)