Merge "The libc++ gtest is being renamed to libgtest."
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index 7e58f5c..7cb23f8 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -173,7 +173,9 @@
ifeq ($(ART_HOST_CLANG),true)
- ART_HOST_CFLAGS += $(art_clang_cflags)
+ # Bug: 15446488. We don't omit the frame pointer to work around
+ # clang/libunwind bugs that cause SEGVs in run-test-004-ThreadStress.
+ ART_HOST_CFLAGS += $(art_clang_cflags) -fno-omit-frame-pointer
else
ART_HOST_CFLAGS += $(art_gcc_cflags)
endif
@@ -241,7 +243,9 @@
ifeq ($(HOST_OS),linux)
# Larger frame-size for host clang builds today
- art_host_non_debug_cflags += -Wframe-larger-than=2600
+ ifndef SANITIZE_HOST
+ art_host_non_debug_cflags += -Wframe-larger-than=2600
+ endif
art_target_non_debug_cflags += -Wframe-larger-than=1728
endif
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index b421f27..7e28b37 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -61,18 +61,24 @@
# The elf writer test has dependencies on core.oat.
ART_GTEST_elf_writer_test_HOST_DEPS := $(HOST_CORE_IMAGE_default_no-pic_64) $(HOST_CORE_IMAGE_default_no-pic_32)
ART_GTEST_elf_writer_test_TARGET_DEPS := $(TARGET_CORE_IMAGE_default_no-pic_64) $(TARGET_CORE_IMAGE_default_no-pic_32)
-ART_GTEST_jni_internal_test_TARGET_DEPS := $(TARGET_CORE_DEX_FILES)
-ART_GTEST_proxy_test_TARGET_DEPS := $(TARGET_CORE_DEX_FILES)
+
+# TODO: document why this is needed.
ART_GTEST_proxy_test_HOST_DEPS := $(HOST_CORE_IMAGE_default_no-pic_64) $(HOST_CORE_IMAGE_default_no-pic_32)
-ART_GTEST_dex_method_iterator_test_TARGET_DEPS := $(TARGET_CORE_DEX_FILES)
# The path for which all the source files are relative, not actually the current directory.
LOCAL_PATH := art
RUNTIME_GTEST_COMMON_SRC_FILES := \
runtime/arch/arch_test.cc \
+ runtime/arch/instruction_set_test.cc \
+ runtime/arch/instruction_set_features_test.cc \
runtime/arch/memcmp16_test.cc \
runtime/arch/stub_test.cc \
+ runtime/arch/arm/instruction_set_features_arm_test.cc \
+ runtime/arch/arm64/instruction_set_features_arm64_test.cc \
+ runtime/arch/mips/instruction_set_features_mips_test.cc \
+ runtime/arch/x86/instruction_set_features_x86_test.cc \
+ runtime/arch/x86_64/instruction_set_features_x86_64_test.cc \
runtime/barrier_test.cc \
runtime/base/bit_field_test.cc \
runtime/base/bit_vector_test.cc \
@@ -110,9 +116,9 @@
runtime/handle_scope_test.cc \
runtime/indenter_test.cc \
runtime/indirect_reference_table_test.cc \
- runtime/instruction_set_test.cc \
runtime/intern_table_test.cc \
runtime/interpreter/safe_math_test.cc \
+ runtime/java_vm_ext_test.cc \
runtime/leb128_test.cc \
runtime/mem_map_test.cc \
runtime/mirror/dex_cache_test.cc \
@@ -186,7 +192,9 @@
COMPILER_GTEST_HOST_SRC_FILES := \
$(COMPILER_GTEST_COMMON_SRC_FILES) \
- compiler/utils//assembler_thumb_test.cc \
+ compiler/utils/arm/assembler_arm32_test.cc \
+ compiler/utils/arm/assembler_thumb2_test.cc \
+ compiler/utils/assembler_thumb_test.cc \
compiler/utils/x86/assembler_x86_test.cc \
compiler/utils/x86_64/assembler_x86_64_test.cc
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index 9fe3807..e8b363b 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -23,6 +23,13 @@
include art/build/Android.common_build.mk
+ifeq ($(DEX2OAT_HOST_INSTRUCTION_SET_FEATURES),)
+ DEX2OAT_HOST_INSTRUCTION_SET_FEATURES := default
+endif
+ifeq ($($(HOST_2ND_ARCH_VAR_PREFIX)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES),)
+ $(HOST_2ND_ARCH_VAR_PREFIX)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES := default
+endif
+
# Use dex2oat debug version for better error reporting
# $(1): compiler - default, optimizing or interpreter.
# $(2): pic/no-pic
@@ -91,7 +98,7 @@
$$(addprefix --dex-location=,$$(HOST_CORE_DEX_LOCATIONS)) --oat-file=$$(PRIVATE_CORE_OAT_NAME) \
--oat-location=$$(PRIVATE_CORE_OAT_NAME) --image=$$(PRIVATE_CORE_IMG_NAME) \
--base=$$(LIBART_IMG_HOST_BASE_ADDRESS) --instruction-set=$$($(3)ART_HOST_ARCH) \
- --instruction-set-features=$$($(3)HOST_INSTRUCTION_SET_FEATURES) \
+ --instruction-set-features=$$($(3)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES) \
--host --android-root=$$(HOST_OUT) --include-patch-information \
$$(PRIVATE_CORE_COMPILE_OPTIONS)
@@ -194,7 +201,7 @@
$$(addprefix --dex-location=,$$(TARGET_CORE_DEX_LOCATIONS)) --oat-file=$$(PRIVATE_CORE_OAT_NAME) \
--oat-location=$$(PRIVATE_CORE_OAT_NAME) --image=$$(PRIVATE_CORE_IMG_NAME) \
--base=$$(LIBART_IMG_TARGET_BASE_ADDRESS) --instruction-set=$$($(3)TARGET_ARCH) \
- --instruction-set-features=$$($(3)TARGET_INSTRUCTION_SET_FEATURES) \
+ --instruction-set-features=$$($(3)DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES) \
--android-root=$$(PRODUCT_OUT)/system --include-patch-information \
$$(PRIVATE_CORE_COMPILE_OPTIONS) || (rm $$(PRIVATE_CORE_OAT_NAME); exit 1)
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index bfdb537..97387a1 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -16,6 +16,7 @@
#include "common_compiler_test.h"
+#include "arch/instruction_set_features.h"
#include "class_linker.h"
#include "compiled_method.h"
#include "dex/quick_compiler_callbacks.h"
@@ -156,7 +157,7 @@
method_inliner_map_.get(),
compiler_kind, instruction_set,
instruction_set_features_.get(),
- true, new std::set<std::string>,
+ true, new std::set<std::string>, nullptr,
2, true, true, timer_.get(), ""));
}
// We typically don't generate an image in unit tests, disable this optimization by default.
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index 0361cd1..7f76eef 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -21,7 +21,7 @@
#include <string>
#include <vector>
-#include "instruction_set.h"
+#include "arch/instruction_set.h"
#include "method_reference.h"
#include "utils.h"
#include "utils/array_ref.h"
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 0d5aa90..0c7812b 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -14,8 +14,10 @@
* limitations under the License.
*/
-#include "arm_lir.h"
#include "codegen_arm.h"
+
+#include "arch/arm/instruction_set_features_arm.h"
+#include "arm_lir.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "dex/reg_storage_eq.h"
@@ -974,7 +976,7 @@
LIR* load;
if (is_volatile == kVolatile && (size == k64 || size == kDouble) &&
!cu_->compiler_driver->GetInstructionSetFeatures()->
- AsArmInstructionSetFeatures()->HasLpae()) {
+ AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd()) {
// Only 64-bit load needs special handling.
// If the cpu supports LPAE, aligned LDRD is atomic - fall through to LoadBaseDisp().
DCHECK(!r_dest.IsFloat()); // See RegClassForFieldLoadSave().
@@ -1104,7 +1106,7 @@
LIR* store;
if (is_volatile == kVolatile && (size == k64 || size == kDouble) &&
!cu_->compiler_driver->GetInstructionSetFeatures()->
- AsArmInstructionSetFeatures()->HasLpae()) {
+ AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd()) {
// Only 64-bit store needs special handling.
// If the cpu supports LPAE, aligned STRD is atomic - fall through to StoreBaseDisp().
// Use STREXD for the atomic store. (Expect displacement > 0, don't optimize for == 0.)
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index da7ac87..9cdabf1 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -881,6 +881,14 @@
LOG(FATAL) << "Invalid jump range in kFixupT1Branch";
}
lir->operands[0] = delta >> 2;
+ if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && lir->operands[0] == 1) {
+ // Useless branch.
+ offset_adjustment -= lir->flags.size;
+ lir->flags.is_nop = true;
+ // Don't unlink - just set to do-nothing.
+ lir->flags.fixup = kFixupNone;
+ res = kRetryAll;
+ }
break;
}
case kFixupLoad:
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index a9a58a3..106996e 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -296,7 +296,8 @@
* We can safely skip the stack overflow check if we're
* a leaf *and* our frame size < fudge factor.
*/
- bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !FrameNeedsStackCheck(frame_size_, kArm64);
+ bool skip_overflow_check = mir_graph_->MethodIsLeaf() &&
+ !FrameNeedsStackCheck(frame_size_, kArm64);
NewLIR0(kPseudoMethodEntry);
@@ -320,7 +321,7 @@
// TODO: If the frame size is small enough, is it possible to make this a pre-indexed load,
// so that we can avoid the following "sub sp" when spilling?
OpRegRegImm(kOpSub, rs_x8, rs_sp, GetStackOverflowReservedBytes(kArm64));
- LoadWordDisp(rs_x8, 0, rs_x8);
+ Load32Disp(rs_x8, 0, rs_wzr);
MarkPossibleStackOverflowException();
}
}
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index e57f99c..8a5a58c 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -1147,11 +1147,6 @@
data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
}
- // If index is constant, just fold it into the data offset
- if (constant_index) {
- data_offset += mir_graph_->ConstantValue(rl_index) << scale;
- }
-
/* null object? */
GenNullCheck(rl_array.reg, opt_flags);
@@ -1165,42 +1160,22 @@
} else {
ForceImplicitNullCheck(rl_array.reg, opt_flags);
}
- if (rl_dest.wide || rl_dest.fp || constant_index) {
- RegStorage reg_ptr;
- if (constant_index) {
- reg_ptr = rl_array.reg; // NOTE: must not alter reg_ptr in constant case.
- } else {
- // No special indexed operation, lea + load w/ displacement
- reg_ptr = AllocTempRef();
- OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, As64BitReg(rl_index.reg),
- EncodeShift(kA64Lsl, scale));
- FreeTemp(rl_index.reg);
- }
+ if (constant_index) {
rl_result = EvalLoc(rl_dest, reg_class, true);
if (needs_range_check) {
- if (constant_index) {
- GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
- } else {
- GenArrayBoundsCheck(rl_index.reg, reg_len);
- }
+ GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
FreeTemp(reg_len);
}
+ // Fold the constant index into the data offset.
+ data_offset += mir_graph_->ConstantValue(rl_index) << scale;
if (rl_result.ref) {
- LoadRefDisp(reg_ptr, data_offset, rl_result.reg, kNotVolatile);
+ LoadRefDisp(rl_array.reg, data_offset, rl_result.reg, kNotVolatile);
} else {
- LoadBaseDisp(reg_ptr, data_offset, rl_result.reg, size, kNotVolatile);
- }
- if (!constant_index) {
- FreeTemp(reg_ptr);
- }
- if (rl_dest.wide) {
- StoreValueWide(rl_dest, rl_result);
- } else {
- StoreValue(rl_dest, rl_result);
+ LoadBaseDisp(rl_array.reg, data_offset, rl_result.reg, size, kNotVolatile);
}
} else {
- // Offset base, then use indexed load
+ // Offset base, then use indexed load.
RegStorage reg_ptr = AllocTempRef();
OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
FreeTemp(rl_array.reg);
@@ -1211,11 +1186,15 @@
FreeTemp(reg_len);
}
if (rl_result.ref) {
- LoadRefIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg, scale);
+ LoadRefIndexed(reg_ptr, rl_index.reg, rl_result.reg, scale);
} else {
- LoadBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg, scale, size);
+ LoadBaseIndexed(reg_ptr, rl_index.reg, rl_result.reg, scale, size);
}
FreeTemp(reg_ptr);
+ }
+ if (rl_dest.wide) {
+ StoreValueWide(rl_dest, rl_result);
+ } else {
StoreValue(rl_dest, rl_result);
}
}
@@ -1237,11 +1216,6 @@
data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
}
- // If index is constant, just fold it into the data offset.
- if (constant_index) {
- data_offset += mir_graph_->ConstantValue(rl_index) << scale;
- }
-
rl_array = LoadValue(rl_array, kRefReg);
if (!constant_index) {
rl_index = LoadValue(rl_index, kCoreReg);
@@ -1274,24 +1248,18 @@
ForceImplicitNullCheck(rl_array.reg, opt_flags);
}
/* at this point, reg_ptr points to array, 2 live temps */
- if (rl_src.wide || rl_src.fp || constant_index) {
- if (rl_src.wide) {
- rl_src = LoadValueWide(rl_src, reg_class);
- } else {
- rl_src = LoadValue(rl_src, reg_class);
- }
- if (!constant_index) {
- OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, As64BitReg(rl_index.reg),
- EncodeShift(kA64Lsl, scale));
- }
+ if (rl_src.wide) {
+ rl_src = LoadValueWide(rl_src, reg_class);
+ } else {
+ rl_src = LoadValue(rl_src, reg_class);
+ }
+ if (constant_index) {
if (needs_range_check) {
- if (constant_index) {
- GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
- } else {
- GenArrayBoundsCheck(rl_index.reg, reg_len);
- }
+ GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
FreeTemp(reg_len);
}
+ // Fold the constant index into the data offset.
+ data_offset += mir_graph_->ConstantValue(rl_index) << scale;
if (rl_src.ref) {
StoreRefDisp(reg_ptr, data_offset, rl_src.reg, kNotVolatile);
} else {
@@ -1300,15 +1268,14 @@
} else {
/* reg_ptr -> array data */
OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
- rl_src = LoadValue(rl_src, reg_class);
if (needs_range_check) {
GenArrayBoundsCheck(rl_index.reg, reg_len);
FreeTemp(reg_len);
}
if (rl_src.ref) {
- StoreRefIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_src.reg, scale);
+ StoreRefIndexed(reg_ptr, rl_index.reg, rl_src.reg, scale);
} else {
- StoreBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_src.reg, scale, size);
+ StoreBaseIndexed(reg_ptr, rl_index.reg, rl_src.reg, scale, size);
}
}
if (allocated_reg_ptr_temp) {
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 061ee07..98ddc36 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -13,6 +13,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+#include "arch/arm/instruction_set_features_arm.h"
#include "dex/compiler_ir.h"
#include "dex/compiler_internals.h"
#include "dex/quick/arm/arm_lir.h"
@@ -161,6 +162,10 @@
if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
return GenExplicitNullCheck(m_reg, opt_flags);
}
+ // If null check has not been eliminated, reset redundant store tracking.
+ if ((opt_flags & MIR_IGNORE_NULL_CHECK) == 0) {
+ ResetDefTracking();
+ }
return nullptr;
}
@@ -425,7 +430,11 @@
RegLocation loc = UpdateLoc(info->args[i]);
if (loc.location == kLocPhysReg) {
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- Store32Disp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg);
+ if (loc.ref) {
+ StoreRefDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, kNotVolatile);
+ } else {
+ Store32Disp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg);
+ }
}
}
/*
@@ -481,9 +490,17 @@
} else if (!info->is_range) {
// TUNING: interleave
for (int i = 0; i < elems; i++) {
- RegLocation rl_arg = LoadValue(info->args[i], kCoreReg);
- Store32Disp(ref_reg,
- mirror::Array::DataOffset(component_size).Int32Value() + i * 4, rl_arg.reg);
+ RegLocation rl_arg;
+ if (info->args[i].ref) {
+ rl_arg = LoadValue(info->args[i], kRefReg);
+ StoreRefDisp(ref_reg,
+ mirror::Array::DataOffset(component_size).Int32Value() + i * 4, rl_arg.reg,
+ kNotVolatile);
+ } else {
+ rl_arg = LoadValue(info->args[i], kCoreReg);
+ Store32Disp(ref_reg,
+ mirror::Array::DataOffset(component_size).Int32Value() + i * 4, rl_arg.reg);
+ }
// If the LoadValue caused a temp to be allocated, free it
if (IsTemp(rl_arg.reg)) {
FreeTemp(rl_arg.reg);
diff --git a/compiler/dex/quick/mips/assemble_mips.cc b/compiler/dex/quick/mips/assemble_mips.cc
index ca71c30..0d1d9bf 100644
--- a/compiler/dex/quick/mips/assemble_mips.cc
+++ b/compiler/dex/quick/mips/assemble_mips.cc
@@ -15,6 +15,7 @@
*/
#include "codegen_mips.h"
+
#include "dex/quick/mir_to_lir-inl.h"
#include "mips_lir.h"
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 01784e2..ed73ef0 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -17,6 +17,7 @@
/* This file contains codegen for the Mips ISA */
#include "codegen_mips.h"
+
#include "dex/quick/mir_to_lir-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "gc/accounting/card_table.h"
diff --git a/compiler/dex/quick/mips/fp_mips.cc b/compiler/dex/quick/mips/fp_mips.cc
index 0a7aa99..495d85e 100644
--- a/compiler/dex/quick/mips/fp_mips.cc
+++ b/compiler/dex/quick/mips/fp_mips.cc
@@ -15,6 +15,7 @@
*/
#include "codegen_mips.h"
+
#include "dex/quick/mir_to_lir-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "mips_lir.h"
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index d58ddb0..fb47238 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -17,6 +17,7 @@
/* This file contains codegen for the Mips ISA */
#include "codegen_mips.h"
+
#include "dex/quick/mir_to_lir-inl.h"
#include "dex/reg_storage_eq.h"
#include "entrypoints/quick/quick_entrypoints.h"
diff --git a/compiler/dex/quick/mips/mips_lir.h b/compiler/dex/quick/mips/mips_lir.h
index 3615916..3df8f2e 100644
--- a/compiler/dex/quick/mips/mips_lir.h
+++ b/compiler/dex/quick/mips/mips_lir.h
@@ -214,44 +214,43 @@
rF30 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 30,
rF31 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 31,
#endif
-#if (FR_BIT == 0)
- rD0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 0,
- rD1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 2,
- rD2 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 4,
- rD3 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 6,
- rD4 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 8,
- rD5 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 10,
- rD6 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 12,
- rD7 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 14,
+ // Double precision registers where the FPU is in 32-bit mode.
+ rD0_fr0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 0,
+ rD1_fr0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 2,
+ rD2_fr0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 4,
+ rD3_fr0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 6,
+ rD4_fr0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 8,
+ rD5_fr0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 10,
+ rD6_fr0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 12,
+ rD7_fr0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 14,
#if 0 // TODO: expand resource mask to enable use of all MIPS fp registers.
- rD8 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 16,
- rD9 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 18,
- rD10 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 20,
- rD11 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 22,
- rD12 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 24,
- rD13 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 26,
- rD14 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 28,
- rD15 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 30,
+ rD8_fr0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 16,
+ rD9_fr0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 18,
+ rD10_fr0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 20,
+ rD11_fr0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 22,
+ rD12_fr0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 24,
+ rD13_fr0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 26,
+ rD14_fr0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 28,
+ rD15_fr0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 30,
#endif
-#else
- rD0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 0,
- rD1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 1,
- rD2 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 2,
- rD3 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 3,
- rD4 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 4,
- rD5 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 5,
- rD6 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 6,
- rD7 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 7,
+ // Double precision registers where the FPU is in 64-bit mode.
+ rD0_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 0,
+ rD1_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 1,
+ rD2_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 2,
+ rD3_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 3,
+ rD4_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 4,
+ rD5_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 5,
+ rD6_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 6,
+ rD7_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 7,
#if 0 // TODO: expand resource mask to enable use of all MIPS fp registers.
- rD8 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 8,
- rD9 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 9,
- rD10 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 10,
- rD11 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 11,
- rD12 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 12,
- rD13 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 13,
- rD14 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 14,
- rD15 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 15,
-#endif
+ rD8_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 8,
+ rD9_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 9,
+ rD10_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 10,
+ rD11_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 11,
+ rD12_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 12,
+ rD13_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 13,
+ rD14_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 14,
+ rD15_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 15,
#endif
};
@@ -309,14 +308,23 @@
constexpr RegStorage rs_rF14(RegStorage::kValid | rF14);
constexpr RegStorage rs_rF15(RegStorage::kValid | rF15);
-constexpr RegStorage rs_rD0(RegStorage::kValid | rD0);
-constexpr RegStorage rs_rD1(RegStorage::kValid | rD1);
-constexpr RegStorage rs_rD2(RegStorage::kValid | rD2);
-constexpr RegStorage rs_rD3(RegStorage::kValid | rD3);
-constexpr RegStorage rs_rD4(RegStorage::kValid | rD4);
-constexpr RegStorage rs_rD5(RegStorage::kValid | rD5);
-constexpr RegStorage rs_rD6(RegStorage::kValid | rD6);
-constexpr RegStorage rs_rD7(RegStorage::kValid | rD7);
+constexpr RegStorage rs_rD0_fr0(RegStorage::kValid | rD0_fr0);
+constexpr RegStorage rs_rD1_fr0(RegStorage::kValid | rD1_fr0);
+constexpr RegStorage rs_rD2_fr0(RegStorage::kValid | rD2_fr0);
+constexpr RegStorage rs_rD3_fr0(RegStorage::kValid | rD3_fr0);
+constexpr RegStorage rs_rD4_fr0(RegStorage::kValid | rD4_fr0);
+constexpr RegStorage rs_rD5_fr0(RegStorage::kValid | rD5_fr0);
+constexpr RegStorage rs_rD6_fr0(RegStorage::kValid | rD6_fr0);
+constexpr RegStorage rs_rD7_fr0(RegStorage::kValid | rD7_fr0);
+
+constexpr RegStorage rs_rD0_fr1(RegStorage::kValid | rD0_fr1);
+constexpr RegStorage rs_rD1_fr1(RegStorage::kValid | rD1_fr1);
+constexpr RegStorage rs_rD2_fr1(RegStorage::kValid | rD2_fr1);
+constexpr RegStorage rs_rD3_fr1(RegStorage::kValid | rD3_fr1);
+constexpr RegStorage rs_rD4_fr1(RegStorage::kValid | rD4_fr1);
+constexpr RegStorage rs_rD5_fr1(RegStorage::kValid | rD5_fr1);
+constexpr RegStorage rs_rD6_fr1(RegStorage::kValid | rD6_fr1);
+constexpr RegStorage rs_rD7_fr1(RegStorage::kValid | rD7_fr1);
// TODO: reduce/eliminate use of these.
#define rMIPS_SUSPEND rS0
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index 4a340ec..185112d 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -20,6 +20,7 @@
#include <string>
+#include "arch/mips/instruction_set_features_mips.h"
#include "backend_mips.h"
#include "dex/compiler_internals.h"
#include "dex/quick/mir_to_lir-inl.h"
@@ -34,8 +35,12 @@
static constexpr RegStorage sp_regs_arr[] =
{rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15};
-static constexpr RegStorage dp_regs_arr[] =
- {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7};
+static constexpr RegStorage dp_fr0_regs_arr[] =
+ {rs_rD0_fr0, rs_rD1_fr0, rs_rD2_fr0, rs_rD3_fr0, rs_rD4_fr0, rs_rD5_fr0, rs_rD6_fr0,
+ rs_rD7_fr0};
+static constexpr RegStorage dp_fr1_regs_arr[] =
+ {rs_rD0_fr1, rs_rD1_fr1, rs_rD2_fr1, rs_rD3_fr1, rs_rD4_fr1, rs_rD5_fr1, rs_rD6_fr1,
+ rs_rD7_fr1};
static constexpr RegStorage reserved_regs_arr[] =
{rs_rZERO, rs_rAT, rs_rS0, rs_rS1, rs_rK0, rs_rK1, rs_rGP, rs_rSP, rs_rRA};
static constexpr RegStorage core_temps_arr[] =
@@ -44,17 +49,23 @@
static constexpr RegStorage sp_temps_arr[] =
{rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15};
-static constexpr RegStorage dp_temps_arr[] =
- {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7};
+static constexpr RegStorage dp_fr0_temps_arr[] =
+ {rs_rD0_fr0, rs_rD1_fr0, rs_rD2_fr0, rs_rD3_fr0, rs_rD4_fr0, rs_rD5_fr0, rs_rD6_fr0,
+ rs_rD7_fr0};
+static constexpr RegStorage dp_fr1_temps_arr[] =
+ {rs_rD0_fr1, rs_rD1_fr1, rs_rD2_fr1, rs_rD3_fr1, rs_rD4_fr1, rs_rD5_fr1, rs_rD6_fr1,
+ rs_rD7_fr1};
static constexpr ArrayRef<const RegStorage> empty_pool;
static constexpr ArrayRef<const RegStorage> core_regs(core_regs_arr);
static constexpr ArrayRef<const RegStorage> sp_regs(sp_regs_arr);
-static constexpr ArrayRef<const RegStorage> dp_regs(dp_regs_arr);
+static constexpr ArrayRef<const RegStorage> dp_fr0_regs(dp_fr0_regs_arr);
+static constexpr ArrayRef<const RegStorage> dp_fr1_regs(dp_fr1_regs_arr);
static constexpr ArrayRef<const RegStorage> reserved_regs(reserved_regs_arr);
static constexpr ArrayRef<const RegStorage> core_temps(core_temps_arr);
static constexpr ArrayRef<const RegStorage> sp_temps(sp_temps_arr);
-static constexpr ArrayRef<const RegStorage> dp_temps(dp_temps_arr);
+static constexpr ArrayRef<const RegStorage> dp_fr0_temps(dp_fr0_temps_arr);
+static constexpr ArrayRef<const RegStorage> dp_fr1_temps(dp_fr1_temps_arr);
RegLocation MipsMir2Lir::LocCReturn() {
return mips_loc_c_return;
@@ -129,14 +140,17 @@
* Decode the register id.
*/
ResourceMask MipsMir2Lir::GetRegMaskCommon(const RegStorage& reg) const {
- return reg.IsDouble()
- /* Each double register is equal to a pair of single-precision FP registers */
-#if (FR_BIT == 0)
- ? ResourceMask::TwoBits((reg.GetRegNum() & ~1) + kMipsFPReg0)
-#else
- ? ResourceMask::TwoBits(reg.GetRegNum() * 2 + kMipsFPReg0)
-#endif
- : ResourceMask::Bit(reg.IsSingle() ? reg.GetRegNum() + kMipsFPReg0 : reg.GetRegNum());
+ if (reg.IsDouble()) {
+ if (cu_->GetInstructionSetFeatures()->AsMipsInstructionSetFeatures()->Is32BitFloatingPoint()) {
+ return ResourceMask::TwoBits((reg.GetRegNum() & ~1) + kMipsFPReg0);
+ } else {
+ return ResourceMask::TwoBits(reg.GetRegNum() * 2 + kMipsFPReg0);
+ }
+ } else if (reg.IsSingle()) {
+ return ResourceMask::Bit(reg.GetRegNum() + kMipsFPReg0);
+ } else {
+ return ResourceMask::Bit(reg.GetRegNum());
+ }
}
ResourceMask MipsMir2Lir::GetPCUseDefEncoding() const {
@@ -382,14 +396,25 @@
Clobber(rs_rF13);
Clobber(rs_rF14);
Clobber(rs_rF15);
- Clobber(rs_rD0);
- Clobber(rs_rD1);
- Clobber(rs_rD2);
- Clobber(rs_rD3);
- Clobber(rs_rD4);
- Clobber(rs_rD5);
- Clobber(rs_rD6);
- Clobber(rs_rD7);
+ if (cu_->GetInstructionSetFeatures()->AsMipsInstructionSetFeatures()->Is32BitFloatingPoint()) {
+ Clobber(rs_rD0_fr0);
+ Clobber(rs_rD1_fr0);
+ Clobber(rs_rD2_fr0);
+ Clobber(rs_rD3_fr0);
+ Clobber(rs_rD4_fr0);
+ Clobber(rs_rD5_fr0);
+ Clobber(rs_rD6_fr0);
+ Clobber(rs_rD7_fr0);
+ } else {
+ Clobber(rs_rD0_fr1);
+ Clobber(rs_rD1_fr1);
+ Clobber(rs_rD2_fr1);
+ Clobber(rs_rD3_fr1);
+ Clobber(rs_rD4_fr1);
+ Clobber(rs_rD5_fr1);
+ Clobber(rs_rD6_fr1);
+ Clobber(rs_rD7_fr1);
+ }
}
RegLocation MipsMir2Lir::GetReturnWideAlt() {
@@ -420,33 +445,37 @@
FreeTemp(rs_rMIPS_ARG3);
}
-bool MipsMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
- UNUSED(barrier_kind);
-#if ANDROID_SMP != 0
- NewLIR1(kMipsSync, 0 /* Only stype currently supported */);
- return true;
-#else
- return false;
-#endif
+bool MipsMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind ATTRIBUTE_UNUSED) {
+ if (cu_->GetInstructionSetFeatures()->IsSmp()) {
+ NewLIR1(kMipsSync, 0 /* Only stype currently supported */);
+ return true;
+ } else {
+ return false;
+ }
}
void MipsMir2Lir::CompilerInitializeRegAlloc() {
+ const bool fpu_is_32bit =
+ cu_->GetInstructionSetFeatures()->AsMipsInstructionSetFeatures()->Is32BitFloatingPoint();
reg_pool_.reset(new (arena_) RegisterPool(this, arena_, core_regs, empty_pool /* core64 */,
- sp_regs, dp_regs,
+ sp_regs,
+ fpu_is_32bit ? dp_fr0_regs : dp_fr1_regs,
reserved_regs, empty_pool /* reserved64 */,
core_temps, empty_pool /* core64_temps */,
- sp_temps, dp_temps));
+ sp_temps,
+ fpu_is_32bit ? dp_fr0_temps : dp_fr1_temps));
// Target-specific adjustments.
// Alias single precision floats to appropriate half of overlapping double.
for (RegisterInfo* info : reg_pool_->sp_regs_) {
int sp_reg_num = info->GetReg().GetRegNum();
-#if (FR_BIT == 0)
- int dp_reg_num = sp_reg_num & ~1;
-#else
- int dp_reg_num = sp_reg_num >> 1;
-#endif
+ int dp_reg_num;
+ if (fpu_is_32bit) {
+ dp_reg_num = sp_reg_num & ~1;
+ } else {
+ dp_reg_num = sp_reg_num >> 1;
+ }
RegStorage dp_reg = RegStorage::Solo64(RegStorage::kFloatingPoint | dp_reg_num);
RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
// Double precision register's master storage should refer to itself.
@@ -465,11 +494,11 @@
// TODO: adjust when we roll to hard float calling convention.
reg_pool_->next_core_reg_ = 2;
reg_pool_->next_sp_reg_ = 2;
-#if (FR_BIT == 0)
- reg_pool_->next_dp_reg_ = 2;
-#else
- reg_pool_->next_dp_reg_ = 1;
-#endif
+ if (fpu_is_32bit) {
+ reg_pool_->next_dp_reg_ = 2;
+ } else {
+ reg_pool_->next_dp_reg_ = 1;
+ }
}
/*
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index a7dc84f..18f1cde 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -15,6 +15,8 @@
*/
#include "codegen_mips.h"
+
+#include "arch/mips/instruction_set_features_mips.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "dex/reg_storage_eq.h"
#include "mips_lir.h"
@@ -304,20 +306,22 @@
case kOpXor:
return OpRegRegReg(op, r_dest_src1, r_dest_src1, r_src2);
case kOp2Byte:
-#if __mips_isa_rev >= 2
- res = NewLIR2(kMipsSeb, r_dest_src1.GetReg(), r_src2.GetReg());
-#else
- res = OpRegRegImm(kOpLsl, r_dest_src1, r_src2, 24);
- OpRegRegImm(kOpAsr, r_dest_src1, r_dest_src1, 24);
-#endif
+ if (cu_->GetInstructionSetFeatures()->AsMipsInstructionSetFeatures()
+ ->IsMipsIsaRevGreaterThanEqual2()) {
+ res = NewLIR2(kMipsSeb, r_dest_src1.GetReg(), r_src2.GetReg());
+ } else {
+ res = OpRegRegImm(kOpLsl, r_dest_src1, r_src2, 24);
+ OpRegRegImm(kOpAsr, r_dest_src1, r_dest_src1, 24);
+ }
return res;
case kOp2Short:
-#if __mips_isa_rev >= 2
- res = NewLIR2(kMipsSeh, r_dest_src1.GetReg(), r_src2.GetReg());
-#else
- res = OpRegRegImm(kOpLsl, r_dest_src1, r_src2, 16);
- OpRegRegImm(kOpAsr, r_dest_src1, r_dest_src1, 16);
-#endif
+ if (cu_->GetInstructionSetFeatures()->AsMipsInstructionSetFeatures()
+ ->IsMipsIsaRevGreaterThanEqual2()) {
+ res = NewLIR2(kMipsSeh, r_dest_src1.GetReg(), r_src2.GetReg());
+ } else {
+ res = OpRegRegImm(kOpLsl, r_dest_src1, r_src2, 16);
+ OpRegRegImm(kOpAsr, r_dest_src1, r_dest_src1, 16);
+ }
return res;
case kOp2Char:
return NewLIR3(kMipsAndi, r_dest_src1.GetReg(), r_src2.GetReg(), 0xFFFF);
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index bacc6d2..13ebc1e 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -17,7 +17,7 @@
#ifndef ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_
#define ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_
-#include "invoke_type.h"
+#include "arch/instruction_set.h"
#include "compiled_method.h"
#include "dex/compiler_enums.h"
#include "dex/compiler_ir.h"
@@ -26,9 +26,9 @@
#include "dex/backend.h"
#include "dex/quick/resource_mask.h"
#include "driver/compiler_driver.h"
-#include "instruction_set.h"
-#include "leb128.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
+#include "invoke_type.h"
+#include "leb128.h"
#include "safe_map.h"
#include "utils/array_ref.h"
#include "utils/arena_allocator.h"
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index bc02eee..4825db6 100755
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -584,16 +584,16 @@
void X86Mir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) {
RegLocation rl_result;
rl_src = LoadValueWide(rl_src, kCoreReg);
- rl_result = EvalLocWide(rl_dest, kCoreReg, true);
if (cu_->target64) {
+ rl_result = EvalLocWide(rl_dest, kCoreReg, true);
OpRegCopy(rl_result.reg, rl_src.reg);
// Flip sign bit.
NewLIR2(kX86Rol64RI, rl_result.reg.GetReg(), 1);
NewLIR2(kX86Xor64RI, rl_result.reg.GetReg(), 1);
NewLIR2(kX86Ror64RI, rl_result.reg.GetReg(), 1);
} else {
- OpRegRegImm(kOpAdd, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), 0x80000000);
- OpRegCopy(rl_result.reg, rl_src.reg);
+ rl_result = ForceTempWide(rl_src);
+ OpRegRegImm(kOpAdd, rl_result.reg.GetHigh(), rl_result.reg.GetHigh(), 0x80000000);
}
StoreValueWide(rl_dest, rl_result);
}
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 781c128..3f501b4 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -2250,13 +2250,6 @@
OpRegReg(kOpNeg, rl_result.reg, rl_src.reg);
} else {
rl_result = ForceTempWide(rl_src);
- if (((rl_dest.location == kLocPhysReg) && (rl_src.location == kLocPhysReg)) &&
- ((rl_dest.reg.GetLowReg() == rl_src.reg.GetHighReg()))) {
- // The registers are the same, so we would clobber it before the use.
- RegStorage temp_reg = AllocTemp();
- OpRegCopy(temp_reg, rl_result.reg);
- rl_result.reg.SetHighReg(temp_reg.GetReg());
- }
OpRegReg(kOpNeg, rl_result.reg.GetLow(), rl_result.reg.GetLow()); // rLow = -rLow
OpRegImm(kOpAdc, rl_result.reg.GetHigh(), 0); // rHigh = rHigh + CF
OpRegReg(kOpNeg, rl_result.reg.GetHigh(), rl_result.reg.GetHigh()); // rHigh = -rHigh
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index aab94c0..08041e8 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -330,7 +330,8 @@
Compiler::Kind compiler_kind,
InstructionSet instruction_set,
const InstructionSetFeatures* instruction_set_features,
- bool image, std::set<std::string>* image_classes, size_t thread_count,
+ bool image, std::set<std::string>* image_classes,
+ std::set<std::string>* compiled_classes, size_t thread_count,
bool dump_stats, bool dump_passes, CumulativeLogger* timer,
const std::string& profile_file)
: profile_present_(false), compiler_options_(compiler_options),
@@ -346,6 +347,7 @@
non_relative_linker_patch_count_(0u),
image_(image),
image_classes_(image_classes),
+ classes_to_compile_(compiled_classes),
thread_count_(thread_count),
stats_(new AOTCompilationStats),
dump_stats_(dump_stats),
@@ -570,7 +572,7 @@
class_def);
}
CompileMethod(code_item, access_flags, invoke_type, class_def_idx, method_idx, jclass_loader,
- *dex_file, dex_to_dex_compilation_level);
+ *dex_file, dex_to_dex_compilation_level, true);
self->GetJniEnv()->DeleteGlobalRef(jclass_loader);
@@ -613,6 +615,17 @@
}
}
+bool CompilerDriver::IsClassToCompile(const char* descriptor) const {
+ if (!IsImage()) {
+ return true;
+ } else {
+ if (classes_to_compile_ == nullptr) {
+ return true;
+ }
+ return classes_to_compile_->find(descriptor) != classes_to_compile_->end();
+ }
+}
+
static void ResolveExceptionsForMethod(MutableMethodHelper* mh,
std::set<std::pair<uint16_t, const DexFile*>>& exceptions_to_resolve)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -1916,6 +1929,10 @@
it.Next();
}
CompilerDriver* driver = manager->GetCompiler();
+
+ bool compilation_enabled = driver->IsClassToCompile(
+ dex_file.StringByTypeIdx(class_def.class_idx_));
+
// Compile direct methods
int64_t previous_direct_method_idx = -1;
while (it.HasNextDirectMethod()) {
@@ -1929,7 +1946,8 @@
previous_direct_method_idx = method_idx;
driver->CompileMethod(it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
it.GetMethodInvokeType(class_def), class_def_index,
- method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level);
+ method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level,
+ compilation_enabled);
it.Next();
}
// Compile virtual methods
@@ -1945,7 +1963,8 @@
previous_virtual_method_idx = method_idx;
driver->CompileMethod(it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
it.GetMethodInvokeType(class_def), class_def_index,
- method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level);
+ method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level,
+ compilation_enabled);
it.Next();
}
DCHECK(!it.HasNext());
@@ -1977,7 +1996,8 @@
InvokeType invoke_type, uint16_t class_def_idx,
uint32_t method_idx, jobject class_loader,
const DexFile& dex_file,
- DexToDexCompilationLevel dex_to_dex_compilation_level) {
+ DexToDexCompilationLevel dex_to_dex_compilation_level,
+ bool compilation_enabled) {
CompiledMethod* compiled_method = nullptr;
uint64_t start_ns = kTimeCompileMethod ? NanoTime() : 0;
@@ -1994,7 +2014,8 @@
// Abstract methods don't have code.
} else {
MethodReference method_ref(&dex_file, method_idx);
- bool compile = verification_results_->IsCandidateForCompilation(method_ref, access_flags);
+ bool compile = compilation_enabled &&
+ verification_results_->IsCandidateForCompilation(method_ref, access_flags);
if (compile) {
// NOTE: if compiler declines to compile this method, it will return nullptr.
compiled_method = compiler_->Compile(code_item, access_flags, invoke_type, class_def_idx,
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 682b17a..437a1a9 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -21,6 +21,7 @@
#include <string>
#include <vector>
+#include "arch/instruction_set.h"
#include "base/mutex.h"
#include "base/timing_logger.h"
#include "class_reference.h"
@@ -28,7 +29,6 @@
#include "compiler.h"
#include "dex_file.h"
#include "driver/compiler_options.h"
-#include "instruction_set.h"
#include "invoke_type.h"
#include "method_reference.h"
#include "mirror/class.h" // For mirror::Class::Status.
@@ -51,6 +51,7 @@
class DexCompilationUnit;
class DexFileToMethodInlinerMap;
struct InlineIGetIPutData;
+class InstructionSetFeatures;
class OatWriter;
class ParallelCompilationManager;
class ScopedObjectAccess;
@@ -91,6 +92,7 @@
InstructionSet instruction_set,
const InstructionSetFeatures* instruction_set_features,
bool image, std::set<std::string>* image_classes,
+ std::set<std::string>* compiled_classes,
size_t thread_count, bool dump_stats, bool dump_passes,
CumulativeLogger* timer, const std::string& profile_file);
@@ -374,6 +376,9 @@
// Checks if class specified by type_idx is one of the image_classes_
bool IsImageClass(const char* descriptor) const;
+ // Checks if the provided class should be compiled, i.e., is in classes_to_compile_.
+ bool IsClassToCompile(const char* descriptor) const;
+
void RecordClassStatus(ClassReference ref, mirror::Class::Status status)
LOCKS_EXCLUDED(compiled_classes_lock_);
@@ -475,7 +480,8 @@
void CompileMethod(const DexFile::CodeItem* code_item, uint32_t access_flags,
InvokeType invoke_type, uint16_t class_def_idx, uint32_t method_idx,
jobject class_loader, const DexFile& dex_file,
- DexToDexCompilationLevel dex_to_dex_compilation_level)
+ DexToDexCompilationLevel dex_to_dex_compilation_level,
+ bool compilation_enabled)
LOCKS_EXCLUDED(compiled_methods_lock_);
static void CompileClass(const ParallelCompilationManager* context, size_t class_def_index)
@@ -514,6 +520,11 @@
// included in the image.
std::unique_ptr<std::set<std::string>> image_classes_;
+ // If image_ is true, specifies the classes that will be compiled in
+ // the image. Note if classes_to_compile_ is nullptr, all classes are
+ // included in the image.
+ std::unique_ptr<std::set<std::string>> classes_to_compile_;
+
size_t thread_count_;
class AOTCompilationStats;
diff --git a/compiler/elf_builder.h b/compiler/elf_builder.h
index 7f30565..273b62d 100644
--- a/compiler/elf_builder.h
+++ b/compiler/elf_builder.h
@@ -17,12 +17,12 @@
#ifndef ART_COMPILER_ELF_BUILDER_H_
#define ART_COMPILER_ELF_BUILDER_H_
+#include "arch/instruction_set.h"
#include "base/stl_util.h"
#include "base/value_object.h"
#include "buffered_output_stream.h"
#include "elf_utils.h"
#include "file_output_stream.h"
-#include "instruction_set.h"
namespace art {
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index cf2cddb..b7283a4 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -484,13 +484,14 @@
}
//
size_t num_reference_fields = h_class->NumReferenceInstanceFields();
+ MemberOffset field_offset = h_class->GetFirstReferenceInstanceFieldOffset();
for (size_t i = 0; i < num_reference_fields; ++i) {
- mirror::ArtField* field = h_class->GetInstanceField(i);
- MemberOffset field_offset = field->GetOffset();
mirror::Object* value = obj->GetFieldObject<mirror::Object>(field_offset);
if (value != nullptr) {
WalkFieldsInOrder(value);
}
+ field_offset = MemberOffset(field_offset.Uint32Value() +
+ sizeof(mirror::HeapReference<mirror::Object>));
}
}
@@ -507,13 +508,14 @@
// Walk static fields of a Class.
if (h_obj->IsClass()) {
size_t num_static_fields = klass->NumReferenceStaticFields();
+ MemberOffset field_offset = klass->GetFirstReferenceStaticFieldOffset();
for (size_t i = 0; i < num_static_fields; ++i) {
- mirror::ArtField* field = klass->GetStaticField(i);
- MemberOffset field_offset = field->GetOffset();
mirror::Object* value = h_obj->GetFieldObject<mirror::Object>(field_offset);
if (value != nullptr) {
WalkFieldsInOrder(value);
}
+ field_offset = MemberOffset(field_offset.Uint32Value() +
+ sizeof(mirror::HeapReference<mirror::Object>));
}
} else if (h_obj->IsObjectArray()) {
// Walk elements of an object array.
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 97b7cc9..ce4ed6d 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "arch/instruction_set_features.h"
#include "class_linker.h"
#include "common_compiler_test.h"
#include "compiler.h"
@@ -97,7 +98,7 @@
std::string error_msg;
std::unique_ptr<const InstructionSetFeatures> insn_features(
- InstructionSetFeatures::FromFeatureString(insn_set, "default", &error_msg));
+ InstructionSetFeatures::FromVariant(insn_set, "default", &error_msg));
ASSERT_TRUE(insn_features.get() != nullptr) << error_msg;
compiler_options_.reset(new CompilerOptions);
verification_results_.reset(new VerificationResults(compiler_options_.get()));
@@ -109,8 +110,8 @@
verification_results_.get(),
method_inliner_map_.get(),
compiler_kind, insn_set,
- insn_features.get(), false, nullptr, 2, true, true,
- timer_.get(), ""));
+ insn_features.get(), false, nullptr, nullptr, 2, true,
+ true, timer_.get(), ""));
jobject class_loader = nullptr;
if (kCompile) {
TimingLogger timings2("OatTest::WriteRead", false, false);
@@ -198,7 +199,7 @@
InstructionSet insn_set = kX86;
std::string error_msg;
std::unique_ptr<const InstructionSetFeatures> insn_features(
- InstructionSetFeatures::FromFeatureString(insn_set, "default", &error_msg));
+ InstructionSetFeatures::FromVariant(insn_set, "default", &error_msg));
ASSERT_TRUE(insn_features.get() != nullptr) << error_msg;
std::vector<const DexFile*> dex_files;
uint32_t image_file_location_oat_checksum = 0;
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 8418ab0..b51b6e7 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -41,25 +41,29 @@
*/
class Temporaries : public ValueObject {
public:
- Temporaries(HGraph* graph, size_t count) : graph_(graph), count_(count), index_(0) {
- graph_->UpdateNumberOfTemporaries(count_);
- }
+ explicit Temporaries(HGraph* graph) : graph_(graph), index_(0) {}
void Add(HInstruction* instruction) {
- // We currently only support vreg size temps.
- DCHECK(instruction->GetType() != Primitive::kPrimLong
- && instruction->GetType() != Primitive::kPrimDouble);
- HInstruction* temp = new (graph_->GetArena()) HTemporary(index_++);
+ HInstruction* temp = new (graph_->GetArena()) HTemporary(index_);
instruction->GetBlock()->AddInstruction(temp);
+
DCHECK(temp->GetPrevious() == instruction);
+
+ size_t offset;
+ if (instruction->GetType() == Primitive::kPrimLong
+ || instruction->GetType() == Primitive::kPrimDouble) {
+ offset = 2;
+ } else {
+ offset = 1;
+ }
+ index_ += offset;
+
+ graph_->UpdateTemporariesVRegSlots(index_);
}
private:
HGraph* const graph_;
- // The total number of temporaries that will be used.
- const size_t count_;
-
// Current index in the temporary stack, updated by `Add`.
size_t index_;
};
@@ -115,37 +119,37 @@
}
template<typename T>
-void HGraphBuilder::If_22t(const Instruction& instruction, uint32_t dex_offset) {
+void HGraphBuilder::If_22t(const Instruction& instruction, uint32_t dex_pc) {
int32_t target_offset = instruction.GetTargetOffset();
- PotentiallyAddSuspendCheck(target_offset, dex_offset);
+ PotentiallyAddSuspendCheck(target_offset, dex_pc);
HInstruction* first = LoadLocal(instruction.VRegA(), Primitive::kPrimInt);
HInstruction* second = LoadLocal(instruction.VRegB(), Primitive::kPrimInt);
T* comparison = new (arena_) T(first, second);
current_block_->AddInstruction(comparison);
HInstruction* ifinst = new (arena_) HIf(comparison);
current_block_->AddInstruction(ifinst);
- HBasicBlock* target = FindBlockStartingAt(dex_offset + target_offset);
+ HBasicBlock* target = FindBlockStartingAt(dex_pc + target_offset);
DCHECK(target != nullptr);
current_block_->AddSuccessor(target);
- target = FindBlockStartingAt(dex_offset + instruction.SizeInCodeUnits());
+ target = FindBlockStartingAt(dex_pc + instruction.SizeInCodeUnits());
DCHECK(target != nullptr);
current_block_->AddSuccessor(target);
current_block_ = nullptr;
}
template<typename T>
-void HGraphBuilder::If_21t(const Instruction& instruction, uint32_t dex_offset) {
+void HGraphBuilder::If_21t(const Instruction& instruction, uint32_t dex_pc) {
int32_t target_offset = instruction.GetTargetOffset();
- PotentiallyAddSuspendCheck(target_offset, dex_offset);
+ PotentiallyAddSuspendCheck(target_offset, dex_pc);
HInstruction* value = LoadLocal(instruction.VRegA(), Primitive::kPrimInt);
T* comparison = new (arena_) T(value, GetIntConstant(0));
current_block_->AddInstruction(comparison);
HInstruction* ifinst = new (arena_) HIf(comparison);
current_block_->AddInstruction(ifinst);
- HBasicBlock* target = FindBlockStartingAt(dex_offset + target_offset);
+ HBasicBlock* target = FindBlockStartingAt(dex_pc + target_offset);
DCHECK(target != nullptr);
current_block_->AddSuccessor(target);
- target = FindBlockStartingAt(dex_offset + instruction.SizeInCodeUnits());
+ target = FindBlockStartingAt(dex_pc + instruction.SizeInCodeUnits());
DCHECK(target != nullptr);
current_block_->AddSuccessor(target);
current_block_ = nullptr;
@@ -192,13 +196,13 @@
InitializeParameters(code_item.ins_size_);
- size_t dex_offset = 0;
+ size_t dex_pc = 0;
while (code_ptr < code_end) {
- // Update the current block if dex_offset starts a new block.
- MaybeUpdateCurrentBlock(dex_offset);
+ // Update the current block if dex_pc starts a new block.
+ MaybeUpdateCurrentBlock(dex_pc);
const Instruction& instruction = *Instruction::At(code_ptr);
- if (!AnalyzeDexInstruction(instruction, dex_offset)) return nullptr;
- dex_offset += instruction.SizeInCodeUnits();
+ if (!AnalyzeDexInstruction(instruction, dex_pc)) return nullptr;
+ dex_pc += instruction.SizeInCodeUnits();
code_ptr += instruction.SizeInCodeUnits();
}
@@ -239,25 +243,25 @@
// Iterate over all instructions and find branching instructions. Create blocks for
// the locations these instructions branch to.
- size_t dex_offset = 0;
+ size_t dex_pc = 0;
while (code_ptr < code_end) {
const Instruction& instruction = *Instruction::At(code_ptr);
if (instruction.IsBranch()) {
- int32_t target = instruction.GetTargetOffset() + dex_offset;
+ int32_t target = instruction.GetTargetOffset() + dex_pc;
// Create a block for the target instruction.
if (FindBlockStartingAt(target) == nullptr) {
block = new (arena_) HBasicBlock(graph_, target);
branch_targets_.Put(target, block);
}
- dex_offset += instruction.SizeInCodeUnits();
+ dex_pc += instruction.SizeInCodeUnits();
code_ptr += instruction.SizeInCodeUnits();
- if ((code_ptr < code_end) && (FindBlockStartingAt(dex_offset) == nullptr)) {
- block = new (arena_) HBasicBlock(graph_, dex_offset);
- branch_targets_.Put(dex_offset, block);
+ if ((code_ptr < code_end) && (FindBlockStartingAt(dex_pc) == nullptr)) {
+ block = new (arena_) HBasicBlock(graph_, dex_pc);
+ branch_targets_.Put(dex_pc, block);
}
} else {
code_ptr += instruction.SizeInCodeUnits();
- dex_offset += instruction.SizeInCodeUnits();
+ dex_pc += instruction.SizeInCodeUnits();
}
}
}
@@ -291,6 +295,16 @@
}
template<typename T>
+void HGraphBuilder::Binop_23x(const Instruction& instruction,
+ Primitive::Type type,
+ uint32_t dex_pc) {
+ HInstruction* first = LoadLocal(instruction.VRegB(), type);
+ HInstruction* second = LoadLocal(instruction.VRegC(), type);
+ current_block_->AddInstruction(new (arena_) T(type, first, second, dex_pc));
+ UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
+}
+
+template<typename T>
void HGraphBuilder::Binop_12x(const Instruction& instruction, Primitive::Type type) {
HInstruction* first = LoadLocal(instruction.VRegA(), type);
HInstruction* second = LoadLocal(instruction.VRegB(), type);
@@ -299,6 +313,16 @@
}
template<typename T>
+void HGraphBuilder::Binop_12x(const Instruction& instruction,
+ Primitive::Type type,
+ uint32_t dex_pc) {
+ HInstruction* first = LoadLocal(instruction.VRegA(), type);
+ HInstruction* second = LoadLocal(instruction.VRegB(), type);
+ current_block_->AddInstruction(new (arena_) T(type, first, second, dex_pc));
+ UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
+}
+
+template<typename T>
void HGraphBuilder::Binop_22s(const Instruction& instruction, bool reverse) {
HInstruction* first = LoadLocal(instruction.VRegB(), Primitive::kPrimInt);
HInstruction* second = GetIntConstant(instruction.VRegC_22s());
@@ -332,7 +356,7 @@
}
bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
- uint32_t dex_offset,
+ uint32_t dex_pc,
uint32_t method_idx,
uint32_t number_of_vreg_arguments,
bool is_range,
@@ -374,39 +398,44 @@
const size_t number_of_arguments = strlen(descriptor) - (is_instance_call ? 0 : 1);
HInvoke* invoke = nullptr;
- if (invoke_type == kVirtual || invoke_type == kInterface) {
+ if (invoke_type == kVirtual || invoke_type == kInterface || invoke_type == kSuper) {
MethodReference target_method(dex_file_, method_idx);
uintptr_t direct_code;
uintptr_t direct_method;
int table_index;
InvokeType optimized_invoke_type = invoke_type;
- // TODO: Add devirtualization support.
- compiler_driver_->ComputeInvokeInfo(dex_compilation_unit_, dex_offset, true, true,
+ compiler_driver_->ComputeInvokeInfo(dex_compilation_unit_, dex_pc, true, true,
&optimized_invoke_type, &target_method, &table_index,
&direct_code, &direct_method);
if (table_index == -1) {
return false;
}
- if (invoke_type == kVirtual) {
+ if (optimized_invoke_type == kVirtual) {
invoke = new (arena_) HInvokeVirtual(
- arena_, number_of_arguments, return_type, dex_offset, table_index);
- } else {
- DCHECK_EQ(invoke_type, kInterface);
+ arena_, number_of_arguments, return_type, dex_pc, table_index);
+ } else if (optimized_invoke_type == kInterface) {
invoke = new (arena_) HInvokeInterface(
- arena_, number_of_arguments, return_type, dex_offset, method_idx, table_index);
+ arena_, number_of_arguments, return_type, dex_pc, method_idx, table_index);
+ } else if (optimized_invoke_type == kDirect) {
+ // For this compiler, sharpening only works if we compile PIC.
+ DCHECK(compiler_driver_->GetCompilerOptions().GetCompilePic());
+ // Treat invoke-direct like static calls for now.
+ invoke = new (arena_) HInvokeStatic(
+ arena_, number_of_arguments, return_type, dex_pc, target_method.dex_method_index);
}
} else {
+ DCHECK(invoke_type == kDirect || invoke_type == kStatic);
// Treat invoke-direct like static calls for now.
invoke = new (arena_) HInvokeStatic(
- arena_, number_of_arguments, return_type, dex_offset, method_idx);
+ arena_, number_of_arguments, return_type, dex_pc, method_idx);
}
size_t start_index = 0;
- Temporaries temps(graph_, is_instance_call ? 1 : 0);
+ Temporaries temps(graph_);
if (is_instance_call) {
HInstruction* arg = LoadLocal(is_range ? register_index : args[0], Primitive::kPrimNot);
- HNullCheck* null_check = new (arena_) HNullCheck(arg, dex_offset);
+ HNullCheck* null_check = new (arena_) HNullCheck(arg, dex_pc);
current_block_->AddInstruction(null_check);
temps.Add(null_check);
invoke->SetArgumentAt(0, null_check);
@@ -420,7 +449,7 @@
bool is_wide = (type == Primitive::kPrimLong) || (type == Primitive::kPrimDouble);
if (!is_range && is_wide && args[i] + 1 != args[i + 1]) {
LOG(WARNING) << "Non sequential register pair in " << dex_compilation_unit_->GetSymbol()
- << " at " << dex_offset;
+ << " at " << dex_pc;
// We do not implement non sequential register pair.
return false;
}
@@ -438,7 +467,7 @@
}
bool HGraphBuilder::BuildInstanceFieldAccess(const Instruction& instruction,
- uint32_t dex_offset,
+ uint32_t dex_pc,
bool is_put) {
uint32_t source_or_dest_reg = instruction.VRegA_22c();
uint32_t obj_reg = instruction.VRegB_22c();
@@ -459,9 +488,9 @@
Primitive::Type field_type = resolved_field->GetTypeAsPrimitiveType();
HInstruction* object = LoadLocal(obj_reg, Primitive::kPrimNot);
- current_block_->AddInstruction(new (arena_) HNullCheck(object, dex_offset));
+ current_block_->AddInstruction(new (arena_) HNullCheck(object, dex_pc));
if (is_put) {
- Temporaries temps(graph_, 1);
+ Temporaries temps(graph_);
HInstruction* null_check = current_block_->GetLastInstruction();
// We need one temporary for the null check.
temps.Add(null_check);
@@ -485,7 +514,7 @@
bool HGraphBuilder::BuildStaticFieldAccess(const Instruction& instruction,
- uint32_t dex_offset,
+ uint32_t dex_pc,
bool is_put) {
uint32_t source_or_dest_reg = instruction.VRegA_21c();
uint16_t field_index = instruction.VRegB_21c();
@@ -515,18 +544,18 @@
}
HLoadClass* constant = new (arena_) HLoadClass(
- storage_index, is_referrers_class, dex_offset);
+ storage_index, is_referrers_class, dex_pc);
current_block_->AddInstruction(constant);
HInstruction* cls = constant;
if (!is_initialized) {
- cls = new (arena_) HClinitCheck(constant, dex_offset);
+ cls = new (arena_) HClinitCheck(constant, dex_pc);
current_block_->AddInstruction(cls);
}
if (is_put) {
// We need to keep the class alive before loading the value.
- Temporaries temps(graph_, 1);
+ Temporaries temps(graph_);
temps.Add(cls);
HInstruction* value = LoadLocal(source_or_dest_reg, field_type);
DCHECK_EQ(value->GetType(), field_type);
@@ -539,29 +568,41 @@
return true;
}
-void HGraphBuilder::BuildCheckedDiv(uint16_t out_reg,
- uint16_t first_reg,
- int32_t second_reg,
- uint32_t dex_offset,
+void HGraphBuilder::BuildCheckedDiv(uint16_t out_vreg,
+ uint16_t first_vreg,
+ int64_t second_vreg_or_constant,
+ uint32_t dex_pc,
Primitive::Type type,
- bool second_is_lit) {
- DCHECK(type == Primitive::kPrimInt);
+ bool second_is_constant) {
+ DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
- HInstruction* first = LoadLocal(first_reg, type);
- HInstruction* second = second_is_lit ? GetIntConstant(second_reg) : LoadLocal(second_reg, type);
- if (!second->IsIntConstant() || (second->AsIntConstant()->GetValue() == 0)) {
- second = new (arena_) HDivZeroCheck(second, dex_offset);
- Temporaries temps(graph_, 1);
+ HInstruction* first = LoadLocal(first_vreg, type);
+ HInstruction* second = nullptr;
+ if (second_is_constant) {
+ if (type == Primitive::kPrimInt) {
+ second = GetIntConstant(second_vreg_or_constant);
+ } else {
+ second = GetLongConstant(second_vreg_or_constant);
+ }
+ } else {
+ second = LoadLocal(second_vreg_or_constant, type);
+ }
+
+ if (!second_is_constant
+ || (type == Primitive::kPrimInt && second->AsIntConstant()->GetValue() == 0)
+ || (type == Primitive::kPrimLong && second->AsLongConstant()->GetValue() == 0)) {
+ second = new (arena_) HDivZeroCheck(second, dex_pc);
+ Temporaries temps(graph_);
current_block_->AddInstruction(second);
temps.Add(current_block_->GetLastInstruction());
}
- current_block_->AddInstruction(new (arena_) HDiv(type, first, second));
- UpdateLocal(out_reg, current_block_->GetLastInstruction());
+ current_block_->AddInstruction(new (arena_) HDiv(type, first, second, dex_pc));
+ UpdateLocal(out_vreg, current_block_->GetLastInstruction());
}
void HGraphBuilder::BuildArrayAccess(const Instruction& instruction,
- uint32_t dex_offset,
+ uint32_t dex_pc,
bool is_put,
Primitive::Type anticipated_type) {
uint8_t source_or_dest_reg = instruction.VRegA_23x();
@@ -569,10 +610,10 @@
uint8_t index_reg = instruction.VRegC_23x();
// We need one temporary for the null check, one for the index, and one for the length.
- Temporaries temps(graph_, 3);
+ Temporaries temps(graph_);
HInstruction* object = LoadLocal(array_reg, Primitive::kPrimNot);
- object = new (arena_) HNullCheck(object, dex_offset);
+ object = new (arena_) HNullCheck(object, dex_pc);
current_block_->AddInstruction(object);
temps.Add(object);
@@ -580,28 +621,28 @@
current_block_->AddInstruction(length);
temps.Add(length);
HInstruction* index = LoadLocal(index_reg, Primitive::kPrimInt);
- index = new (arena_) HBoundsCheck(index, length, dex_offset);
+ index = new (arena_) HBoundsCheck(index, length, dex_pc);
current_block_->AddInstruction(index);
temps.Add(index);
if (is_put) {
HInstruction* value = LoadLocal(source_or_dest_reg, anticipated_type);
// TODO: Insert a type check node if the type is Object.
current_block_->AddInstruction(new (arena_) HArraySet(
- object, index, value, anticipated_type, dex_offset));
+ object, index, value, anticipated_type, dex_pc));
} else {
current_block_->AddInstruction(new (arena_) HArrayGet(object, index, anticipated_type));
UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction());
}
}
-void HGraphBuilder::BuildFilledNewArray(uint32_t dex_offset,
+void HGraphBuilder::BuildFilledNewArray(uint32_t dex_pc,
uint32_t type_index,
uint32_t number_of_vreg_arguments,
bool is_range,
uint32_t* args,
uint32_t register_index) {
HInstruction* length = GetIntConstant(number_of_vreg_arguments);
- HInstruction* object = new (arena_) HNewArray(length, dex_offset, type_index);
+ HInstruction* object = new (arena_) HNewArray(length, dex_pc, type_index);
current_block_->AddInstruction(object);
const char* descriptor = dex_file_->StringByTypeIdx(type_index);
@@ -613,13 +654,13 @@
bool is_reference_array = (primitive == 'L') || (primitive == '[');
Primitive::Type type = is_reference_array ? Primitive::kPrimNot : Primitive::kPrimInt;
- Temporaries temps(graph_, 1);
+ Temporaries temps(graph_);
temps.Add(object);
for (size_t i = 0; i < number_of_vreg_arguments; ++i) {
HInstruction* value = LoadLocal(is_range ? register_index + i : args[i], type);
HInstruction* index = GetIntConstant(i);
current_block_->AddInstruction(
- new (arena_) HArraySet(object, index, value, type, dex_offset));
+ new (arena_) HArraySet(object, index, value, type, dex_pc));
}
latest_result_ = object;
}
@@ -629,26 +670,26 @@
const T* data,
uint32_t element_count,
Primitive::Type anticipated_type,
- uint32_t dex_offset) {
+ uint32_t dex_pc) {
for (uint32_t i = 0; i < element_count; ++i) {
HInstruction* index = GetIntConstant(i);
HInstruction* value = GetIntConstant(data[i]);
current_block_->AddInstruction(new (arena_) HArraySet(
- object, index, value, anticipated_type, dex_offset));
+ object, index, value, anticipated_type, dex_pc));
}
}
-void HGraphBuilder::BuildFillArrayData(const Instruction& instruction, uint32_t dex_offset) {
- Temporaries temps(graph_, 1);
+void HGraphBuilder::BuildFillArrayData(const Instruction& instruction, uint32_t dex_pc) {
+ Temporaries temps(graph_);
HInstruction* array = LoadLocal(instruction.VRegA_31t(), Primitive::kPrimNot);
- HNullCheck* null_check = new (arena_) HNullCheck(array, dex_offset);
+ HNullCheck* null_check = new (arena_) HNullCheck(array, dex_pc);
current_block_->AddInstruction(null_check);
temps.Add(null_check);
HInstruction* length = new (arena_) HArrayLength(null_check);
current_block_->AddInstruction(length);
- int32_t payload_offset = instruction.VRegB_31t() + dex_offset;
+ int32_t payload_offset = instruction.VRegB_31t() + dex_pc;
const Instruction::ArrayDataPayload* payload =
reinterpret_cast<const Instruction::ArrayDataPayload*>(code_start_ + payload_offset);
const uint8_t* data = payload->data;
@@ -657,7 +698,7 @@
// Implementation of this DEX instruction seems to be that the bounds check is
// done before doing any stores.
HInstruction* last_index = GetIntConstant(payload->element_count - 1);
- current_block_->AddInstruction(new (arena_) HBoundsCheck(last_index, length, dex_offset));
+ current_block_->AddInstruction(new (arena_) HBoundsCheck(last_index, length, dex_pc));
switch (payload->element_width) {
case 1:
@@ -665,27 +706,27 @@
reinterpret_cast<const int8_t*>(data),
element_count,
Primitive::kPrimByte,
- dex_offset);
+ dex_pc);
break;
case 2:
BuildFillArrayData(null_check,
reinterpret_cast<const int16_t*>(data),
element_count,
Primitive::kPrimShort,
- dex_offset);
+ dex_pc);
break;
case 4:
BuildFillArrayData(null_check,
reinterpret_cast<const int32_t*>(data),
element_count,
Primitive::kPrimInt,
- dex_offset);
+ dex_pc);
break;
case 8:
BuildFillWideArrayData(null_check,
reinterpret_cast<const int64_t*>(data),
element_count,
- dex_offset);
+ dex_pc);
break;
default:
LOG(FATAL) << "Unknown element width for " << payload->element_width;
@@ -695,24 +736,56 @@
void HGraphBuilder::BuildFillWideArrayData(HInstruction* object,
const int64_t* data,
uint32_t element_count,
- uint32_t dex_offset) {
+ uint32_t dex_pc) {
for (uint32_t i = 0; i < element_count; ++i) {
HInstruction* index = GetIntConstant(i);
HInstruction* value = GetLongConstant(data[i]);
current_block_->AddInstruction(new (arena_) HArraySet(
- object, index, value, Primitive::kPrimLong, dex_offset));
+ object, index, value, Primitive::kPrimLong, dex_pc));
}
}
-void HGraphBuilder::PotentiallyAddSuspendCheck(int32_t target_offset, uint32_t dex_offset) {
+bool HGraphBuilder::BuildTypeCheck(const Instruction& instruction,
+ uint8_t destination,
+ uint8_t reference,
+ uint16_t type_index,
+ uint32_t dex_pc) {
+ bool type_known_final;
+ bool type_known_abstract;
+ bool is_referrers_class;
+ bool can_access = compiler_driver_->CanAccessTypeWithoutChecks(
+ dex_compilation_unit_->GetDexMethodIndex(), *dex_file_, type_index,
+ &type_known_final, &type_known_abstract, &is_referrers_class);
+ if (!can_access) {
+ return false;
+ }
+ HInstruction* object = LoadLocal(reference, Primitive::kPrimNot);
+ HLoadClass* cls = new (arena_) HLoadClass(type_index, is_referrers_class, dex_pc);
+ current_block_->AddInstruction(cls);
+ // The class needs a temporary before being used by the type check.
+ Temporaries temps(graph_);
+ temps.Add(cls);
+ if (instruction.Opcode() == Instruction::INSTANCE_OF) {
+ current_block_->AddInstruction(
+ new (arena_) HInstanceOf(object, cls, type_known_final, dex_pc));
+ UpdateLocal(destination, current_block_->GetLastInstruction());
+ } else {
+ DCHECK_EQ(instruction.Opcode(), Instruction::CHECK_CAST);
+ current_block_->AddInstruction(
+ new (arena_) HCheckCast(object, cls, type_known_final, dex_pc));
+ }
+ return true;
+}
+
+void HGraphBuilder::PotentiallyAddSuspendCheck(int32_t target_offset, uint32_t dex_pc) {
if (target_offset <= 0) {
// Unconditionnally add a suspend check to backward branches. We can remove
// them after we recognize loops in the graph.
- current_block_->AddInstruction(new (arena_) HSuspendCheck(dex_offset));
+ current_block_->AddInstruction(new (arena_) HSuspendCheck(dex_pc));
}
}
-bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32_t dex_offset) {
+bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32_t dex_pc) {
if (current_block_ == nullptr) {
return true; // Dead code
}
@@ -815,8 +888,8 @@
}
#define IF_XX(comparison, cond) \
- case Instruction::IF_##cond: If_22t<comparison>(instruction, dex_offset); break; \
- case Instruction::IF_##cond##Z: If_21t<comparison>(instruction, dex_offset); break
+ case Instruction::IF_##cond: If_22t<comparison>(instruction, dex_pc); break; \
+ case Instruction::IF_##cond##Z: If_21t<comparison>(instruction, dex_pc); break
IF_XX(HEqual, EQ);
IF_XX(HNotEqual, NE);
@@ -829,8 +902,8 @@
case Instruction::GOTO_16:
case Instruction::GOTO_32: {
int32_t offset = instruction.GetTargetOffset();
- PotentiallyAddSuspendCheck(offset, dex_offset);
- HBasicBlock* target = FindBlockStartingAt(offset + dex_offset);
+ PotentiallyAddSuspendCheck(offset, dex_pc);
+ HBasicBlock* target = FindBlockStartingAt(offset + dex_pc);
DCHECK(target != nullptr);
current_block_->AddInstruction(new (arena_) HGoto());
current_block_->AddSuccessor(target);
@@ -858,29 +931,31 @@
break;
}
- case Instruction::INVOKE_STATIC:
case Instruction::INVOKE_DIRECT:
- case Instruction::INVOKE_VIRTUAL:
- case Instruction::INVOKE_INTERFACE: {
+ case Instruction::INVOKE_INTERFACE:
+ case Instruction::INVOKE_STATIC:
+ case Instruction::INVOKE_SUPER:
+ case Instruction::INVOKE_VIRTUAL: {
uint32_t method_idx = instruction.VRegB_35c();
uint32_t number_of_vreg_arguments = instruction.VRegA_35c();
uint32_t args[5];
instruction.GetVarArgs(args);
- if (!BuildInvoke(instruction, dex_offset, method_idx,
+ if (!BuildInvoke(instruction, dex_pc, method_idx,
number_of_vreg_arguments, false, args, -1)) {
return false;
}
break;
}
- case Instruction::INVOKE_STATIC_RANGE:
case Instruction::INVOKE_DIRECT_RANGE:
- case Instruction::INVOKE_VIRTUAL_RANGE:
- case Instruction::INVOKE_INTERFACE_RANGE: {
+ case Instruction::INVOKE_INTERFACE_RANGE:
+ case Instruction::INVOKE_STATIC_RANGE:
+ case Instruction::INVOKE_SUPER_RANGE:
+ case Instruction::INVOKE_VIRTUAL_RANGE: {
uint32_t method_idx = instruction.VRegB_3rc();
uint32_t number_of_vreg_arguments = instruction.VRegA_3rc();
uint32_t register_index = instruction.VRegC();
- if (!BuildInvoke(instruction, dex_offset, method_idx,
+ if (!BuildInvoke(instruction, dex_pc, method_idx,
number_of_vreg_arguments, true, nullptr, register_index)) {
return false;
}
@@ -922,6 +997,21 @@
break;
}
+ case Instruction::LONG_TO_INT: {
+ Conversion_12x(instruction, Primitive::kPrimLong, Primitive::kPrimInt);
+ break;
+ }
+
+ case Instruction::INT_TO_BYTE: {
+ Conversion_12x(instruction, Primitive::kPrimInt, Primitive::kPrimByte);
+ break;
+ }
+
+ case Instruction::INT_TO_CHAR: {
+ Conversion_12x(instruction, Primitive::kPrimInt, Primitive::kPrimChar);
+ break;
+ }
+
case Instruction::ADD_INT: {
Binop_23x<HAdd>(instruction, Primitive::kPrimInt);
break;
@@ -989,17 +1079,53 @@
case Instruction::DIV_INT: {
BuildCheckedDiv(instruction.VRegA(), instruction.VRegB(), instruction.VRegC(),
- dex_offset, Primitive::kPrimInt, false);
+ dex_pc, Primitive::kPrimInt, false);
+ break;
+ }
+
+ case Instruction::DIV_LONG: {
+ BuildCheckedDiv(instruction.VRegA(), instruction.VRegB(), instruction.VRegC(),
+ dex_pc, Primitive::kPrimLong, false);
break;
}
case Instruction::DIV_FLOAT: {
- Binop_23x<HDiv>(instruction, Primitive::kPrimFloat);
+ Binop_23x<HDiv>(instruction, Primitive::kPrimFloat, dex_pc);
break;
}
case Instruction::DIV_DOUBLE: {
- Binop_23x<HDiv>(instruction, Primitive::kPrimDouble);
+ Binop_23x<HDiv>(instruction, Primitive::kPrimDouble, dex_pc);
+ break;
+ }
+
+ case Instruction::AND_INT: {
+ Binop_23x<HAnd>(instruction, Primitive::kPrimInt);
+ break;
+ }
+
+ case Instruction::AND_LONG: {
+ Binop_23x<HAnd>(instruction, Primitive::kPrimLong);
+ break;
+ }
+
+ case Instruction::OR_INT: {
+ Binop_23x<HOr>(instruction, Primitive::kPrimInt);
+ break;
+ }
+
+ case Instruction::OR_LONG: {
+ Binop_23x<HOr>(instruction, Primitive::kPrimLong);
+ break;
+ }
+
+ case Instruction::XOR_INT: {
+ Binop_23x<HXor>(instruction, Primitive::kPrimInt);
+ break;
+ }
+
+ case Instruction::XOR_LONG: {
+ Binop_23x<HXor>(instruction, Primitive::kPrimLong);
break;
}
@@ -1060,17 +1186,53 @@
case Instruction::DIV_INT_2ADDR: {
BuildCheckedDiv(instruction.VRegA(), instruction.VRegA(), instruction.VRegB(),
- dex_offset, Primitive::kPrimInt, false);
+ dex_pc, Primitive::kPrimInt, false);
+ break;
+ }
+
+ case Instruction::DIV_LONG_2ADDR: {
+ BuildCheckedDiv(instruction.VRegA(), instruction.VRegA(), instruction.VRegB(),
+ dex_pc, Primitive::kPrimLong, false);
break;
}
case Instruction::DIV_FLOAT_2ADDR: {
- Binop_12x<HDiv>(instruction, Primitive::kPrimFloat);
+ Binop_12x<HDiv>(instruction, Primitive::kPrimFloat, dex_pc);
break;
}
case Instruction::DIV_DOUBLE_2ADDR: {
- Binop_12x<HDiv>(instruction, Primitive::kPrimDouble);
+ Binop_12x<HDiv>(instruction, Primitive::kPrimDouble, dex_pc);
+ break;
+ }
+
+ case Instruction::AND_INT_2ADDR: {
+ Binop_12x<HAnd>(instruction, Primitive::kPrimInt);
+ break;
+ }
+
+ case Instruction::AND_LONG_2ADDR: {
+ Binop_12x<HAnd>(instruction, Primitive::kPrimLong);
+ break;
+ }
+
+ case Instruction::OR_INT_2ADDR: {
+ Binop_12x<HOr>(instruction, Primitive::kPrimInt);
+ break;
+ }
+
+ case Instruction::OR_LONG_2ADDR: {
+ Binop_12x<HOr>(instruction, Primitive::kPrimLong);
+ break;
+ }
+
+ case Instruction::XOR_INT_2ADDR: {
+ Binop_12x<HXor>(instruction, Primitive::kPrimInt);
+ break;
+ }
+
+ case Instruction::XOR_LONG_2ADDR: {
+ Binop_12x<HXor>(instruction, Primitive::kPrimLong);
break;
}
@@ -1079,6 +1241,21 @@
break;
}
+ case Instruction::AND_INT_LIT16: {
+ Binop_22s<HAnd>(instruction, false);
+ break;
+ }
+
+ case Instruction::OR_INT_LIT16: {
+ Binop_22s<HOr>(instruction, false);
+ break;
+ }
+
+ case Instruction::XOR_INT_LIT16: {
+ Binop_22s<HXor>(instruction, false);
+ break;
+ }
+
case Instruction::RSUB_INT: {
Binop_22s<HSub>(instruction, true);
break;
@@ -1094,6 +1271,21 @@
break;
}
+ case Instruction::AND_INT_LIT8: {
+ Binop_22b<HAnd>(instruction, false);
+ break;
+ }
+
+ case Instruction::OR_INT_LIT8: {
+ Binop_22b<HOr>(instruction, false);
+ break;
+ }
+
+ case Instruction::XOR_INT_LIT8: {
+ Binop_22b<HXor>(instruction, false);
+ break;
+ }
+
case Instruction::RSUB_INT_LIT8: {
Binop_22b<HSub>(instruction, true);
break;
@@ -1107,13 +1299,13 @@
case Instruction::DIV_INT_LIT16:
case Instruction::DIV_INT_LIT8: {
BuildCheckedDiv(instruction.VRegA(), instruction.VRegB(), instruction.VRegC(),
- dex_offset, Primitive::kPrimInt, true);
+ dex_pc, Primitive::kPrimInt, true);
break;
}
case Instruction::NEW_INSTANCE: {
current_block_->AddInstruction(
- new (arena_) HNewInstance(dex_offset, instruction.VRegB_21c()));
+ new (arena_) HNewInstance(dex_pc, instruction.VRegB_21c()));
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
break;
}
@@ -1121,7 +1313,7 @@
case Instruction::NEW_ARRAY: {
HInstruction* length = LoadLocal(instruction.VRegB_22c(), Primitive::kPrimInt);
current_block_->AddInstruction(
- new (arena_) HNewArray(length, dex_offset, instruction.VRegC_22c()));
+ new (arena_) HNewArray(length, dex_pc, instruction.VRegC_22c()));
UpdateLocal(instruction.VRegA_22c(), current_block_->GetLastInstruction());
break;
}
@@ -1131,7 +1323,7 @@
uint32_t type_index = instruction.VRegB_35c();
uint32_t args[5];
instruction.GetVarArgs(args);
- BuildFilledNewArray(dex_offset, type_index, number_of_vreg_arguments, false, args, 0);
+ BuildFilledNewArray(dex_pc, type_index, number_of_vreg_arguments, false, args, 0);
break;
}
@@ -1140,12 +1332,12 @@
uint32_t type_index = instruction.VRegB_3rc();
uint32_t register_index = instruction.VRegC_3rc();
BuildFilledNewArray(
- dex_offset, type_index, number_of_vreg_arguments, true, nullptr, register_index);
+ dex_pc, type_index, number_of_vreg_arguments, true, nullptr, register_index);
break;
}
case Instruction::FILL_ARRAY_DATA: {
- BuildFillArrayData(instruction, dex_offset);
+ BuildFillArrayData(instruction, dex_pc);
break;
}
@@ -1171,7 +1363,7 @@
case Instruction::IGET_BYTE:
case Instruction::IGET_CHAR:
case Instruction::IGET_SHORT: {
- if (!BuildInstanceFieldAccess(instruction, dex_offset, false)) {
+ if (!BuildInstanceFieldAccess(instruction, dex_pc, false)) {
return false;
}
break;
@@ -1184,7 +1376,7 @@
case Instruction::IPUT_BYTE:
case Instruction::IPUT_CHAR:
case Instruction::IPUT_SHORT: {
- if (!BuildInstanceFieldAccess(instruction, dex_offset, true)) {
+ if (!BuildInstanceFieldAccess(instruction, dex_pc, true)) {
return false;
}
break;
@@ -1197,7 +1389,7 @@
case Instruction::SGET_BYTE:
case Instruction::SGET_CHAR:
case Instruction::SGET_SHORT: {
- if (!BuildStaticFieldAccess(instruction, dex_offset, false)) {
+ if (!BuildStaticFieldAccess(instruction, dex_pc, false)) {
return false;
}
break;
@@ -1210,7 +1402,7 @@
case Instruction::SPUT_BYTE:
case Instruction::SPUT_CHAR:
case Instruction::SPUT_SHORT: {
- if (!BuildStaticFieldAccess(instruction, dex_offset, true)) {
+ if (!BuildStaticFieldAccess(instruction, dex_pc, true)) {
return false;
}
break;
@@ -1218,11 +1410,11 @@
#define ARRAY_XX(kind, anticipated_type) \
case Instruction::AGET##kind: { \
- BuildArrayAccess(instruction, dex_offset, false, anticipated_type); \
+ BuildArrayAccess(instruction, dex_pc, false, anticipated_type); \
break; \
} \
case Instruction::APUT##kind: { \
- BuildArrayAccess(instruction, dex_offset, true, anticipated_type); \
+ BuildArrayAccess(instruction, dex_pc, true, anticipated_type); \
break; \
}
@@ -1238,7 +1430,7 @@
HInstruction* object = LoadLocal(instruction.VRegB_12x(), Primitive::kPrimNot);
// No need for a temporary for the null check, it is the only input of the following
// instruction.
- object = new (arena_) HNullCheck(object, dex_offset);
+ object = new (arena_) HNullCheck(object, dex_pc);
current_block_->AddInstruction(object);
current_block_->AddInstruction(new (arena_) HArrayLength(object));
UpdateLocal(instruction.VRegA_12x(), current_block_->GetLastInstruction());
@@ -1246,13 +1438,13 @@
}
case Instruction::CONST_STRING: {
- current_block_->AddInstruction(new (arena_) HLoadString(instruction.VRegB_21c(), dex_offset));
+ current_block_->AddInstruction(new (arena_) HLoadString(instruction.VRegB_21c(), dex_pc));
UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction());
break;
}
case Instruction::CONST_STRING_JUMBO: {
- current_block_->AddInstruction(new (arena_) HLoadString(instruction.VRegB_31c(), dex_offset));
+ current_block_->AddInstruction(new (arena_) HLoadString(instruction.VRegB_31c(), dex_pc));
UpdateLocal(instruction.VRegA_31c(), current_block_->GetLastInstruction());
break;
}
@@ -1269,7 +1461,7 @@
return false;
}
current_block_->AddInstruction(
- new (arena_) HLoadClass(type_index, is_referrers_class, dex_offset));
+ new (arena_) HLoadClass(type_index, is_referrers_class, dex_pc));
UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction());
break;
}
@@ -1282,7 +1474,7 @@
case Instruction::THROW: {
HInstruction* exception = LoadLocal(instruction.VRegA_11x(), Primitive::kPrimNot);
- current_block_->AddInstruction(new (arena_) HThrow(exception, dex_offset));
+ current_block_->AddInstruction(new (arena_) HThrow(exception, dex_pc));
// A throw instruction must branch to the exit block.
current_block_->AddSuccessor(exit_block_);
// We finished building this block. Set the current block to null to avoid
@@ -1292,25 +1484,37 @@
}
case Instruction::INSTANCE_OF: {
+ uint8_t destination = instruction.VRegA_22c();
+ uint8_t reference = instruction.VRegB_22c();
uint16_t type_index = instruction.VRegC_22c();
- bool type_known_final;
- bool type_known_abstract;
- bool is_referrers_class;
- bool can_access = compiler_driver_->CanAccessTypeWithoutChecks(
- dex_compilation_unit_->GetDexMethodIndex(), *dex_file_, type_index,
- &type_known_final, &type_known_abstract, &is_referrers_class);
- if (!can_access) {
+ if (!BuildTypeCheck(instruction, destination, reference, type_index, dex_pc)) {
return false;
}
- HInstruction* object = LoadLocal(instruction.VRegB_22c(), Primitive::kPrimNot);
- HLoadClass* cls = new (arena_) HLoadClass(type_index, is_referrers_class, dex_offset);
- current_block_->AddInstruction(cls);
- // The class needs a temporary before being used by the type check.
- Temporaries temps(graph_, 1);
- temps.Add(cls);
- current_block_->AddInstruction(
- new (arena_) HTypeCheck(object, cls, type_known_final, dex_offset));
- UpdateLocal(instruction.VRegA_22c(), current_block_->GetLastInstruction());
+ break;
+ }
+
+ case Instruction::CHECK_CAST: {
+ uint8_t reference = instruction.VRegA_21c();
+ uint16_t type_index = instruction.VRegB_21c();
+ if (!BuildTypeCheck(instruction, -1, reference, type_index, dex_pc)) {
+ return false;
+ }
+ break;
+ }
+
+ case Instruction::MONITOR_ENTER: {
+ current_block_->AddInstruction(new (arena_) HMonitorOperation(
+ LoadLocal(instruction.VRegA_11x(), Primitive::kPrimNot),
+ HMonitorOperation::kEnter,
+ dex_pc));
+ break;
+ }
+
+ case Instruction::MONITOR_EXIT: {
+ current_block_->AddInstruction(new (arena_) HMonitorOperation(
+ LoadLocal(instruction.VRegA_11x(), Primitive::kPrimNot),
+ HMonitorOperation::kExit,
+ dex_pc));
break;
}
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 09c9a51..799e628 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -76,7 +76,7 @@
// Analyzes the dex instruction and adds HInstruction to the graph
// to execute that instruction. Returns whether the instruction can
// be handled.
- bool AnalyzeDexInstruction(const Instruction& instruction, uint32_t dex_offset);
+ bool AnalyzeDexInstruction(const Instruction& instruction, uint32_t dex_pc);
// Finds all instructions that start a new block, and populates branch_targets_ with
// the newly created blocks.
@@ -92,7 +92,7 @@
HLocal* GetLocalAt(int register_index) const;
void UpdateLocal(int register_index, HInstruction* instruction) const;
HInstruction* LoadLocal(int register_index, Primitive::Type type) const;
- void PotentiallyAddSuspendCheck(int32_t target_offset, uint32_t dex_offset);
+ void PotentiallyAddSuspendCheck(int32_t target_offset, uint32_t dex_pc);
void InitializeParameters(uint16_t number_of_parameters);
template<typename T>
@@ -102,16 +102,22 @@
void Binop_23x(const Instruction& instruction, Primitive::Type type);
template<typename T>
+ void Binop_23x(const Instruction& instruction, Primitive::Type type, uint32_t dex_pc);
+
+ template<typename T>
void Binop_12x(const Instruction& instruction, Primitive::Type type);
template<typename T>
+ void Binop_12x(const Instruction& instruction, Primitive::Type type, uint32_t dex_pc);
+
+ template<typename T>
void Binop_22b(const Instruction& instruction, bool reverse);
template<typename T>
void Binop_22s(const Instruction& instruction, bool reverse);
- template<typename T> void If_21t(const Instruction& instruction, uint32_t dex_offset);
- template<typename T> void If_22t(const Instruction& instruction, uint32_t dex_offset);
+ template<typename T> void If_21t(const Instruction& instruction, uint32_t dex_pc);
+ template<typename T> void If_22t(const Instruction& instruction, uint32_t dex_pc);
void Conversion_12x(const Instruction& instruction,
Primitive::Type input_type,
@@ -119,27 +125,27 @@
void BuildCheckedDiv(uint16_t out_reg,
uint16_t first_reg,
- int32_t second_reg, // can be a constant
- uint32_t dex_offset,
+ int64_t second_reg_or_constant,
+ uint32_t dex_pc,
Primitive::Type type,
bool second_is_lit);
void BuildReturn(const Instruction& instruction, Primitive::Type type);
// Builds an instance field access node and returns whether the instruction is supported.
- bool BuildInstanceFieldAccess(const Instruction& instruction, uint32_t dex_offset, bool is_put);
+ bool BuildInstanceFieldAccess(const Instruction& instruction, uint32_t dex_pc, bool is_put);
// Builds a static field access node and returns whether the instruction is supported.
- bool BuildStaticFieldAccess(const Instruction& instruction, uint32_t dex_offset, bool is_put);
+ bool BuildStaticFieldAccess(const Instruction& instruction, uint32_t dex_pc, bool is_put);
void BuildArrayAccess(const Instruction& instruction,
- uint32_t dex_offset,
+ uint32_t dex_pc,
bool is_get,
Primitive::Type anticipated_type);
// Builds an invocation node and returns whether the instruction is supported.
bool BuildInvoke(const Instruction& instruction,
- uint32_t dex_offset,
+ uint32_t dex_pc,
uint32_t method_idx,
uint32_t number_of_vreg_arguments,
bool is_range,
@@ -147,14 +153,14 @@
uint32_t register_index);
// Builds a new array node and the instructions that fill it.
- void BuildFilledNewArray(uint32_t dex_offset,
+ void BuildFilledNewArray(uint32_t dex_pc,
uint32_t type_index,
uint32_t number_of_vreg_arguments,
bool is_range,
uint32_t* args,
uint32_t register_index);
- void BuildFillArrayData(const Instruction& instruction, uint32_t dex_offset);
+ void BuildFillArrayData(const Instruction& instruction, uint32_t dex_pc);
// Fills the given object with data as specified in the fill-array-data
// instruction. Currently only used for non-reference and non-floating point
@@ -164,14 +170,22 @@
const T* data,
uint32_t element_count,
Primitive::Type anticipated_type,
- uint32_t dex_offset);
+ uint32_t dex_pc);
// Fills the given object with data as specified in the fill-array-data
// instruction. The data must be for long and double arrays.
void BuildFillWideArrayData(HInstruction* object,
const int64_t* data,
uint32_t element_count,
- uint32_t dex_offset);
+ uint32_t dex_pc);
+
+ // Builds a `HInstanceOf`, or a `HCheckCast` instruction.
+ // Returns whether we succeeded in building the instruction.
+ bool BuildTypeCheck(const Instruction& instruction,
+ uint8_t destination,
+ uint8_t reference,
+ uint16_t type_index,
+ uint32_t dex_pc);
ArenaAllocator* const arena_;
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 9d17263..4d71cb7 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -51,7 +51,7 @@
MarkNotLeaf();
}
ComputeFrameSize(GetGraph()->GetNumberOfLocalVRegs()
- + GetGraph()->GetNumberOfTemporaries()
+ + GetGraph()->GetTemporariesVRegSlots()
+ 1 /* filler */,
0, /* the baseline compiler does not have live registers at slow path */
GetGraph()->GetMaximumNumberOfOutVRegs()
@@ -150,12 +150,15 @@
Location CodeGenerator::GetTemporaryLocation(HTemporary* temp) const {
uint16_t number_of_locals = GetGraph()->GetNumberOfLocalVRegs();
+ // The type of the previous instruction tells us if we need a single or double stack slot.
+ Primitive::Type type = temp->GetType();
+ int32_t temp_size = (type == Primitive::kPrimLong) || (type == Primitive::kPrimDouble) ? 2 : 1;
// Use the temporary region (right below the dex registers).
int32_t slot = GetFrameSize() - FrameEntrySpillSize()
- kVRegSize // filler
- (number_of_locals * kVRegSize)
- - ((1 + temp->GetIndex()) * kVRegSize);
- return Location::StackSlot(slot);
+ - ((temp_size + temp->GetIndex()) * kVRegSize);
+ return temp_size == 2 ? Location::DoubleStackSlot(slot) : Location::StackSlot(slot);
}
int32_t CodeGenerator::GetStackSlot(HLocal* local) const {
@@ -632,4 +635,13 @@
}
}
+void CodeGenerator::EmitParallelMoves(Location from1, Location to1, Location from2, Location to2) {
+ MoveOperands move1(from1, to1, nullptr);
+ MoveOperands move2(from2, to2, nullptr);
+ HParallelMove parallel_move(GetGraph()->GetArena());
+ parallel_move.AddMove(&move1);
+ parallel_move.AddMove(&move2);
+ GetMoveResolver()->EmitNativeCode(¶llel_move);
+}
+
} // namespace art
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index fc4ea4b..63bf96c 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -17,9 +17,9 @@
#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_H_
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_H_
+#include "arch/instruction_set.h"
#include "base/bit_field.h"
#include "globals.h"
-#include "instruction_set.h"
#include "locations.h"
#include "memory_region.h"
#include "nodes.h"
@@ -33,6 +33,7 @@
class Assembler;
class CodeGenerator;
class DexCompilationUnit;
+class ParallelMoveResolver;
class SrcMap;
class CodeAllocator {
@@ -165,6 +166,8 @@
// of the architecture.
static size_t GetCacheOffset(uint32_t index);
+ void EmitParallelMoves(Location from1, Location to1, Location from2, Location to2);
+
protected:
CodeGenerator(HGraph* graph,
size_t number_of_core_registers,
@@ -197,6 +200,8 @@
virtual Location GetStackLocation(HLoadLocal* load) const = 0;
+ virtual ParallelMoveResolver* GetMoveResolver() = 0;
+
// Frame size required for this method.
uint32_t frame_size_;
uint32_t core_spill_mask_;
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 467c2a6..09e1b97 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -22,9 +22,9 @@
#include "mirror/art_method.h"
#include "mirror/class.h"
#include "thread.h"
-#include "utils/assembler.h"
#include "utils/arm/assembler_arm.h"
#include "utils/arm/managed_register_arm.h"
+#include "utils/assembler.h"
#include "utils/stack_checks.h"
namespace art {
@@ -41,7 +41,7 @@
static constexpr int kNumberOfPushedRegistersAtEntry = 1 + 2; // LR, R6, R7
static constexpr int kCurrentMethodStackOffset = 0;
-static constexpr Register kRuntimeParameterCoreRegisters[] = { R0, R1, R2 };
+static constexpr Register kRuntimeParameterCoreRegisters[] = { R0, R1, R2, R3 };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
arraysize(kRuntimeParameterCoreRegisters);
static constexpr SRegister kRuntimeParameterFpuRegisters[] = { };
@@ -169,11 +169,14 @@
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
+ // We're moving two locations to locations that could overlap, so we need a parallel
+ // move resolver.
InvokeRuntimeCallingConvention calling_convention;
- arm_codegen->Move32(
- Location::RegisterLocation(calling_convention.GetRegisterAt(0)), index_location_);
- arm_codegen->Move32(
- Location::RegisterLocation(calling_convention.GetRegisterAt(1)), length_location_);
+ codegen->EmitParallelMoves(
+ index_location_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ length_location_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
arm_codegen->InvokeRuntime(
QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc());
}
@@ -269,13 +272,19 @@
class TypeCheckSlowPathARM : public SlowPathCodeARM {
public:
- explicit TypeCheckSlowPathARM(HTypeCheck* instruction, Location object_class)
+ TypeCheckSlowPathARM(HInstruction* instruction,
+ Location class_to_check,
+ Location object_class,
+ uint32_t dex_pc)
: instruction_(instruction),
- object_class_(object_class) {}
+ class_to_check_(class_to_check),
+ object_class_(object_class),
+ dex_pc_(dex_pc) {}
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
- DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+ DCHECK(instruction_->IsCheckCast()
+ || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
@@ -284,28 +293,29 @@
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
- MoveOperands move1(locations->InAt(1),
- Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- nullptr);
- MoveOperands move2(object_class_,
- Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
- nullptr);
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
- parallel_move.AddMove(&move1);
- parallel_move.AddMove(&move2);
- arm_codegen->GetMoveResolver()->EmitNativeCode(¶llel_move);
+ codegen->EmitParallelMoves(
+ class_to_check_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ object_class_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
- arm_codegen->InvokeRuntime(
- QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, instruction_->GetDexPc());
- arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
+ if (instruction_->IsInstanceOf()) {
+ arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc_);
+ arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
+ } else {
+ DCHECK(instruction_->IsCheckCast());
+ arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_);
+ }
codegen->RestoreLiveRegisters(locations);
__ b(GetExitLabel());
}
private:
- HTypeCheck* const instruction_;
+ HInstruction* const instruction_;
+ const Location class_to_check_;
const Location object_class_;
+ uint32_t dex_pc_;
DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM);
};
@@ -660,13 +670,13 @@
__ LoadSFromOffset(destination.As<SRegister>(), SP, source.GetStackIndex());
}
} else {
- DCHECK(destination.IsStackSlot());
+ DCHECK(destination.IsStackSlot()) << destination;
if (source.IsRegister()) {
__ StoreToOffset(kStoreWord, source.As<Register>(), SP, destination.GetStackIndex());
} else if (source.IsFpuRegister()) {
__ StoreSToOffset(source.As<SRegister>(), SP, destination.GetStackIndex());
} else {
- DCHECK(source.IsStackSlot());
+ DCHECK(source.IsStackSlot()) << source;
__ LoadFromOffset(kLoadWord, IP, SP, source.GetStackIndex());
__ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
}
@@ -768,26 +778,29 @@
return;
}
- if (instruction->IsIntConstant()) {
- int32_t value = instruction->AsIntConstant()->GetValue();
- if (location.IsRegister()) {
- __ LoadImmediate(location.As<Register>(), value);
- } else {
- DCHECK(location.IsStackSlot());
- __ LoadImmediate(IP, value);
- __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
- }
- } else if (instruction->IsLongConstant()) {
- int64_t value = instruction->AsLongConstant()->GetValue();
- if (location.IsRegisterPair()) {
- __ LoadImmediate(location.AsRegisterPairLow<Register>(), Low32Bits(value));
- __ LoadImmediate(location.AsRegisterPairHigh<Register>(), High32Bits(value));
- } else {
- DCHECK(location.IsDoubleStackSlot());
- __ LoadImmediate(IP, Low32Bits(value));
- __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
- __ LoadImmediate(IP, High32Bits(value));
- __ StoreToOffset(kStoreWord, IP, SP, location.GetHighStackIndex(kArmWordSize));
+ if (locations != nullptr && locations->Out().IsConstant()) {
+ HConstant* const_to_move = locations->Out().GetConstant();
+ if (const_to_move->IsIntConstant()) {
+ int32_t value = const_to_move->AsIntConstant()->GetValue();
+ if (location.IsRegister()) {
+ __ LoadImmediate(location.As<Register>(), value);
+ } else {
+ DCHECK(location.IsStackSlot());
+ __ LoadImmediate(IP, value);
+ __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
+ }
+ } else if (const_to_move->IsLongConstant()) {
+ int64_t value = const_to_move->AsLongConstant()->GetValue();
+ if (location.IsRegisterPair()) {
+ __ LoadImmediate(location.AsRegisterPairLow<Register>(), Low32Bits(value));
+ __ LoadImmediate(location.AsRegisterPairHigh<Register>(), High32Bits(value));
+ } else {
+ DCHECK(location.IsDoubleStackSlot());
+ __ LoadImmediate(IP, Low32Bits(value));
+ __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
+ __ LoadImmediate(IP, High32Bits(value));
+ __ StoreToOffset(kStoreWord, IP, SP, location.GetHighStackIndex(kArmWordSize));
+ }
}
} else if (instruction->IsLoadLocal()) {
uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
@@ -812,7 +825,12 @@
}
} else if (instruction->IsTemporary()) {
Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
- Move32(location, temp_location);
+ if (temp_location.IsStackSlot()) {
+ Move32(location, temp_location);
+ } else {
+ DCHECK(temp_location.IsDoubleStackSlot());
+ Move64(location, temp_location);
+ }
} else {
DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
switch (instruction->GetType()) {
@@ -1333,13 +1351,49 @@
Primitive::Type result_type = conversion->GetResultType();
Primitive::Type input_type = conversion->GetInputType();
switch (result_type) {
+ case Primitive::kPrimByte:
+ switch (input_type) {
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-byte' instruction.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // Processing a Dex `long-to-int' instruction.
+ locations->SetInAt(0, Location::Any());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
case Primitive::kPrimLong:
switch (input_type) {
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimChar:
- // int-to-long conversion.
+ // Processing a Dex `int-to-long' instruction.
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
@@ -1356,7 +1410,23 @@
}
break;
- case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-char' instruction.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Type conversion from " << input_type
@@ -1376,13 +1446,57 @@
Primitive::Type result_type = conversion->GetResultType();
Primitive::Type input_type = conversion->GetInputType();
switch (result_type) {
+ case Primitive::kPrimByte:
+ switch (input_type) {
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-byte' instruction.
+ __ sbfx(out.As<Register>(), in.As<Register>(), 0, 8);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // Processing a Dex `long-to-int' instruction.
+ DCHECK(out.IsRegister());
+ if (in.IsRegisterPair()) {
+ __ Mov(out.As<Register>(), in.AsRegisterPairLow<Register>());
+ } else if (in.IsDoubleStackSlot()) {
+ __ LoadFromOffset(kLoadWord, out.As<Register>(), SP, in.GetStackIndex());
+ } else {
+ DCHECK(in.IsConstant());
+ DCHECK(in.GetConstant()->IsLongConstant());
+ int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
+ __ LoadImmediate(out.As<Register>(), static_cast<int32_t>(value));
+ }
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
case Primitive::kPrimLong:
switch (input_type) {
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimChar:
- // int-to-long conversion.
+ // Processing a Dex `int-to-long' instruction.
DCHECK(out.IsRegisterPair());
DCHECK(in.IsRegister());
__ Mov(out.AsRegisterPairLow<Register>(), in.As<Register>());
@@ -1404,7 +1518,22 @@
}
break;
- case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-char' instruction.
+ __ ubfx(out.As<Register>(), in.As<Register>(), 0, 16);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Type conversion from " << input_type
@@ -1636,8 +1765,11 @@
}
void LocationsBuilderARM::VisitDiv(HDiv* div) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
+ LocationSummary::CallKind call_kind = div->GetResultType() == Primitive::kPrimLong
+ ? LocationSummary::kCall
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);
+
switch (div->GetResultType()) {
case Primitive::kPrimInt: {
locations->SetInAt(0, Location::RequiresRegister());
@@ -1646,7 +1778,13 @@
break;
}
case Primitive::kPrimLong: {
- LOG(FATAL) << "Not implemented div type" << div->GetResultType();
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(1, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
+ // The runtime helper puts the output in R0,R2.
+ locations->SetOut(Location::RegisterPairLocation(R0, R2));
break;
}
case Primitive::kPrimFloat:
@@ -1675,7 +1813,15 @@
}
case Primitive::kPrimLong: {
- LOG(FATAL) << "Not implemented div type" << div->GetResultType();
+ InvokeRuntimeCallingConvention calling_convention;
+ DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegisterPairLow<Register>());
+ DCHECK_EQ(calling_convention.GetRegisterAt(1), first.AsRegisterPairHigh<Register>());
+ DCHECK_EQ(calling_convention.GetRegisterAt(2), second.AsRegisterPairLow<Register>());
+ DCHECK_EQ(calling_convention.GetRegisterAt(3), second.AsRegisterPairHigh<Register>());
+ DCHECK_EQ(R0, out.AsRegisterPairLow<Register>());
+ DCHECK_EQ(R2, out.AsRegisterPairHigh<Register>());
+
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLdiv), div, div->GetDexPc());
break;
}
@@ -1699,7 +1845,7 @@
void LocationsBuilderARM::VisitDivZeroCheck(HDivZeroCheck* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
- locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
if (instruction->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
}
@@ -1712,9 +1858,36 @@
LocationSummary* locations = instruction->GetLocations();
Location value = locations->InAt(0);
- DCHECK(value.IsRegister()) << value;
- __ cmp(value.As<Register>(), ShifterOperand(0));
- __ b(slow_path->GetEntryLabel(), EQ);
+ switch (instruction->GetType()) {
+ case Primitive::kPrimInt: {
+ if (value.IsRegister()) {
+ __ cmp(value.As<Register>(), ShifterOperand(0));
+ __ b(slow_path->GetEntryLabel(), EQ);
+ } else {
+ DCHECK(value.IsConstant()) << value;
+ if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
+ __ b(slow_path->GetEntryLabel());
+ }
+ }
+ break;
+ }
+ case Primitive::kPrimLong: {
+ if (value.IsRegisterPair()) {
+ __ orrs(IP,
+ value.AsRegisterPairLow<Register>(),
+ ShifterOperand(value.AsRegisterPairHigh<Register>()));
+ __ b(slow_path->GetEntryLabel(), EQ);
+ } else {
+ DCHECK(value.IsConstant()) << value;
+ if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
+ __ b(slow_path->GetEntryLabel());
+ }
+ }
+ break;
+ default:
+ LOG(FATAL) << "Unexpected type for HDivZeroCheck " << instruction->GetType();
+ }
+ }
}
void LocationsBuilderARM::VisitNewInstance(HNewInstance* instruction) {
@@ -2658,7 +2831,7 @@
QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc());
}
-void LocationsBuilderARM::VisitTypeCheck(HTypeCheck* instruction) {
+void LocationsBuilderARM::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary::CallKind call_kind = instruction->IsClassFinal()
? LocationSummary::kNoCall
: LocationSummary::kCallOnSlowPath;
@@ -2668,7 +2841,7 @@
locations->SetOut(Location::RequiresRegister());
}
-void InstructionCodeGeneratorARM::VisitTypeCheck(HTypeCheck* instruction) {
+void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary* locations = instruction->GetLocations();
Register obj = locations->InAt(0).As<Register>();
Register cls = locations->InAt(1).As<Register>();
@@ -2693,7 +2866,7 @@
// If the classes are not equal, we go into a slow path.
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
- instruction, Location::RegisterLocation(out));
+ instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
codegen_->AddSlowPath(slow_path);
__ b(slow_path->GetEntryLabel(), NE);
__ LoadImmediate(out, 1);
@@ -2707,5 +2880,121 @@
__ Bind(&done);
}
+void LocationsBuilderARM::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ instruction, LocationSummary::kCallOnSlowPath);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = locations->InAt(0).As<Register>();
+ Register cls = locations->InAt(1).As<Register>();
+ Register temp = locations->GetTemp(0).As<Register>();
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+
+ SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
+ instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
+ codegen_->AddSlowPath(slow_path);
+
+ // TODO: avoid this check if we know obj is not null.
+ __ cmp(obj, ShifterOperand(0));
+ __ b(slow_path->GetExitLabel(), EQ);
+ // Compare the class of `obj` with `cls`.
+ __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
+ __ cmp(temp, ShifterOperand(cls));
+ __ b(slow_path->GetEntryLabel(), NE);
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void LocationsBuilderARM::VisitMonitorOperation(HMonitorOperation* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void InstructionCodeGeneratorARM::VisitMonitorOperation(HMonitorOperation* instruction) {
+ codegen_->InvokeRuntime(instruction->IsEnter()
+ ? QUICK_ENTRY_POINT(pLockObject) : QUICK_ENTRY_POINT(pUnlockObject),
+ instruction,
+ instruction->GetDexPc());
+}
+
+void LocationsBuilderARM::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); }
+void LocationsBuilderARM::VisitOr(HOr* instruction) { HandleBitwiseOperation(instruction); }
+void LocationsBuilderARM::VisitXor(HXor* instruction) { HandleBitwiseOperation(instruction); }
+
+void LocationsBuilderARM::HandleBitwiseOperation(HBinaryOperation* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ DCHECK(instruction->GetResultType() == Primitive::kPrimInt
+ || instruction->GetResultType() == Primitive::kPrimLong);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ bool output_overlaps = (instruction->GetResultType() == Primitive::kPrimLong);
+ locations->SetOut(Location::RequiresRegister(), output_overlaps);
+}
+
+void InstructionCodeGeneratorARM::VisitAnd(HAnd* instruction) {
+ HandleBitwiseOperation(instruction);
+}
+
+void InstructionCodeGeneratorARM::VisitOr(HOr* instruction) {
+ HandleBitwiseOperation(instruction);
+}
+
+void InstructionCodeGeneratorARM::VisitXor(HXor* instruction) {
+ HandleBitwiseOperation(instruction);
+}
+
+void InstructionCodeGeneratorARM::HandleBitwiseOperation(HBinaryOperation* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+
+ if (instruction->GetResultType() == Primitive::kPrimInt) {
+ Register first = locations->InAt(0).As<Register>();
+ Register second = locations->InAt(1).As<Register>();
+ Register out = locations->Out().As<Register>();
+ if (instruction->IsAnd()) {
+ __ and_(out, first, ShifterOperand(second));
+ } else if (instruction->IsOr()) {
+ __ orr(out, first, ShifterOperand(second));
+ } else {
+ DCHECK(instruction->IsXor());
+ __ eor(out, first, ShifterOperand(second));
+ }
+ } else {
+ DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
+ Location out = locations->Out();
+ if (instruction->IsAnd()) {
+ __ and_(out.AsRegisterPairLow<Register>(),
+ first.AsRegisterPairLow<Register>(),
+ ShifterOperand(second.AsRegisterPairLow<Register>()));
+ __ and_(out.AsRegisterPairHigh<Register>(),
+ first.AsRegisterPairHigh<Register>(),
+ ShifterOperand(second.AsRegisterPairHigh<Register>()));
+ } else if (instruction->IsOr()) {
+ __ orr(out.AsRegisterPairLow<Register>(),
+ first.AsRegisterPairLow<Register>(),
+ ShifterOperand(second.AsRegisterPairLow<Register>()));
+ __ orr(out.AsRegisterPairHigh<Register>(),
+ first.AsRegisterPairHigh<Register>(),
+ ShifterOperand(second.AsRegisterPairHigh<Register>()));
+ } else {
+ DCHECK(instruction->IsXor());
+ __ eor(out.AsRegisterPairLow<Register>(),
+ first.AsRegisterPairLow<Register>(),
+ ShifterOperand(second.AsRegisterPairLow<Register>()));
+ __ eor(out.AsRegisterPairHigh<Register>(),
+ first.AsRegisterPairHigh<Register>(),
+ ShifterOperand(second.AsRegisterPairHigh<Register>()));
+ }
+ }
+}
+
} // namespace arm
} // namespace art
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 5d51993..acc3fd6 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -105,9 +105,10 @@
#undef DECLARE_VISIT_INSTRUCTION
- void HandleInvoke(HInvoke* invoke);
-
private:
+ void HandleInvoke(HInvoke* invoke);
+ void HandleBitwiseOperation(HBinaryOperation* operation);
+
CodeGeneratorARM* const codegen_;
InvokeDexCallingConventionVisitor parameter_visitor_;
@@ -133,6 +134,7 @@
// the suspend call.
void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
void GenerateClassInitializationCheck(SlowPathCodeARM* slow_path, Register class_reg);
+ void HandleBitwiseOperation(HBinaryOperation* operation);
ArmAssembler* const assembler_;
CodeGeneratorARM* const codegen_;
@@ -186,7 +188,7 @@
// Blocks all register pairs made out of blocked core registers.
void UpdateBlockedPairRegisters() const;
- ParallelMoveResolverARM* GetMoveResolver() {
+ ParallelMoveResolverARM* GetMoveResolver() OVERRIDE {
return &move_resolver_;
}
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 4dc836f..887a4ef 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -631,18 +631,23 @@
codegen_(codegen) {}
#define FOR_EACH_UNIMPLEMENTED_INSTRUCTION(M) \
+ M(And) \
+ M(CheckCast) \
M(ClinitCheck) \
M(DivZeroCheck) \
+ M(InstanceOf) \
M(InvokeInterface) \
M(LoadClass) \
M(LoadException) \
M(LoadString) \
+ M(MonitorOperation) \
+ M(Or) \
M(ParallelMove) \
M(StaticFieldGet) \
M(StaticFieldSet) \
M(Throw) \
- M(TypeCheck) \
M(TypeConversion) \
+ M(Xor) \
#define UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name) name##UnimplementedInstructionBreakCode
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index f2ead21..54e87f4 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -230,6 +230,11 @@
void Load(Primitive::Type type, vixl::Register dst, const vixl::MemOperand& src);
void Store(Primitive::Type type, vixl::Register rt, const vixl::MemOperand& dst);
+ ParallelMoveResolver* GetMoveResolver() OVERRIDE {
+ UNIMPLEMENTED(INFO) << "TODO: MoveResolver";
+ return nullptr;
+ }
+
private:
// Labels for each block that will be compiled.
vixl::Label* block_labels_;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index d66180b..8a8fec2 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -36,7 +36,7 @@
static constexpr int kNumberOfPushedRegistersAtEntry = 1;
static constexpr int kCurrentMethodStackOffset = 0;
-static constexpr Register kRuntimeParameterCoreRegisters[] = { EAX, ECX, EDX };
+static constexpr Register kRuntimeParameterCoreRegisters[] = { EAX, ECX, EDX, EBX };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
arraysize(kRuntimeParameterCoreRegisters);
static constexpr XmmRegister kRuntimeParameterFpuRegisters[] = { };
@@ -140,9 +140,14 @@
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
+ // We're moving two locations to locations that could overlap, so we need a parallel
+ // move resolver.
InvokeRuntimeCallingConvention calling_convention;
- x86_codegen->Move32(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), index_location_);
- x86_codegen->Move32(Location::RegisterLocation(calling_convention.GetRegisterAt(1)), length_location_);
+ x86_codegen->EmitParallelMoves(
+ index_location_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ length_location_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
__ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pThrowArrayBounds)));
codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
}
@@ -270,13 +275,19 @@
class TypeCheckSlowPathX86 : public SlowPathCodeX86 {
public:
- TypeCheckSlowPathX86(HTypeCheck* instruction, Location object_class)
+ TypeCheckSlowPathX86(HInstruction* instruction,
+ Location class_to_check,
+ Location object_class,
+ uint32_t dex_pc)
: instruction_(instruction),
- object_class_(object_class) {}
+ class_to_check_(class_to_check),
+ object_class_(object_class),
+ dex_pc_(dex_pc) {}
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
- DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+ DCHECK(instruction_->IsCheckCast()
+ || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
@@ -285,28 +296,33 @@
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
- MoveOperands move1(locations->InAt(1),
- Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- nullptr);
- MoveOperands move2(object_class_,
- Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
- nullptr);
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
- parallel_move.AddMove(&move1);
- parallel_move.AddMove(&move2);
- x86_codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+ x86_codegen->EmitParallelMoves(
+ class_to_check_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ object_class_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInstanceofNonTrivial)));
- codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
- x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
+ if (instruction_->IsInstanceOf()) {
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInstanceofNonTrivial)));
+ } else {
+ DCHECK(instruction_->IsCheckCast());
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pCheckCast)));
+ }
+
+ codegen->RecordPcInfo(instruction_, dex_pc_);
+ if (instruction_->IsInstanceOf()) {
+ x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
+ }
codegen->RestoreLiveRegisters(locations);
__ jmp(GetExitLabel());
}
private:
- HTypeCheck* const instruction_;
+ HInstruction* const instruction_;
+ const Location class_to_check_;
const Location object_class_;
+ const uint32_t dex_pc_;
DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathX86);
};
@@ -559,7 +575,7 @@
__ movss(destination.As<XmmRegister>(), Address(ESP, source.GetStackIndex()));
}
} else {
- DCHECK(destination.IsStackSlot());
+ DCHECK(destination.IsStackSlot()) << destination;
if (source.IsRegister()) {
__ movl(Address(ESP, destination.GetStackIndex()), source.As<Register>());
} else if (source.IsFpuRegister()) {
@@ -620,7 +636,7 @@
LOG(FATAL) << "Unimplemented";
}
} else {
- DCHECK(destination.IsDoubleStackSlot());
+ DCHECK(destination.IsDoubleStackSlot()) << destination;
if (source.IsRegisterPair()) {
__ movl(Address(ESP, destination.GetStackIndex()), source.AsRegisterPairLow<Register>());
__ movl(Address(ESP, destination.GetHighStackIndex(kX86WordSize)),
@@ -646,31 +662,44 @@
}
void CodeGeneratorX86::Move(HInstruction* instruction, Location location, HInstruction* move_for) {
- if (instruction->IsIntConstant()) {
- Immediate imm(instruction->AsIntConstant()->GetValue());
- if (location.IsRegister()) {
- __ movl(location.As<Register>(), imm);
- } else if (location.IsStackSlot()) {
- __ movl(Address(ESP, location.GetStackIndex()), imm);
- } else {
- DCHECK(location.IsConstant());
- DCHECK_EQ(location.GetConstant(), instruction);
- }
- } else if (instruction->IsLongConstant()) {
- int64_t value = instruction->AsLongConstant()->GetValue();
- if (location.IsRegister()) {
- __ movl(location.AsRegisterPairLow<Register>(), Immediate(Low32Bits(value)));
- __ movl(location.AsRegisterPairHigh<Register>(), Immediate(High32Bits(value)));
- } else if (location.IsDoubleStackSlot()) {
- __ movl(Address(ESP, location.GetStackIndex()), Immediate(Low32Bits(value)));
- __ movl(Address(ESP, location.GetHighStackIndex(kX86WordSize)), Immediate(High32Bits(value)));
- } else {
- DCHECK(location.IsConstant());
- DCHECK_EQ(location.GetConstant(), instruction);
+ LocationSummary* locations = instruction->GetLocations();
+ if (locations != nullptr && locations->Out().Equals(location)) {
+ return;
+ }
+
+ if (locations != nullptr && locations->Out().IsConstant()) {
+ HConstant* const_to_move = locations->Out().GetConstant();
+ if (const_to_move->IsIntConstant()) {
+ Immediate imm(const_to_move->AsIntConstant()->GetValue());
+ if (location.IsRegister()) {
+ __ movl(location.As<Register>(), imm);
+ } else if (location.IsStackSlot()) {
+ __ movl(Address(ESP, location.GetStackIndex()), imm);
+ } else {
+ DCHECK(location.IsConstant());
+ DCHECK_EQ(location.GetConstant(), const_to_move);
+ }
+ } else if (const_to_move->IsLongConstant()) {
+ int64_t value = const_to_move->AsLongConstant()->GetValue();
+ if (location.IsRegisterPair()) {
+ __ movl(location.AsRegisterPairLow<Register>(), Immediate(Low32Bits(value)));
+ __ movl(location.AsRegisterPairHigh<Register>(), Immediate(High32Bits(value)));
+ } else if (location.IsDoubleStackSlot()) {
+ __ movl(Address(ESP, location.GetStackIndex()), Immediate(Low32Bits(value)));
+ __ movl(Address(ESP, location.GetHighStackIndex(kX86WordSize)), Immediate(High32Bits(value)));
+ } else {
+ DCHECK(location.IsConstant());
+ DCHECK_EQ(location.GetConstant(), instruction);
+ }
}
} else if (instruction->IsTemporary()) {
Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
- Move32(location, temp_location);
+ if (temp_location.IsStackSlot()) {
+ Move32(location, temp_location);
+ } else {
+ DCHECK(temp_location.IsDoubleStackSlot());
+ Move64(location, temp_location);
+ }
} else if (instruction->IsLoadLocal()) {
int slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
switch (instruction->GetType()) {
@@ -702,12 +731,12 @@
case Primitive::kPrimInt:
case Primitive::kPrimNot:
case Primitive::kPrimFloat:
- Move32(location, instruction->GetLocations()->Out());
+ Move32(location, locations->Out());
break;
case Primitive::kPrimLong:
case Primitive::kPrimDouble:
- Move64(location, instruction->GetLocations()->Out());
+ Move64(location, locations->Out());
break;
default:
@@ -1261,13 +1290,49 @@
Primitive::Type result_type = conversion->GetResultType();
Primitive::Type input_type = conversion->GetInputType();
switch (result_type) {
+ case Primitive::kPrimByte:
+ switch (input_type) {
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-byte' instruction.
+ locations->SetInAt(0, Location::Any());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // Processing a Dex `long-to-int' instruction.
+ locations->SetInAt(0, Location::Any());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
case Primitive::kPrimLong:
switch (input_type) {
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimChar:
- // int-to-long conversion.
+ // Processing a Dex `int-to-long' instruction.
locations->SetInAt(0, Location::RegisterLocation(EAX));
locations->SetOut(Location::RegisterPairLocation(EAX, EDX));
break;
@@ -1284,7 +1349,23 @@
}
break;
- case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-char' instruction.
+ locations->SetInAt(0, Location::Any());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Type conversion from " << input_type
@@ -1304,13 +1385,64 @@
Primitive::Type result_type = conversion->GetResultType();
Primitive::Type input_type = conversion->GetInputType();
switch (result_type) {
+ case Primitive::kPrimByte:
+ switch (input_type) {
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-byte' instruction.
+ if (in.IsRegister()) {
+ __ movsxb(out.As<Register>(), in.As<ByteRegister>());
+ } else if (in.IsStackSlot()) {
+ __ movsxb(out.As<Register>(), Address(ESP, in.GetStackIndex()));
+ } else {
+ DCHECK(in.GetConstant()->IsIntConstant());
+ int32_t value = in.GetConstant()->AsIntConstant()->GetValue();
+ __ movl(out.As<Register>(), Immediate(static_cast<int8_t>(value)));
+ }
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // Processing a Dex `long-to-int' instruction.
+ if (in.IsRegisterPair()) {
+ __ movl(out.As<Register>(), in.AsRegisterPairLow<Register>());
+ } else if (in.IsDoubleStackSlot()) {
+ __ movl(out.As<Register>(), Address(ESP, in.GetStackIndex()));
+ } else {
+ DCHECK(in.IsConstant());
+ DCHECK(in.GetConstant()->IsLongConstant());
+ int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
+ __ movl(out.As<Register>(), Immediate(static_cast<int32_t>(value)));
+ }
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
case Primitive::kPrimLong:
switch (input_type) {
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimChar:
- // int-to-long conversion.
+ // Processing a Dex `int-to-long' instruction.
DCHECK_EQ(out.AsRegisterPairLow<Register>(), EAX);
DCHECK_EQ(out.AsRegisterPairHigh<Register>(), EDX);
DCHECK_EQ(in.As<Register>(), EAX);
@@ -1329,7 +1461,30 @@
}
break;
- case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-char' instruction.
+ if (in.IsRegister()) {
+ __ movzxw(out.As<Register>(), in.As<Register>());
+ } else if (in.IsStackSlot()) {
+ __ movzxw(out.As<Register>(), Address(ESP, in.GetStackIndex()));
+ } else {
+ DCHECK(in.GetConstant()->IsIntConstant());
+ int32_t value = in.GetConstant()->AsIntConstant()->GetValue();
+ __ movl(out.As<Register>(), Immediate(static_cast<uint16_t>(value)));
+ }
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Type conversion from " << input_type
@@ -1386,7 +1541,7 @@
}
case Primitive::kPrimLong: {
- if (second.IsRegister()) {
+ if (second.IsRegisterPair()) {
__ addl(first.AsRegisterPairLow<Register>(), second.AsRegisterPairLow<Register>());
__ adcl(first.AsRegisterPairHigh<Register>(), second.AsRegisterPairHigh<Register>());
} else {
@@ -1462,7 +1617,7 @@
}
case Primitive::kPrimLong: {
- if (second.IsRegister()) {
+ if (second.IsRegisterPair()) {
__ subl(first.AsRegisterPairLow<Register>(), second.AsRegisterPairLow<Register>());
__ sbbl(first.AsRegisterPairHigh<Register>(), second.AsRegisterPairHigh<Register>());
} else {
@@ -1599,8 +1754,11 @@
}
void LocationsBuilderX86::VisitDiv(HDiv* div) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
+ LocationSummary::CallKind call_kind = div->GetResultType() == Primitive::kPrimLong
+ ? LocationSummary::kCall
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);
+
switch (div->GetResultType()) {
case Primitive::kPrimInt: {
locations->SetInAt(0, Location::RegisterLocation(EAX));
@@ -1611,7 +1769,13 @@
break;
}
case Primitive::kPrimLong: {
- LOG(FATAL) << "Not implemented div type" << div->GetResultType();
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(1, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
+ // Runtime helper puts the result in EAX, EDX.
+ locations->SetOut(Location::RegisterPairLocation(EAX, EDX));
break;
}
case Primitive::kPrimFloat:
@@ -1629,12 +1793,13 @@
void InstructionCodeGeneratorX86::VisitDiv(HDiv* div) {
LocationSummary* locations = div->GetLocations();
+ Location out = locations->Out();
Location first = locations->InAt(0);
Location second = locations->InAt(1);
- DCHECK(first.Equals(locations->Out()));
switch (div->GetResultType()) {
case Primitive::kPrimInt: {
+ DCHECK(first.Equals(out));
Register first_reg = first.As<Register>();
Register second_reg = second.As<Register>();
DCHECK_EQ(EAX, first_reg);
@@ -1661,16 +1826,28 @@
}
case Primitive::kPrimLong: {
- LOG(FATAL) << "Not implemented div type" << div->GetResultType();
+ InvokeRuntimeCallingConvention calling_convention;
+ DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegisterPairLow<Register>());
+ DCHECK_EQ(calling_convention.GetRegisterAt(1), first.AsRegisterPairHigh<Register>());
+ DCHECK_EQ(calling_convention.GetRegisterAt(2), second.AsRegisterPairLow<Register>());
+ DCHECK_EQ(calling_convention.GetRegisterAt(3), second.AsRegisterPairHigh<Register>());
+ DCHECK_EQ(EAX, out.AsRegisterPairLow<Register>());
+ DCHECK_EQ(EDX, out.AsRegisterPairHigh<Register>());
+
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pLdiv)));
+ codegen_->RecordPcInfo(div, div->GetDexPc());
+
break;
}
case Primitive::kPrimFloat: {
+ DCHECK(first.Equals(out));
__ divss(first.As<XmmRegister>(), second.As<XmmRegister>());
break;
}
case Primitive::kPrimDouble: {
+ DCHECK(first.Equals(out));
__ divsd(first.As<XmmRegister>(), second.As<XmmRegister>());
break;
}
@@ -1683,7 +1860,21 @@
void LocationsBuilderX86::VisitDivZeroCheck(HDivZeroCheck* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
- locations->SetInAt(0, Location::Any());
+ switch (instruction->GetType()) {
+ case Primitive::kPrimInt: {
+ locations->SetInAt(0, Location::Any());
+ break;
+ }
+ case Primitive::kPrimLong: {
+ locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
+ if (!instruction->IsConstant()) {
+ locations->AddTemp(Location::RequiresRegister());
+ }
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected type for HDivZeroCheck " << instruction->GetType();
+ }
if (instruction->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
}
@@ -1696,18 +1887,39 @@
LocationSummary* locations = instruction->GetLocations();
Location value = locations->InAt(0);
- if (value.IsRegister()) {
- __ testl(value.As<Register>(), value.As<Register>());
- } else if (value.IsStackSlot()) {
- __ cmpl(Address(ESP, value.GetStackIndex()), Immediate(0));
- } else {
- DCHECK(value.IsConstant()) << value;
- if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
- __ jmp(slow_path->GetEntryLabel());
+ switch (instruction->GetType()) {
+ case Primitive::kPrimInt: {
+ if (value.IsRegister()) {
+ __ testl(value.As<Register>(), value.As<Register>());
+ __ j(kEqual, slow_path->GetEntryLabel());
+ } else if (value.IsStackSlot()) {
+ __ cmpl(Address(ESP, value.GetStackIndex()), Immediate(0));
+ __ j(kEqual, slow_path->GetEntryLabel());
+ } else {
+ DCHECK(value.IsConstant()) << value;
+ if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
+ __ jmp(slow_path->GetEntryLabel());
+ }
+ }
+ break;
}
- return;
+ case Primitive::kPrimLong: {
+ if (value.IsRegisterPair()) {
+ Register temp = locations->GetTemp(0).As<Register>();
+ __ movl(temp, value.AsRegisterPairLow<Register>());
+ __ orl(temp, value.AsRegisterPairHigh<Register>());
+ __ j(kEqual, slow_path->GetEntryLabel());
+ } else {
+ DCHECK(value.IsConstant()) << value;
+ if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
+ __ jmp(slow_path->GetEntryLabel());
+ }
+ }
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected type for HDivZeroCheck" << instruction->GetType();
}
- __ j(kEqual, slow_path->GetEntryLabel());
}
void LocationsBuilderX86::VisitNewInstance(HNewInstance* instruction) {
@@ -2753,7 +2965,7 @@
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}
-void LocationsBuilderX86::VisitTypeCheck(HTypeCheck* instruction) {
+void LocationsBuilderX86::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary::CallKind call_kind = instruction->IsClassFinal()
? LocationSummary::kNoCall
: LocationSummary::kCallOnSlowPath;
@@ -2763,7 +2975,7 @@
locations->SetOut(Location::RequiresRegister());
}
-void InstructionCodeGeneratorX86::VisitTypeCheck(HTypeCheck* instruction) {
+void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary* locations = instruction->GetLocations();
Register obj = locations->InAt(0).As<Register>();
Location cls = locations->InAt(1);
@@ -2794,7 +3006,7 @@
// If the classes are not equal, we go into a slow path.
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(
- instruction, Location::RegisterLocation(out));
+ instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
codegen_->AddSlowPath(slow_path);
__ j(kNotEqual, slow_path->GetEntryLabel());
__ movl(out, Immediate(1));
@@ -2808,5 +3020,148 @@
__ Bind(&done);
}
+void LocationsBuilderX86::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ instruction, LocationSummary::kCallOnSlowPath);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::Any());
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = locations->InAt(0).As<Register>();
+ Location cls = locations->InAt(1);
+ Register temp = locations->GetTemp(0).As<Register>();
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(
+ instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
+ codegen_->AddSlowPath(slow_path);
+
+ // TODO: avoid this check if we know obj is not null.
+ __ testl(obj, obj);
+ __ j(kEqual, slow_path->GetExitLabel());
+ __ movl(temp, Address(obj, class_offset));
+
+ // Compare the class of `obj` with `cls`.
+ if (cls.IsRegister()) {
+ __ cmpl(temp, cls.As<Register>());
+ } else {
+ DCHECK(cls.IsStackSlot()) << cls;
+ __ cmpl(temp, Address(ESP, cls.GetStackIndex()));
+ }
+
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void LocationsBuilderX86::VisitMonitorOperation(HMonitorOperation* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void InstructionCodeGeneratorX86::VisitMonitorOperation(HMonitorOperation* instruction) {
+ __ fs()->call(Address::Absolute(instruction->IsEnter()
+ ? QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pLockObject)
+ : QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pUnlockObject)));
+ codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+}
+
+void LocationsBuilderX86::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); }
+void LocationsBuilderX86::VisitOr(HOr* instruction) { HandleBitwiseOperation(instruction); }
+void LocationsBuilderX86::VisitXor(HXor* instruction) { HandleBitwiseOperation(instruction); }
+
+void LocationsBuilderX86::HandleBitwiseOperation(HBinaryOperation* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ DCHECK(instruction->GetResultType() == Primitive::kPrimInt
+ || instruction->GetResultType() == Primitive::kPrimLong);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::Any());
+ locations->SetOut(Location::SameAsFirstInput());
+}
+
+void InstructionCodeGeneratorX86::VisitAnd(HAnd* instruction) {
+ HandleBitwiseOperation(instruction);
+}
+
+void InstructionCodeGeneratorX86::VisitOr(HOr* instruction) {
+ HandleBitwiseOperation(instruction);
+}
+
+void InstructionCodeGeneratorX86::VisitXor(HXor* instruction) {
+ HandleBitwiseOperation(instruction);
+}
+
+void InstructionCodeGeneratorX86::HandleBitwiseOperation(HBinaryOperation* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
+ DCHECK(first.Equals(locations->Out()));
+
+ if (instruction->GetResultType() == Primitive::kPrimInt) {
+ if (second.IsRegister()) {
+ if (instruction->IsAnd()) {
+ __ andl(first.As<Register>(), second.As<Register>());
+ } else if (instruction->IsOr()) {
+ __ orl(first.As<Register>(), second.As<Register>());
+ } else {
+ DCHECK(instruction->IsXor());
+ __ xorl(first.As<Register>(), second.As<Register>());
+ }
+ } else if (second.IsConstant()) {
+ if (instruction->IsAnd()) {
+ __ andl(first.As<Register>(), Immediate(second.GetConstant()->AsIntConstant()->GetValue()));
+ } else if (instruction->IsOr()) {
+ __ orl(first.As<Register>(), Immediate(second.GetConstant()->AsIntConstant()->GetValue()));
+ } else {
+ DCHECK(instruction->IsXor());
+ __ xorl(first.As<Register>(), Immediate(second.GetConstant()->AsIntConstant()->GetValue()));
+ }
+ } else {
+ if (instruction->IsAnd()) {
+ __ andl(first.As<Register>(), Address(ESP, second.GetStackIndex()));
+ } else if (instruction->IsOr()) {
+ __ orl(first.As<Register>(), Address(ESP, second.GetStackIndex()));
+ } else {
+ DCHECK(instruction->IsXor());
+ __ xorl(first.As<Register>(), Address(ESP, second.GetStackIndex()));
+ }
+ }
+ } else {
+ DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
+ if (second.IsRegisterPair()) {
+ if (instruction->IsAnd()) {
+ __ andl(first.AsRegisterPairLow<Register>(), second.AsRegisterPairLow<Register>());
+ __ andl(first.AsRegisterPairHigh<Register>(), second.AsRegisterPairHigh<Register>());
+ } else if (instruction->IsOr()) {
+ __ orl(first.AsRegisterPairLow<Register>(), second.AsRegisterPairLow<Register>());
+ __ orl(first.AsRegisterPairHigh<Register>(), second.AsRegisterPairHigh<Register>());
+ } else {
+ DCHECK(instruction->IsXor());
+ __ xorl(first.AsRegisterPairLow<Register>(), second.AsRegisterPairLow<Register>());
+ __ xorl(first.AsRegisterPairHigh<Register>(), second.AsRegisterPairHigh<Register>());
+ }
+ } else {
+ if (instruction->IsAnd()) {
+ __ andl(first.AsRegisterPairLow<Register>(), Address(ESP, second.GetStackIndex()));
+ __ andl(first.AsRegisterPairHigh<Register>(),
+ Address(ESP, second.GetHighStackIndex(kX86WordSize)));
+ } else if (instruction->IsOr()) {
+ __ orl(first.AsRegisterPairLow<Register>(), Address(ESP, second.GetStackIndex()));
+ __ orl(first.AsRegisterPairHigh<Register>(),
+ Address(ESP, second.GetHighStackIndex(kX86WordSize)));
+ } else {
+ DCHECK(instruction->IsXor());
+ __ xorl(first.AsRegisterPairLow<Register>(), Address(ESP, second.GetStackIndex()));
+ __ xorl(first.AsRegisterPairHigh<Register>(),
+ Address(ESP, second.GetHighStackIndex(kX86WordSize)));
+ }
+ }
+ }
+}
+
} // namespace x86
} // namespace art
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 85fe21c..841b28b 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -100,9 +100,10 @@
#undef DECLARE_VISIT_INSTRUCTION
+ private:
+ void HandleBitwiseOperation(HBinaryOperation* instruction);
void HandleInvoke(HInvoke* invoke);
- private:
CodeGeneratorX86* const codegen_;
InvokeDexCallingConventionVisitor parameter_visitor_;
@@ -128,6 +129,7 @@
// the suspend call.
void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
void GenerateClassInitializationCheck(SlowPathCodeX86* slow_path, Register class_reg);
+ void HandleBitwiseOperation(HBinaryOperation* instruction);
X86Assembler* const assembler_;
CodeGeneratorX86* const codegen_;
@@ -181,7 +183,7 @@
// Blocks all register pairs made out of blocked core registers.
void UpdateBlockedPairRegisters() const;
- ParallelMoveResolverX86* GetMoveResolver() {
+ ParallelMoveResolverX86* GetMoveResolver() OVERRIDE {
return &move_resolver_;
}
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index e09b6ca..5aa1c4a 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -108,16 +108,23 @@
class DivMinusOneSlowPathX86_64 : public SlowPathCodeX86_64 {
public:
- explicit DivMinusOneSlowPathX86_64(Register reg) : reg_(reg) {}
+ explicit DivMinusOneSlowPathX86_64(Register reg, Primitive::Type type)
+ : reg_(reg), type_(type) {}
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
__ Bind(GetEntryLabel());
- __ negl(CpuRegister(reg_));
+ if (type_ == Primitive::kPrimInt) {
+ __ negl(CpuRegister(reg_));
+ } else {
+ DCHECK_EQ(Primitive::kPrimLong, type_);
+ __ negq(CpuRegister(reg_));
+ }
__ jmp(GetExitLabel());
}
private:
- Register reg_;
+ const Register reg_;
+ const Primitive::Type type_;
DISALLOW_COPY_AND_ASSIGN(DivMinusOneSlowPathX86_64);
};
@@ -179,13 +186,15 @@
length_location_(length_location) {}
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
+ // We're moving two locations to locations that could overlap, so we need a parallel
+ // move resolver.
InvokeRuntimeCallingConvention calling_convention;
- x64_codegen->Move(
- Location::RegisterLocation(calling_convention.GetRegisterAt(0)), index_location_);
- x64_codegen->Move(
- Location::RegisterLocation(calling_convention.GetRegisterAt(1)), length_location_);
+ codegen->EmitParallelMoves(
+ index_location_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ length_location_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
__ gs()->call(Address::Absolute(
QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pThrowArrayBounds), true));
codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
@@ -284,13 +293,19 @@
class TypeCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
public:
- TypeCheckSlowPathX86_64(HTypeCheck* instruction, Location object_class)
+ TypeCheckSlowPathX86_64(HInstruction* instruction,
+ Location class_to_check,
+ Location object_class,
+ uint32_t dex_pc)
: instruction_(instruction),
- object_class_(object_class) {}
+ class_to_check_(class_to_check),
+ object_class_(object_class),
+ dex_pc_(dex_pc) {}
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
- DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+ DCHECK(instruction_->IsCheckCast()
+ || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
@@ -299,29 +314,35 @@
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
- MoveOperands move1(locations->InAt(1),
- Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- nullptr);
- MoveOperands move2(object_class_,
- Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
- nullptr);
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
- parallel_move.AddMove(&move1);
- parallel_move.AddMove(&move2);
-    x64_codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+ codegen->EmitParallelMoves(
+ class_to_check_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ object_class_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
- __ gs()->call(
- Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInstanceofNonTrivial), true));
- codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
- x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
+ if (instruction_->IsInstanceOf()) {
+ __ gs()->call(
+ Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInstanceofNonTrivial), true));
+ } else {
+ DCHECK(instruction_->IsCheckCast());
+ __ gs()->call(
+ Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pCheckCast), true));
+ }
+ codegen->RecordPcInfo(instruction_, dex_pc_);
+
+ if (instruction_->IsInstanceOf()) {
+ x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
+ }
codegen->RestoreLiveRegisters(locations);
__ jmp(GetExitLabel());
}
private:
- HTypeCheck* const instruction_;
+ HInstruction* const instruction_;
+ const Location class_to_check_;
const Location object_class_;
+ const uint32_t dex_pc_;
DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathX86_64);
};
@@ -563,26 +584,34 @@
void CodeGeneratorX86_64::Move(HInstruction* instruction,
Location location,
HInstruction* move_for) {
- if (instruction->IsIntConstant()) {
- Immediate imm(instruction->AsIntConstant()->GetValue());
- if (location.IsRegister()) {
- __ movl(location.As<CpuRegister>(), imm);
- } else if (location.IsStackSlot()) {
- __ movl(Address(CpuRegister(RSP), location.GetStackIndex()), imm);
- } else {
- DCHECK(location.IsConstant());
- DCHECK_EQ(location.GetConstant(), instruction);
- }
- } else if (instruction->IsLongConstant()) {
- int64_t value = instruction->AsLongConstant()->GetValue();
- if (location.IsRegister()) {
- __ movq(location.As<CpuRegister>(), Immediate(value));
- } else if (location.IsDoubleStackSlot()) {
- __ movq(CpuRegister(TMP), Immediate(value));
- __ movq(Address(CpuRegister(RSP), location.GetStackIndex()), CpuRegister(TMP));
- } else {
- DCHECK(location.IsConstant());
- DCHECK_EQ(location.GetConstant(), instruction);
+ LocationSummary* locations = instruction->GetLocations();
+ if (locations != nullptr && locations->Out().Equals(location)) {
+ return;
+ }
+
+ if (locations != nullptr && locations->Out().IsConstant()) {
+ HConstant* const_to_move = locations->Out().GetConstant();
+ if (const_to_move->IsIntConstant()) {
+ Immediate imm(const_to_move->AsIntConstant()->GetValue());
+ if (location.IsRegister()) {
+ __ movl(location.As<CpuRegister>(), imm);
+ } else if (location.IsStackSlot()) {
+ __ movl(Address(CpuRegister(RSP), location.GetStackIndex()), imm);
+ } else {
+ DCHECK(location.IsConstant());
+ DCHECK_EQ(location.GetConstant(), const_to_move);
+ }
+ } else if (const_to_move->IsLongConstant()) {
+ int64_t value = const_to_move->AsLongConstant()->GetValue();
+ if (location.IsRegister()) {
+ __ movq(location.As<CpuRegister>(), Immediate(value));
+ } else if (location.IsDoubleStackSlot()) {
+ __ movq(CpuRegister(TMP), Immediate(value));
+ __ movq(Address(CpuRegister(RSP), location.GetStackIndex()), CpuRegister(TMP));
+ } else {
+ DCHECK(location.IsConstant());
+ DCHECK_EQ(location.GetConstant(), const_to_move);
+ }
}
} else if (instruction->IsLoadLocal()) {
switch (instruction->GetType()) {
@@ -619,7 +648,7 @@
case Primitive::kPrimLong:
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
- Move(location, instruction->GetLocations()->Out());
+ Move(location, locations->Out());
break;
default:
@@ -1259,13 +1288,49 @@
Primitive::Type result_type = conversion->GetResultType();
Primitive::Type input_type = conversion->GetInputType();
switch (result_type) {
+ case Primitive::kPrimByte:
+ switch (input_type) {
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-byte' instruction.
+ locations->SetInAt(0, Location::Any());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // Processing a Dex `long-to-int' instruction.
+ locations->SetInAt(0, Location::Any());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
case Primitive::kPrimLong:
switch (input_type) {
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimChar:
- // int-to-long conversion.
+ // Processing a Dex `int-to-long' instruction.
// TODO: We would benefit from a (to-be-implemented)
// Location::RegisterOrStackSlot requirement for this input.
locations->SetInAt(0, Location::RequiresRegister());
@@ -1284,7 +1349,23 @@
}
break;
- case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-char' instruction.
+ locations->SetInAt(0, Location::Any());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Type conversion from " << input_type
@@ -1304,6 +1385,59 @@
Primitive::Type result_type = conversion->GetResultType();
Primitive::Type input_type = conversion->GetInputType();
switch (result_type) {
+ case Primitive::kPrimByte:
+ switch (input_type) {
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-byte' instruction.
+ if (in.IsRegister()) {
+ __ movsxb(out.As<CpuRegister>(), in.As<CpuRegister>());
+ } else if (in.IsStackSlot()) {
+ __ movsxb(out.As<CpuRegister>(),
+ Address(CpuRegister(RSP), in.GetStackIndex()));
+ } else {
+ DCHECK(in.GetConstant()->IsIntConstant());
+ __ movl(out.As<CpuRegister>(),
+ Immediate(static_cast<int8_t>(in.GetConstant()->AsIntConstant()->GetValue())));
+ }
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // Processing a Dex `long-to-int' instruction.
+ if (in.IsRegister()) {
+ __ movl(out.As<CpuRegister>(), in.As<CpuRegister>());
+ } else if (in.IsDoubleStackSlot()) {
+ __ movl(out.As<CpuRegister>(),
+ Address(CpuRegister(RSP), in.GetStackIndex()));
+ } else {
+ DCHECK(in.IsConstant());
+ DCHECK(in.GetConstant()->IsLongConstant());
+ int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
+ __ movl(out.As<CpuRegister>(), Immediate(static_cast<int32_t>(value)));
+ }
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
case Primitive::kPrimLong:
switch (input_type) {
DCHECK(out.IsRegister());
@@ -1311,7 +1445,7 @@
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimChar:
- // int-to-long conversion.
+ // Processing a Dex `int-to-long' instruction.
DCHECK(in.IsRegister());
__ movsxd(out.As<CpuRegister>(), in.As<CpuRegister>());
break;
@@ -1328,7 +1462,31 @@
}
break;
- case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-char' instruction.
+ if (in.IsRegister()) {
+ __ movzxw(out.As<CpuRegister>(), in.As<CpuRegister>());
+ } else if (in.IsStackSlot()) {
+ __ movzxw(out.As<CpuRegister>(),
+ Address(CpuRegister(RSP), in.GetStackIndex()));
+ } else {
+ DCHECK(in.GetConstant()->IsIntConstant());
+ __ movl(out.As<CpuRegister>(),
+ Immediate(static_cast<uint16_t>(in.GetConstant()->AsIntConstant()->GetValue())));
+ }
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Type conversion from " << input_type
@@ -1547,7 +1705,8 @@
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
switch (div->GetResultType()) {
- case Primitive::kPrimInt: {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong: {
locations->SetInAt(0, Location::RegisterLocation(RAX));
locations->SetInAt(1, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
@@ -1555,10 +1714,7 @@
locations->AddTemp(Location::RegisterLocation(RDX));
break;
}
- case Primitive::kPrimLong: {
- LOG(FATAL) << "Not implemented div type" << div->GetResultType();
- break;
- }
+
case Primitive::kPrimFloat:
case Primitive::kPrimDouble: {
locations->SetInAt(0, Location::RequiresFpuRegister());
@@ -1578,38 +1734,42 @@
Location second = locations->InAt(1);
DCHECK(first.Equals(locations->Out()));
- switch (div->GetResultType()) {
- case Primitive::kPrimInt: {
+ Primitive::Type type = div->GetResultType();
+ switch (type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong: {
CpuRegister first_reg = first.As<CpuRegister>();
CpuRegister second_reg = second.As<CpuRegister>();
DCHECK_EQ(RAX, first_reg.AsRegister());
DCHECK_EQ(RDX, locations->GetTemp(0).As<CpuRegister>().AsRegister());
SlowPathCodeX86_64* slow_path =
- new (GetGraph()->GetArena()) DivMinusOneSlowPathX86_64(first_reg.AsRegister());
+ new (GetGraph()->GetArena()) DivMinusOneSlowPathX86_64(first_reg.AsRegister(), type);
codegen_->AddSlowPath(slow_path);
- // 0x80000000/-1 triggers an arithmetic exception!
- // Dividing by -1 is actually negation and -0x800000000 = 0x80000000 so
- // it's safe to just use negl instead of more complex comparisons.
+ // 0x80000000(00000000)/-1 triggers an arithmetic exception!
+ // Dividing by -1 is actually negation and -0x800000000(00000000) = 0x80000000(00000000)
+ // so it's safe to just use negl instead of more complex comparisons.
__ cmpl(second_reg, Immediate(-1));
__ j(kEqual, slow_path->GetEntryLabel());
- // edx:eax <- sign-extended of eax
- __ cdq();
- // eax = quotient, edx = remainder
- __ idivl(second_reg);
+ if (type == Primitive::kPrimInt) {
+ // edx:eax <- sign-extended of eax
+ __ cdq();
+ // eax = quotient, edx = remainder
+ __ idivl(second_reg);
+ } else {
+ // rdx:rax <- sign-extended of rax
+ __ cqo();
+ // rax = quotient, rdx = remainder
+ __ idivq(second_reg);
+ }
__ Bind(slow_path->GetExitLabel());
break;
}
- case Primitive::kPrimLong: {
- LOG(FATAL) << "Not implemented div type" << div->GetResultType();
- break;
- }
-
case Primitive::kPrimFloat: {
__ divss(first.As<XmmRegister>(), second.As<XmmRegister>());
break;
@@ -1642,18 +1802,40 @@
LocationSummary* locations = instruction->GetLocations();
Location value = locations->InAt(0);
- if (value.IsRegister()) {
- __ testl(value.As<CpuRegister>(), value.As<CpuRegister>());
- } else if (value.IsStackSlot()) {
- __ cmpl(Address(CpuRegister(RSP), value.GetStackIndex()), Immediate(0));
- } else {
- DCHECK(value.IsConstant()) << value;
- if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
- __ jmp(slow_path->GetEntryLabel());
+ switch (instruction->GetType()) {
+ case Primitive::kPrimInt: {
+ if (value.IsRegister()) {
+ __ testl(value.As<CpuRegister>(), value.As<CpuRegister>());
+ __ j(kEqual, slow_path->GetEntryLabel());
+ } else if (value.IsStackSlot()) {
+ __ cmpl(Address(CpuRegister(RSP), value.GetStackIndex()), Immediate(0));
+ __ j(kEqual, slow_path->GetEntryLabel());
+ } else {
+ DCHECK(value.IsConstant()) << value;
+ if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
+ __ jmp(slow_path->GetEntryLabel());
+ }
+ }
+ break;
}
- return;
+ case Primitive::kPrimLong: {
+ if (value.IsRegister()) {
+ __ testq(value.As<CpuRegister>(), value.As<CpuRegister>());
+ __ j(kEqual, slow_path->GetEntryLabel());
+ } else if (value.IsDoubleStackSlot()) {
+ __ cmpq(Address(CpuRegister(RSP), value.GetStackIndex()), Immediate(0));
+ __ j(kEqual, slow_path->GetEntryLabel());
+ } else {
+ DCHECK(value.IsConstant()) << value;
+ if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
+ __ jmp(slow_path->GetEntryLabel());
+ }
+ }
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected type for HDivZeroCheck " << instruction->GetType();
}
- __ j(kEqual, slow_path->GetEntryLabel());
}
void LocationsBuilderX86_64::VisitNewInstance(HNewInstance* instruction) {
@@ -2743,7 +2925,7 @@
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}
-void LocationsBuilderX86_64::VisitTypeCheck(HTypeCheck* instruction) {
+void LocationsBuilderX86_64::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary::CallKind call_kind = instruction->IsClassFinal()
? LocationSummary::kNoCall
: LocationSummary::kCallOnSlowPath;
@@ -2753,7 +2935,7 @@
locations->SetOut(Location::RequiresRegister());
}
-void InstructionCodeGeneratorX86_64::VisitTypeCheck(HTypeCheck* instruction) {
+void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary* locations = instruction->GetLocations();
CpuRegister obj = locations->InAt(0).As<CpuRegister>();
Location cls = locations->InAt(1);
@@ -2783,7 +2965,7 @@
// If the classes are not equal, we go into a slow path.
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(
- instruction, Location::RegisterLocation(out.AsRegister()));
+ instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
codegen_->AddSlowPath(slow_path);
__ j(kNotEqual, slow_path->GetEntryLabel());
__ movl(out, Immediate(1));
@@ -2797,5 +2979,135 @@
__ Bind(&done);
}
+void LocationsBuilderX86_64::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ instruction, LocationSummary::kCallOnSlowPath);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::Any());
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ CpuRegister obj = locations->InAt(0).As<CpuRegister>();
+ Location cls = locations->InAt(1);
+ CpuRegister temp = locations->GetTemp(0).As<CpuRegister>();
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(
+ instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
+ codegen_->AddSlowPath(slow_path);
+
+ // TODO: avoid this check if we know obj is not null.
+ __ testl(obj, obj);
+ __ j(kEqual, slow_path->GetExitLabel());
+ // Compare the class of `obj` with `cls`.
+ __ movl(temp, Address(obj, class_offset));
+ if (cls.IsRegister()) {
+ __ cmpl(temp, cls.As<CpuRegister>());
+ } else {
+ DCHECK(cls.IsStackSlot()) << cls;
+ __ cmpl(temp, Address(CpuRegister(RSP), cls.GetStackIndex()));
+ }
+ // Classes must be equal for the checkcast to succeed.
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void LocationsBuilderX86_64::VisitMonitorOperation(HMonitorOperation* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void InstructionCodeGeneratorX86_64::VisitMonitorOperation(HMonitorOperation* instruction) {
+ __ gs()->call(Address::Absolute(instruction->IsEnter()
+ ? QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pLockObject)
+ : QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pUnlockObject),
+ true));
+ codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+}
+
+void LocationsBuilderX86_64::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); }
+void LocationsBuilderX86_64::VisitOr(HOr* instruction) { HandleBitwiseOperation(instruction); }
+void LocationsBuilderX86_64::VisitXor(HXor* instruction) { HandleBitwiseOperation(instruction); }
+
+void LocationsBuilderX86_64::HandleBitwiseOperation(HBinaryOperation* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ DCHECK(instruction->GetResultType() == Primitive::kPrimInt
+ || instruction->GetResultType() == Primitive::kPrimLong);
+ locations->SetInAt(0, Location::RequiresRegister());
+ if (instruction->GetType() == Primitive::kPrimInt) {
+ locations->SetInAt(1, Location::Any());
+ } else {
+ // Request a register to avoid loading a 64bits constant.
+ locations->SetInAt(1, Location::RequiresRegister());
+ }
+ locations->SetOut(Location::SameAsFirstInput());
+}
+
+void InstructionCodeGeneratorX86_64::VisitAnd(HAnd* instruction) {
+ HandleBitwiseOperation(instruction);
+}
+
+void InstructionCodeGeneratorX86_64::VisitOr(HOr* instruction) {
+ HandleBitwiseOperation(instruction);
+}
+
+void InstructionCodeGeneratorX86_64::VisitXor(HXor* instruction) {
+ HandleBitwiseOperation(instruction);
+}
+
+void InstructionCodeGeneratorX86_64::HandleBitwiseOperation(HBinaryOperation* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
+ DCHECK(first.Equals(locations->Out()));
+
+ if (instruction->GetResultType() == Primitive::kPrimInt) {
+ if (second.IsRegister()) {
+ if (instruction->IsAnd()) {
+ __ andl(first.As<CpuRegister>(), second.As<CpuRegister>());
+ } else if (instruction->IsOr()) {
+ __ orl(first.As<CpuRegister>(), second.As<CpuRegister>());
+ } else {
+ DCHECK(instruction->IsXor());
+ __ xorl(first.As<CpuRegister>(), second.As<CpuRegister>());
+ }
+ } else if (second.IsConstant()) {
+ Immediate imm(second.GetConstant()->AsIntConstant()->GetValue());
+ if (instruction->IsAnd()) {
+ __ andl(first.As<CpuRegister>(), imm);
+ } else if (instruction->IsOr()) {
+ __ orl(first.As<CpuRegister>(), imm);
+ } else {
+ DCHECK(instruction->IsXor());
+ __ xorl(first.As<CpuRegister>(), imm);
+ }
+ } else {
+ Address address(CpuRegister(RSP), second.GetStackIndex());
+ if (instruction->IsAnd()) {
+ __ andl(first.As<CpuRegister>(), address);
+ } else if (instruction->IsOr()) {
+ __ orl(first.As<CpuRegister>(), address);
+ } else {
+ DCHECK(instruction->IsXor());
+ __ xorl(first.As<CpuRegister>(), address);
+ }
+ }
+ } else {
+ DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
+ if (instruction->IsAnd()) {
+ __ andq(first.As<CpuRegister>(), second.As<CpuRegister>());
+ } else if (instruction->IsOr()) {
+ __ orq(first.As<CpuRegister>(), second.As<CpuRegister>());
+ } else {
+ DCHECK(instruction->IsXor());
+ __ xorq(first.As<CpuRegister>(), second.As<CpuRegister>());
+ }
+ }
+}
+
} // namespace x86_64
} // namespace art
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 9565b6f..4c6e475 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -104,9 +104,10 @@
#undef DECLARE_VISIT_INSTRUCTION
- void HandleInvoke(HInvoke* invoke);
-
private:
+ void HandleInvoke(HInvoke* invoke);
+ void HandleBitwiseOperation(HBinaryOperation* operation);
+
CodeGeneratorX86_64* const codegen_;
InvokeDexCallingConventionVisitor parameter_visitor_;
@@ -132,6 +133,7 @@
// the suspend call.
void GenerateSuspendCheck(HSuspendCheck* instruction, HBasicBlock* successor);
void GenerateClassInitializationCheck(SlowPathCodeX86_64* slow_path, CpuRegister class_reg);
+ void HandleBitwiseOperation(HBinaryOperation* operation);
X86_64Assembler* const assembler_;
CodeGeneratorX86_64* const codegen_;
@@ -171,7 +173,7 @@
return &assembler_;
}
- ParallelMoveResolverX86_64* GetMoveResolver() {
+ ParallelMoveResolverX86_64* GetMoveResolver() OVERRIDE {
return &move_resolver_;
}
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index ecee443..fee3ea6 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -16,6 +16,7 @@
#include <functional>
+#include "arch/instruction_set.h"
#include "base/macros.h"
#include "builder.h"
#include "code_generator_arm.h"
@@ -25,7 +26,6 @@
#include "common_compiler_test.h"
#include "dex_file.h"
#include "dex_instruction.h"
-#include "instruction_set.h"
#include "nodes.h"
#include "optimizing_unit_test.h"
#include "prepare_for_register_allocation.h"
@@ -39,7 +39,7 @@
class InternalCodeAllocator : public CodeAllocator {
public:
- InternalCodeAllocator() { }
+ InternalCodeAllocator() : size_(0) { }
virtual uint8_t* Allocate(size_t size) {
size_ = size;
@@ -362,6 +362,27 @@
#undef NOT_LONG_TEST
+#if defined(__aarch64__)
+TEST(CodegenTest, DISABLED_IntToLongOfLongToInt) {
+#else
+TEST(CodegenTest, IntToLongOfLongToInt) {
+#endif
+ const int64_t input = INT64_C(4294967296); // 2^32
+ const uint16_t word0 = Low16Bits(Low32Bits(input)); // LSW.
+ const uint16_t word1 = High16Bits(Low32Bits(input));
+ const uint16_t word2 = Low16Bits(High32Bits(input));
+ const uint16_t word3 = High16Bits(High32Bits(input)); // MSW.
+ const uint16_t data[] = FIVE_REGISTERS_CODE_ITEM(
+ Instruction::CONST_WIDE | 0 << 8, word0, word1, word2, word3,
+ Instruction::CONST_WIDE | 2 << 8, 1, 0, 0, 0,
+ Instruction::ADD_LONG | 0, 0 << 8 | 2, // v0 <- 2^32 + 1
+ Instruction::LONG_TO_INT | 4 << 8 | 0 << 12,
+ Instruction::INT_TO_LONG | 2 << 8 | 4 << 12,
+ Instruction::RETURN_WIDE | 2 << 8);
+
+ TestCodeLong(data, true, 1);
+}
+
TEST(CodegenTest, ReturnAdd1) {
const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 3 << 12 | 0,
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 2dab605..5af3cdd 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -90,7 +90,7 @@
maximum_number_of_out_vregs_(0),
number_of_vregs_(0),
number_of_in_vregs_(0),
- number_of_temporaries_(0),
+ temporaries_vreg_slots_(0),
current_instruction_id_(0) {}
ArenaAllocator* GetArena() const { return arena_; }
@@ -129,12 +129,12 @@
maximum_number_of_out_vregs_ = std::max(new_value, maximum_number_of_out_vregs_);
}
- void UpdateNumberOfTemporaries(size_t count) {
- number_of_temporaries_ = std::max(count, number_of_temporaries_);
+ void UpdateTemporariesVRegSlots(size_t slots) {
+ temporaries_vreg_slots_ = std::max(slots, temporaries_vreg_slots_);
}
- size_t GetNumberOfTemporaries() const {
- return number_of_temporaries_;
+ size_t GetTemporariesVRegSlots() const {
+ return temporaries_vreg_slots_;
}
void SetNumberOfVRegs(uint16_t number_of_vregs) {
@@ -192,8 +192,8 @@
// The number of virtual registers used by parameters of this method.
uint16_t number_of_in_vregs_;
- // The number of temporaries that will be needed for the baseline compiler.
- size_t number_of_temporaries_;
+ // Number of vreg size slots that the temporaries use (used in baseline compiler).
+ size_t temporaries_vreg_slots_;
// The current id to assign to a newly added instruction. See HInstruction.id_.
int current_instruction_id_;
@@ -475,10 +475,12 @@
#define FOR_EACH_CONCRETE_INSTRUCTION(M) \
M(Add, BinaryOperation) \
+ M(And, BinaryOperation) \
M(ArrayGet, Instruction) \
M(ArrayLength, Instruction) \
M(ArraySet, Instruction) \
M(BoundsCheck, Instruction) \
+ M(CheckCast, Instruction) \
M(ClinitCheck, Instruction) \
M(Compare, BinaryOperation) \
M(Condition, BinaryOperation) \
@@ -494,6 +496,7 @@
M(If, Instruction) \
M(InstanceFieldGet, Instruction) \
M(InstanceFieldSet, Instruction) \
+ M(InstanceOf, Instruction) \
M(IntConstant, Constant) \
M(InvokeInterface, Invoke) \
M(InvokeStatic, Invoke) \
@@ -506,6 +509,7 @@
M(LoadString, Instruction) \
M(Local, Instruction) \
M(LongConstant, Constant) \
+ M(MonitorOperation, Instruction) \
M(Mul, BinaryOperation) \
M(Neg, UnaryOperation) \
M(NewArray, Instruction) \
@@ -513,6 +517,7 @@
M(Not, UnaryOperation) \
M(NotEqual, Condition) \
M(NullCheck, Instruction) \
+ M(Or, BinaryOperation) \
M(ParallelMove, Instruction) \
M(ParameterValue, Instruction) \
M(Phi, Instruction) \
@@ -525,8 +530,8 @@
M(SuspendCheck, Instruction) \
M(Temporary, Instruction) \
M(Throw, Instruction) \
- M(TypeCheck, Instruction) \
M(TypeConversion, Instruction) \
+ M(Xor, BinaryOperation) \
#define FOR_EACH_INSTRUCTION(M) \
FOR_EACH_CONCRETE_INSTRUCTION(M) \
@@ -1745,8 +1750,8 @@
class HDiv : public HBinaryOperation {
public:
- HDiv(Primitive::Type result_type, HInstruction* left, HInstruction* right)
- : HBinaryOperation(result_type, left, right) {}
+ HDiv(Primitive::Type result_type, HInstruction* left, HInstruction* right, uint32_t dex_pc)
+ : HBinaryOperation(result_type, left, right), dex_pc_(dex_pc) {}
virtual int32_t Evaluate(int32_t x, int32_t y) const {
// Our graph structure ensures we never have 0 for `y` during constant folding.
@@ -1756,9 +1761,13 @@
}
virtual int64_t Evaluate(int64_t x, int64_t y) const { return x / y; }
+ uint32_t GetDexPc() const { return dex_pc_; }
+
DECLARE_INSTRUCTION(Div);
private:
+ const uint32_t dex_pc_;
+
DISALLOW_COPY_AND_ASSIGN(HDiv);
};
@@ -1789,6 +1798,54 @@
DISALLOW_COPY_AND_ASSIGN(HDivZeroCheck);
};
+class HAnd : public HBinaryOperation {
+ public:
+ HAnd(Primitive::Type result_type, HInstruction* left, HInstruction* right)
+ : HBinaryOperation(result_type, left, right) {}
+
+ bool IsCommutative() OVERRIDE { return true; }
+
+ int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE { return x & y; }
+ int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE { return x & y; }
+
+ DECLARE_INSTRUCTION(And);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HAnd);
+};
+
+class HOr : public HBinaryOperation {
+ public:
+ HOr(Primitive::Type result_type, HInstruction* left, HInstruction* right)
+ : HBinaryOperation(result_type, left, right) {}
+
+ bool IsCommutative() OVERRIDE { return true; }
+
+ int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE { return x | y; }
+ int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE { return x | y; }
+
+ DECLARE_INSTRUCTION(Or);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HOr);
+};
+
+class HXor : public HBinaryOperation {
+ public:
+ HXor(Primitive::Type result_type, HInstruction* left, HInstruction* right)
+ : HBinaryOperation(result_type, left, right) {}
+
+ bool IsCommutative() OVERRIDE { return true; }
+
+ int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE { return x ^ y; }
+ int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE { return x ^ y; }
+
+ DECLARE_INSTRUCTION(Xor);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HXor);
+};
+
// The value of a parameter in this method. Its location depends on
// the calling convention.
class HParameterValue : public HExpression<0> {
@@ -2105,8 +2162,8 @@
* Some DEX instructions are folded into multiple HInstructions that need
* to stay live until the last HInstruction. This class
* is used as a marker for the baseline compiler to ensure its preceding
- * HInstruction stays live. `index` is the temporary number that is used
- * for knowing the stack offset where to store the instruction.
+ * HInstruction stays live. `index` represents the stack location index of the
+ * instruction (the actual offset is computed as index * vreg_size).
*/
class HTemporary : public HTemplateInstruction<0> {
public:
@@ -2114,7 +2171,11 @@
size_t GetIndex() const { return index_; }
- Primitive::Type GetType() const OVERRIDE { return GetPrevious()->GetType(); }
+ Primitive::Type GetType() const OVERRIDE {
+ // The previous instruction is the one that will be stored in the temporary location.
+ DCHECK(GetPrevious() != nullptr);
+ return GetPrevious()->GetType();
+ }
DECLARE_INSTRUCTION(Temporary);
@@ -2351,12 +2412,12 @@
DISALLOW_COPY_AND_ASSIGN(HThrow);
};
-class HTypeCheck : public HExpression<2> {
+class HInstanceOf : public HExpression<2> {
public:
- explicit HTypeCheck(HInstruction* object,
- HLoadClass* constant,
- bool class_is_final,
- uint32_t dex_pc)
+ HInstanceOf(HInstruction* object,
+ HLoadClass* constant,
+ bool class_is_final,
+ uint32_t dex_pc)
: HExpression(Primitive::kPrimBoolean, SideEffects::None()),
class_is_final_(class_is_final),
dex_pc_(dex_pc) {
@@ -2366,13 +2427,11 @@
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
bool NeedsEnvironment() const OVERRIDE {
- // TODO: Can we debug when doing a runtime instanceof check?
return false;
}
@@ -2380,13 +2439,82 @@
bool IsClassFinal() const { return class_is_final_; }
- DECLARE_INSTRUCTION(TypeCheck);
+ DECLARE_INSTRUCTION(InstanceOf);
private:
const bool class_is_final_;
const uint32_t dex_pc_;
- DISALLOW_COPY_AND_ASSIGN(HTypeCheck);
+ DISALLOW_COPY_AND_ASSIGN(HInstanceOf);
+};
+
+class HCheckCast : public HTemplateInstruction<2> {
+ public:
+ HCheckCast(HInstruction* object,
+ HLoadClass* constant,
+ bool class_is_final,
+ uint32_t dex_pc)
+ : HTemplateInstruction(SideEffects::None()),
+ class_is_final_(class_is_final),
+ dex_pc_(dex_pc) {
+ SetRawInputAt(0, object);
+ SetRawInputAt(1, constant);
+ }
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ return true;
+ }
+
+ bool NeedsEnvironment() const OVERRIDE {
+ // Instruction may throw a CheckCastError.
+ return true;
+ }
+
+ bool CanThrow() const OVERRIDE { return true; }
+
+ uint32_t GetDexPc() const { return dex_pc_; }
+
+ bool IsClassFinal() const { return class_is_final_; }
+
+ DECLARE_INSTRUCTION(CheckCast);
+
+ private:
+ const bool class_is_final_;
+ const uint32_t dex_pc_;
+
+ DISALLOW_COPY_AND_ASSIGN(HCheckCast);
+};
+
+class HMonitorOperation : public HTemplateInstruction<1> {
+ public:
+ enum OperationKind {
+ kEnter,
+ kExit,
+ };
+
+ HMonitorOperation(HInstruction* object, OperationKind kind, uint32_t dex_pc)
+ : HTemplateInstruction(SideEffects::None()), kind_(kind), dex_pc_(dex_pc) {
+ SetRawInputAt(0, object);
+ }
+
+ // Instruction may throw a Java exception, so we need an environment.
+ bool NeedsEnvironment() const OVERRIDE { return true; }
+ bool CanThrow() const OVERRIDE { return true; }
+
+ uint32_t GetDexPc() const { return dex_pc_; }
+
+ bool IsEnter() const { return kind_ == kEnter; }
+
+ DECLARE_INSTRUCTION(MonitorOperation);
+
+ protected:
+ const OperationKind kind_;
+ const uint32_t dex_pc_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HMonitorOperation);
};
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 3d81362..ba4be34 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -713,7 +713,7 @@
graph->AddBlock(block);
entry->AddSuccessor(block);
- *div = new (allocator) HDiv(Primitive::kPrimInt, first, second);
+ *div = new (allocator) HDiv(Primitive::kPrimInt, first, second, 0); // don't care about dex_pc.
block->AddInstruction(*div);
block->AddInstruction(new (allocator) HExit());
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index e83c528..fec40f9 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -253,4 +253,9 @@
instruction->SetEnvironment(environment);
}
+void SsaBuilder::VisitTemporary(HTemporary* temp) {
+ // Temporaries are only used by the baseline register allocator.
+ temp->GetBlock()->RemoveInstruction(temp);
+}
+
} // namespace art
diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h
index 24f5ac5..2207cd6 100644
--- a/compiler/optimizing/ssa_builder.h
+++ b/compiler/optimizing/ssa_builder.h
@@ -51,6 +51,7 @@
void VisitLoadLocal(HLoadLocal* load);
void VisitStoreLocal(HStoreLocal* store);
void VisitInstruction(HInstruction* instruction);
+ void VisitTemporary(HTemporary* instruction);
static HInstruction* GetFloatOrDoubleEquivalent(HInstruction* user,
HInstruction* instruction,
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index dca2ab7..d288b70 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -421,6 +421,12 @@
virtual void sdiv(Register rd, Register rn, Register rm, Condition cond = AL) = 0;
virtual void udiv(Register rd, Register rn, Register rm, Condition cond = AL) = 0;
+ // Bit field extract instructions.
+ virtual void sbfx(Register rd, Register rn, uint32_t lsb, uint32_t width,
+ Condition cond = AL) = 0;
+ virtual void ubfx(Register rd, Register rn, uint32_t lsb, uint32_t width,
+ Condition cond = AL) = 0;
+
// Load/store instructions.
virtual void ldr(Register rd, const Address& ad, Condition cond = AL) = 0;
virtual void str(Register rd, const Address& ad, Condition cond = AL) = 0;
diff --git a/compiler/utils/arm/assembler_arm32.cc b/compiler/utils/arm/assembler_arm32.cc
index c8a57b1..39ebf68 100644
--- a/compiler/utils/arm/assembler_arm32.cc
+++ b/compiler/utils/arm/assembler_arm32.cc
@@ -208,6 +208,44 @@
}
+void Arm32Assembler::sbfx(Register rd, Register rn, uint32_t lsb, uint32_t width, Condition cond) {
+ CHECK_NE(rd, kNoRegister);
+ CHECK_NE(rn, kNoRegister);
+ CHECK_NE(cond, kNoCondition);
+ CHECK_LE(lsb, 31U);
+ CHECK(1U <= width && width <= 32U) << width;
+ uint32_t widthminus1 = width - 1;
+
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B26 | B25 | B24 | B23 | B21 |
+ (widthminus1 << 16) |
+ (static_cast<uint32_t>(rd) << 12) |
+ (lsb << 7) |
+ B6 | B4 |
+ static_cast<uint32_t>(rn);
+ Emit(encoding);
+}
+
+
+void Arm32Assembler::ubfx(Register rd, Register rn, uint32_t lsb, uint32_t width, Condition cond) {
+ CHECK_NE(rd, kNoRegister);
+ CHECK_NE(rn, kNoRegister);
+ CHECK_NE(cond, kNoCondition);
+ CHECK_LE(lsb, 31U);
+ CHECK(1U <= width && width <= 32U) << width;
+ uint32_t widthminus1 = width - 1;
+
+ int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+ B26 | B25 | B24 | B23 | B22 | B21 |
+ (widthminus1 << 16) |
+ (static_cast<uint32_t>(rd) << 12) |
+ (lsb << 7) |
+ B6 | B4 |
+ static_cast<uint32_t>(rn);
+ Emit(encoding);
+}
+
+
void Arm32Assembler::ldr(Register rd, const Address& ad, Condition cond) {
EmitMemOp(cond, true, false, rd, ad);
}
diff --git a/compiler/utils/arm/assembler_arm32.h b/compiler/utils/arm/assembler_arm32.h
index dbabb99..0b009e1 100644
--- a/compiler/utils/arm/assembler_arm32.h
+++ b/compiler/utils/arm/assembler_arm32.h
@@ -96,6 +96,10 @@
void sdiv(Register rd, Register rn, Register rm, Condition cond = AL) OVERRIDE;
void udiv(Register rd, Register rn, Register rm, Condition cond = AL) OVERRIDE;
+ // Bit field extract instructions.
+ void sbfx(Register rd, Register rn, uint32_t lsb, uint32_t width, Condition cond = AL) OVERRIDE;
+ void ubfx(Register rd, Register rn, uint32_t lsb, uint32_t width, Condition cond = AL) OVERRIDE;
+
// Load/store instructions.
void ldr(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;
void str(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;
diff --git a/compiler/utils/arm/assembler_arm32_test.cc b/compiler/utils/arm/assembler_arm32_test.cc
new file mode 100644
index 0000000..277a9eb
--- /dev/null
+++ b/compiler/utils/arm/assembler_arm32_test.cc
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "assembler_arm32.h"
+
+#include "base/stl_util.h"
+#include "utils/assembler_test.h"
+
+namespace art {
+
+class AssemblerArm32Test : public AssemblerTest<arm::Arm32Assembler,
+ arm::Register, arm::SRegister,
+ uint32_t> {
+ protected:
+ std::string GetArchitectureString() OVERRIDE {
+ return "arm";
+ }
+
+ std::string GetDisassembleParameters() OVERRIDE {
+ return " -D -bbinary -marm --no-show-raw-insn";
+ }
+
+ void SetUpHelpers() OVERRIDE {
+ if (registers_.size() == 0) {
+ registers_.insert(end(registers_),
+ { // NOLINT(whitespace/braces)
+ new arm::Register(arm::R0),
+ new arm::Register(arm::R1),
+ new arm::Register(arm::R2),
+ new arm::Register(arm::R3),
+ new arm::Register(arm::R4),
+ new arm::Register(arm::R5),
+ new arm::Register(arm::R6),
+ new arm::Register(arm::R7),
+ new arm::Register(arm::R8),
+ new arm::Register(arm::R9),
+ new arm::Register(arm::R10),
+ new arm::Register(arm::R11),
+ new arm::Register(arm::R12),
+ new arm::Register(arm::R13),
+ new arm::Register(arm::R14),
+ new arm::Register(arm::R15)
+ });
+ }
+ }
+
+ void TearDown() OVERRIDE {
+ AssemblerTest::TearDown();
+ STLDeleteElements(®isters_);
+ }
+
+ std::vector<arm::Register*> GetRegisters() OVERRIDE {
+ return registers_;
+ }
+
+ uint32_t CreateImmediate(int64_t imm_value) OVERRIDE {
+ return imm_value;
+ }
+
+ private:
+ std::vector<arm::Register*> registers_;
+};
+
+
+TEST_F(AssemblerArm32Test, Toolchain) {
+ EXPECT_TRUE(CheckTools());
+}
+
+
+TEST_F(AssemblerArm32Test, Sbfx) {
+ GetAssembler()->sbfx(arm::R0, arm::R1, 0, 1);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 0, 8);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 0, 16);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 0, 32);
+
+ GetAssembler()->sbfx(arm::R0, arm::R1, 8, 1);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 8, 8);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 8, 16);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 8, 24);
+
+ GetAssembler()->sbfx(arm::R0, arm::R1, 16, 1);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 16, 8);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 16, 16);
+
+ GetAssembler()->sbfx(arm::R0, arm::R1, 31, 1);
+
+ const char* expected =
+ "sbfx r0, r1, #0, #1\n"
+ "sbfx r0, r1, #0, #8\n"
+ "sbfx r0, r1, #0, #16\n"
+ "sbfx r0, r1, #0, #32\n"
+
+ "sbfx r0, r1, #8, #1\n"
+ "sbfx r0, r1, #8, #8\n"
+ "sbfx r0, r1, #8, #16\n"
+ "sbfx r0, r1, #8, #24\n"
+
+ "sbfx r0, r1, #16, #1\n"
+ "sbfx r0, r1, #16, #8\n"
+ "sbfx r0, r1, #16, #16\n"
+
+ "sbfx r0, r1, #31, #1\n";
+ DriverStr(expected, "sbfx");
+}
+
+TEST_F(AssemblerArm32Test, Ubfx) {
+ GetAssembler()->ubfx(arm::R0, arm::R1, 0, 1);
+ GetAssembler()->ubfx(arm::R0, arm::R1, 0, 8);
+ GetAssembler()->ubfx(arm::R0, arm::R1, 0, 16);
+ GetAssembler()->ubfx(arm::R0, arm::R1, 0, 32);
+
+ GetAssembler()->ubfx(arm::R0, arm::R1, 8, 1);
+ GetAssembler()->ubfx(arm::R0, arm::R1, 8, 8);
+ GetAssembler()->ubfx(arm::R0, arm::R1, 8, 16);
+ GetAssembler()->ubfx(arm::R0, arm::R1, 8, 24);
+
+ GetAssembler()->ubfx(arm::R0, arm::R1, 16, 1);
+ GetAssembler()->ubfx(arm::R0, arm::R1, 16, 8);
+ GetAssembler()->ubfx(arm::R0, arm::R1, 16, 16);
+
+ GetAssembler()->ubfx(arm::R0, arm::R1, 31, 1);
+
+ const char* expected =
+ "ubfx r0, r1, #0, #1\n"
+ "ubfx r0, r1, #0, #8\n"
+ "ubfx r0, r1, #0, #16\n"
+ "ubfx r0, r1, #0, #32\n"
+
+ "ubfx r0, r1, #8, #1\n"
+ "ubfx r0, r1, #8, #8\n"
+ "ubfx r0, r1, #8, #16\n"
+ "ubfx r0, r1, #8, #24\n"
+
+ "ubfx r0, r1, #16, #1\n"
+ "ubfx r0, r1, #16, #8\n"
+ "ubfx r0, r1, #16, #16\n"
+
+ "ubfx r0, r1, #31, #1\n";
+ DriverStr(expected, "ubfx");
+}
+
+} // namespace art
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 053e843..3ab9b2b 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -264,6 +264,48 @@
}
+void Thumb2Assembler::sbfx(Register rd, Register rn, uint32_t lsb, uint32_t width, Condition cond) {
+ CheckCondition(cond);
+ CHECK_LE(lsb, 31U);
+ CHECK(1U <= width && width <= 32U) << width;
+ uint32_t widthminus1 = width - 1;
+ uint32_t imm2 = lsb & (B1 | B0); // Bits 0-1 of `lsb`.
+ uint32_t imm3 = (lsb & (B4 | B3 | B2)) >> 2; // Bits 2-4 of `lsb`.
+
+ uint32_t op = 20U /* 0b10100 */;
+ int32_t encoding = B31 | B30 | B29 | B28 | B25 |
+ op << 20 |
+ static_cast<uint32_t>(rn) << 16 |
+ imm3 << 12 |
+ static_cast<uint32_t>(rd) << 8 |
+ imm2 << 6 |
+ widthminus1;
+
+ Emit32(encoding);
+}
+
+
+void Thumb2Assembler::ubfx(Register rd, Register rn, uint32_t lsb, uint32_t width, Condition cond) {
+ CheckCondition(cond);
+ CHECK_LE(lsb, 31U);
+ CHECK(1U <= width && width <= 32U) << width;
+ uint32_t widthminus1 = width - 1;
+ uint32_t imm2 = lsb & (B1 | B0); // Bits 0-1 of `lsb`.
+ uint32_t imm3 = (lsb & (B4 | B3 | B2)) >> 2; // Bits 2-4 of `lsb`.
+
+ uint32_t op = 28U /* 0b11100 */;
+ int32_t encoding = B31 | B30 | B29 | B28 | B25 |
+ op << 20 |
+ static_cast<uint32_t>(rn) << 16 |
+ imm3 << 12 |
+ static_cast<uint32_t>(rd) << 8 |
+ imm2 << 6 |
+ widthminus1;
+
+ Emit32(encoding);
+}
+
+
void Thumb2Assembler::ldr(Register rd, const Address& ad, Condition cond) {
EmitLoadStore(cond, true, false, false, false, rd, ad);
}
diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h
index 9ccdef7..cfa251a 100644
--- a/compiler/utils/arm/assembler_thumb2.h
+++ b/compiler/utils/arm/assembler_thumb2.h
@@ -118,6 +118,10 @@
void sdiv(Register rd, Register rn, Register rm, Condition cond = AL) OVERRIDE;
void udiv(Register rd, Register rn, Register rm, Condition cond = AL) OVERRIDE;
+ // Bit field extract instructions.
+ void sbfx(Register rd, Register rn, uint32_t lsb, uint32_t width, Condition cond = AL) OVERRIDE;
+ void ubfx(Register rd, Register rn, uint32_t lsb, uint32_t width, Condition cond = AL) OVERRIDE;
+
// Load/store instructions.
void ldr(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;
void str(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;
diff --git a/compiler/utils/arm/assembler_thumb2_test.cc b/compiler/utils/arm/assembler_thumb2_test.cc
new file mode 100644
index 0000000..65d6d45
--- /dev/null
+++ b/compiler/utils/arm/assembler_thumb2_test.cc
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "assembler_thumb2.h"
+
+#include "base/stl_util.h"
+#include "utils/assembler_test.h"
+
+namespace art {
+
+class AssemblerThumb2Test : public AssemblerTest<arm::Thumb2Assembler,
+ arm::Register, arm::SRegister,
+ uint32_t> {
+ protected:
+ std::string GetArchitectureString() OVERRIDE {
+ return "arm";
+ }
+
+ std::string GetAssemblerParameters() OVERRIDE {
+ return " -mthumb";
+ }
+
+ std::string GetDisassembleParameters() OVERRIDE {
+ return " -D -bbinary -marm --no-show-raw-insn";
+ }
+
+ void SetUpHelpers() OVERRIDE {
+ if (registers_.size() == 0) {
+ registers_.insert(end(registers_),
+ { // NOLINT(whitespace/braces)
+ new arm::Register(arm::R0),
+ new arm::Register(arm::R1),
+ new arm::Register(arm::R2),
+ new arm::Register(arm::R3),
+ new arm::Register(arm::R4),
+ new arm::Register(arm::R5),
+ new arm::Register(arm::R6),
+ new arm::Register(arm::R7),
+ new arm::Register(arm::R8),
+ new arm::Register(arm::R9),
+ new arm::Register(arm::R10),
+ new arm::Register(arm::R11),
+ new arm::Register(arm::R12),
+ new arm::Register(arm::R13),
+ new arm::Register(arm::R14),
+ new arm::Register(arm::R15)
+ });
+ }
+ }
+
+ void TearDown() OVERRIDE {
+ AssemblerTest::TearDown();
+ STLDeleteElements(®isters_);
+ }
+
+ std::vector<arm::Register*> GetRegisters() OVERRIDE {
+ return registers_;
+ }
+
+ uint32_t CreateImmediate(int64_t imm_value) OVERRIDE {
+ return imm_value;
+ }
+
+ private:
+ std::vector<arm::Register*> registers_;
+};
+
+
+TEST_F(AssemblerThumb2Test, Toolchain) {
+ EXPECT_TRUE(CheckTools());
+}
+
+
+TEST_F(AssemblerThumb2Test, Sbfx) {
+ GetAssembler()->sbfx(arm::R0, arm::R1, 0, 1);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 0, 8);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 0, 16);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 0, 32);
+
+ GetAssembler()->sbfx(arm::R0, arm::R1, 8, 1);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 8, 8);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 8, 16);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 8, 24);
+
+ GetAssembler()->sbfx(arm::R0, arm::R1, 16, 1);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 16, 8);
+ GetAssembler()->sbfx(arm::R0, arm::R1, 16, 16);
+
+ GetAssembler()->sbfx(arm::R0, arm::R1, 31, 1);
+
+ const char* expected =
+ "sbfx r0, r1, #0, #1\n"
+ "sbfx r0, r1, #0, #8\n"
+ "sbfx r0, r1, #0, #16\n"
+ "sbfx r0, r1, #0, #32\n"
+
+ "sbfx r0, r1, #8, #1\n"
+ "sbfx r0, r1, #8, #8\n"
+ "sbfx r0, r1, #8, #16\n"
+ "sbfx r0, r1, #8, #24\n"
+
+ "sbfx r0, r1, #16, #1\n"
+ "sbfx r0, r1, #16, #8\n"
+ "sbfx r0, r1, #16, #16\n"
+
+ "sbfx r0, r1, #31, #1\n";
+ DriverStr(expected, "sbfx");
+}
+
+TEST_F(AssemblerThumb2Test, Ubfx) {
+ GetAssembler()->ubfx(arm::R0, arm::R1, 0, 1);
+ GetAssembler()->ubfx(arm::R0, arm::R1, 0, 8);
+ GetAssembler()->ubfx(arm::R0, arm::R1, 0, 16);
+ GetAssembler()->ubfx(arm::R0, arm::R1, 0, 32);
+
+ GetAssembler()->ubfx(arm::R0, arm::R1, 8, 1);
+ GetAssembler()->ubfx(arm::R0, arm::R1, 8, 8);
+ GetAssembler()->ubfx(arm::R0, arm::R1, 8, 16);
+ GetAssembler()->ubfx(arm::R0, arm::R1, 8, 24);
+
+ GetAssembler()->ubfx(arm::R0, arm::R1, 16, 1);
+ GetAssembler()->ubfx(arm::R0, arm::R1, 16, 8);
+ GetAssembler()->ubfx(arm::R0, arm::R1, 16, 16);
+
+ GetAssembler()->ubfx(arm::R0, arm::R1, 31, 1);
+
+ const char* expected =
+ "ubfx r0, r1, #0, #1\n"
+ "ubfx r0, r1, #0, #8\n"
+ "ubfx r0, r1, #0, #16\n"
+ "ubfx r0, r1, #0, #32\n"
+
+ "ubfx r0, r1, #8, #1\n"
+ "ubfx r0, r1, #8, #8\n"
+ "ubfx r0, r1, #8, #16\n"
+ "ubfx r0, r1, #8, #24\n"
+
+ "ubfx r0, r1, #16, #1\n"
+ "ubfx r0, r1, #16, #8\n"
+ "ubfx r0, r1, #16, #16\n"
+
+ "ubfx r0, r1, #31, #1\n";
+ DriverStr(expected, "ubfx");
+}
+
+} // namespace art
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index ad7e98d..67711e3 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -19,16 +19,16 @@
#include <vector>
+#include "arch/instruction_set.h"
#include "base/logging.h"
#include "base/macros.h"
#include "arm/constants_arm.h"
-#include "mips/constants_mips.h"
-#include "x86/constants_x86.h"
-#include "x86_64/constants_x86_64.h"
-#include "instruction_set.h"
#include "managed_register.h"
#include "memory_region.h"
+#include "mips/constants_mips.h"
#include "offsets.h"
+#include "x86/constants_x86.h"
+#include "x86_64/constants_x86_64.h"
namespace art {
diff --git a/compiler/utils/assembler_test.h b/compiler/utils/assembler_test.h
index 91237ae..9d3fa01 100644
--- a/compiler/utils/assembler_test.h
+++ b/compiler/utils/assembler_test.h
@@ -29,21 +29,31 @@
namespace art {
+// Helper for a constexpr string length.
+constexpr size_t ConstexprStrLen(char const* str, size_t count = 0) {
+ return ('\0' == str[0]) ? count : ConstexprStrLen(str+1, count+1);
+}
+
// Use a glocal static variable to keep the same name for all test data. Else we'll just spam the
// temp directory.
static std::string tmpnam_;
-template<typename Ass, typename Reg, typename Imm>
+template<typename Ass, typename Reg, typename FPReg, typename Imm>
class AssemblerTest : public testing::Test {
public:
+ enum class RegisterView { // private
+ kUsePrimaryName,
+ kUseSecondaryName
+ };
+
Ass* GetAssembler() {
return assembler_.get();
}
- typedef std::string (*TestFn)(Ass* assembler);
+ typedef std::string (*TestFn)(AssemblerTest* assembler_test, Ass* assembler);
void DriverFn(TestFn f, std::string test_name) {
- Driver(f(assembler_.get()), test_name);
+ Driver(f(this, assembler_.get()), test_name);
}
// This driver assumes the assembler has already been called.
@@ -52,116 +62,114 @@
}
std::string RepeatR(void (Ass::*f)(Reg), std::string fmt) {
- const std::vector<Reg*> registers = GetRegisters();
- std::string str;
- for (auto reg : registers) {
- (assembler_.get()->*f)(*reg);
- std::string base = fmt;
+ return RepeatTemplatedRegister<Reg>(f,
+ GetRegisters(),
+ &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+ fmt);
+ }
- size_t reg_index = base.find("{reg}");
- if (reg_index != std::string::npos) {
- std::ostringstream sreg;
- sreg << *reg;
- std::string reg_string = sreg.str();
- base.replace(reg_index, 5, reg_string);
- }
-
- if (str.size() > 0) {
- str += "\n";
- }
- str += base;
- }
- // Add a newline at the end.
- str += "\n";
- return str;
+ std::string Repeatr(void (Ass::*f)(Reg), std::string fmt) {
+ return RepeatTemplatedRegister<Reg>(f,
+ GetRegisters(),
+ &AssemblerTest::GetRegName<RegisterView::kUseSecondaryName>,
+ fmt);
}
std::string RepeatRR(void (Ass::*f)(Reg, Reg), std::string fmt) {
- const std::vector<Reg*> registers = GetRegisters();
- std::string str;
- for (auto reg1 : registers) {
- for (auto reg2 : registers) {
- (assembler_.get()->*f)(*reg1, *reg2);
- std::string base = fmt;
+ return RepeatTemplatedRegisters<Reg, Reg>(f,
+ GetRegisters(),
+ GetRegisters(),
+ &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+ &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+ fmt);
+ }
- size_t reg1_index = base.find("{reg1}");
- if (reg1_index != std::string::npos) {
- std::ostringstream sreg;
- sreg << *reg1;
- std::string reg_string = sreg.str();
- base.replace(reg1_index, 6, reg_string);
- }
+ std::string Repeatrr(void (Ass::*f)(Reg, Reg), std::string fmt) {
+ return RepeatTemplatedRegisters<Reg, Reg>(f,
+ GetRegisters(),
+ GetRegisters(),
+ &AssemblerTest::GetRegName<RegisterView::kUseSecondaryName>,
+ &AssemblerTest::GetRegName<RegisterView::kUseSecondaryName>,
+ fmt);
+ }
- size_t reg2_index = base.find("{reg2}");
- if (reg2_index != std::string::npos) {
- std::ostringstream sreg;
- sreg << *reg2;
- std::string reg_string = sreg.str();
- base.replace(reg2_index, 6, reg_string);
- }
-
- if (str.size() > 0) {
- str += "\n";
- }
- str += base;
- }
- }
- // Add a newline at the end.
- str += "\n";
- return str;
+ std::string RepeatRr(void (Ass::*f)(Reg, Reg), std::string fmt) {
+ return RepeatTemplatedRegisters<Reg, Reg>(f,
+ GetRegisters(),
+ GetRegisters(),
+ &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+ &AssemblerTest::GetRegName<RegisterView::kUseSecondaryName>,
+ fmt);
}
std::string RepeatRI(void (Ass::*f)(Reg, const Imm&), size_t imm_bytes, std::string fmt) {
- const std::vector<Reg*> registers = GetRegisters();
- std::string str;
- std::vector<int64_t> imms = CreateImmediateValues(imm_bytes);
- for (auto reg : registers) {
- for (int64_t imm : imms) {
- Imm new_imm = CreateImmediate(imm);
- (assembler_.get()->*f)(*reg, new_imm);
- std::string base = fmt;
-
- size_t reg_index = base.find("{reg}");
- if (reg_index != std::string::npos) {
- std::ostringstream sreg;
- sreg << *reg;
- std::string reg_string = sreg.str();
- base.replace(reg_index, 5, reg_string);
- }
-
- size_t imm_index = base.find("{imm}");
- if (imm_index != std::string::npos) {
- std::ostringstream sreg;
- sreg << imm;
- std::string imm_string = sreg.str();
- base.replace(imm_index, 5, imm_string);
- }
-
- if (str.size() > 0) {
- str += "\n";
- }
- str += base;
- }
- }
- // Add a newline at the end.
- str += "\n";
- return str;
+ return RepeatRegisterImm<RegisterView::kUsePrimaryName>(f, imm_bytes, fmt);
}
- std::string RepeatI(void (Ass::*f)(const Imm&), size_t imm_bytes, std::string fmt) {
+ std::string Repeatri(void (Ass::*f)(Reg, const Imm&), size_t imm_bytes, std::string fmt) {
+ return RepeatRegisterImm<RegisterView::kUseSecondaryName>(f, imm_bytes, fmt);
+ }
+
+ std::string RepeatFF(void (Ass::*f)(FPReg, FPReg), std::string fmt) {
+ return RepeatTemplatedRegisters<FPReg, FPReg>(f,
+ GetFPRegisters(),
+ GetFPRegisters(),
+ &AssemblerTest::GetFPRegName,
+ &AssemblerTest::GetFPRegName,
+ fmt);
+ }
+
+ std::string RepeatFR(void (Ass::*f)(FPReg, Reg), std::string fmt) {
+ return RepeatTemplatedRegisters<FPReg, Reg>(f,
+ GetFPRegisters(),
+ GetRegisters(),
+ &AssemblerTest::GetFPRegName,
+ &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+ fmt);
+ }
+
+ std::string RepeatFr(void (Ass::*f)(FPReg, Reg), std::string fmt) {
+ return RepeatTemplatedRegisters<FPReg, Reg>(f,
+ GetFPRegisters(),
+ GetRegisters(),
+ &AssemblerTest::GetFPRegName,
+ &AssemblerTest::GetRegName<RegisterView::kUseSecondaryName>,
+ fmt);
+ }
+
+ std::string RepeatRF(void (Ass::*f)(Reg, FPReg), std::string fmt) {
+ return RepeatTemplatedRegisters<Reg, FPReg>(f,
+ GetRegisters(),
+ GetFPRegisters(),
+ &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+ &AssemblerTest::GetFPRegName,
+ fmt);
+ }
+
+ std::string RepeatrF(void (Ass::*f)(Reg, FPReg), std::string fmt) {
+ return RepeatTemplatedRegisters<Reg, FPReg>(f,
+ GetRegisters(),
+ GetFPRegisters(),
+ &AssemblerTest::GetRegName<RegisterView::kUseSecondaryName>,
+ &AssemblerTest::GetFPRegName,
+ fmt);
+ }
+
+ std::string RepeatI(void (Ass::*f)(const Imm&), size_t imm_bytes, std::string fmt,
+ bool as_uint = false) {
std::string str;
- std::vector<int64_t> imms = CreateImmediateValues(imm_bytes);
+ std::vector<int64_t> imms = CreateImmediateValues(imm_bytes, as_uint);
for (int64_t imm : imms) {
Imm new_imm = CreateImmediate(imm);
(assembler_.get()->*f)(new_imm);
std::string base = fmt;
- size_t imm_index = base.find("{imm}");
+ size_t imm_index = base.find(IMM_TOKEN);
if (imm_index != std::string::npos) {
std::ostringstream sreg;
sreg << imm;
std::string imm_string = sreg.str();
- base.replace(imm_index, 5, imm_string);
+ base.replace(imm_index, ConstexprStrLen(IMM_TOKEN), imm_string);
}
if (str.size() > 0) {
@@ -200,7 +208,24 @@
return true;
}
+ // The following functions are public so that TestFn can use them...
+
+ virtual std::vector<Reg*> GetRegisters() = 0;
+
+ virtual std::vector<FPReg*> GetFPRegisters() {
+ UNIMPLEMENTED(FATAL) << "Architecture does not support floating-point registers";
+ UNREACHABLE();
+ }
+
+ // Secondary register names are the secondary view on registers, e.g., 32b on 64b systems.
+ virtual std::string GetSecondaryRegisterName(const Reg& reg ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL) << "Architecture does not support secondary registers";
+ UNREACHABLE();
+ }
+
protected:
+ explicit AssemblerTest() {}
+
void SetUp() OVERRIDE {
assembler_.reset(new Ass());
@@ -219,8 +244,6 @@
// Override this to set up any architecture-specific things, e.g., register vectors.
virtual void SetUpHelpers() {}
- virtual std::vector<Reg*> GetRegisters() = 0;
-
// Get the typically used name for this architecture, e.g., aarch64, x86_64, ...
virtual std::string GetArchitectureString() = 0;
@@ -305,23 +328,41 @@
}
// Create a couple of immediate values up to the number of bytes given.
- virtual std::vector<int64_t> CreateImmediateValues(size_t imm_bytes) {
+ virtual std::vector<int64_t> CreateImmediateValues(size_t imm_bytes, bool as_uint = false) {
std::vector<int64_t> res;
res.push_back(0);
- res.push_back(-1);
+ if (!as_uint) {
+ res.push_back(-1);
+ } else {
+ res.push_back(0xFF);
+ }
res.push_back(0x12);
if (imm_bytes >= 2) {
res.push_back(0x1234);
- res.push_back(-0x1234);
+ if (!as_uint) {
+ res.push_back(-0x1234);
+ } else {
+ res.push_back(0xFFFF);
+ }
if (imm_bytes >= 4) {
res.push_back(0x12345678);
- res.push_back(-0x12345678);
+ if (!as_uint) {
+ res.push_back(-0x12345678);
+ } else {
+ res.push_back(0xFFFFFFFF);
+ }
if (imm_bytes >= 6) {
res.push_back(0x123456789ABC);
- res.push_back(-0x123456789ABC);
+ if (!as_uint) {
+ res.push_back(-0x123456789ABC);
+ }
if (imm_bytes >= 8) {
res.push_back(0x123456789ABCDEF0);
- res.push_back(-0x123456789ABCDEF0);
+ if (!as_uint) {
+ res.push_back(-0x123456789ABCDEF0);
+ } else {
+ res.push_back(0xFFFFFFFFFFFFFFFF);
+ }
}
}
}
@@ -332,7 +373,127 @@
// Create an immediate from the specific value.
virtual Imm CreateImmediate(int64_t imm_value) = 0;
+ template <typename RegType>
+ std::string RepeatTemplatedRegister(void (Ass::*f)(RegType),
+ const std::vector<RegType*> registers,
+ std::string (AssemblerTest::*GetName)(const RegType&),
+ std::string fmt) {
+ std::string str;
+ for (auto reg : registers) {
+ (assembler_.get()->*f)(*reg);
+ std::string base = fmt;
+
+ std::string reg_string = (this->*GetName)(*reg);
+ size_t reg_index;
+ if ((reg_index = base.find(REG_TOKEN)) != std::string::npos) {
+ base.replace(reg_index, ConstexprStrLen(REG_TOKEN), reg_string);
+ }
+
+ if (str.size() > 0) {
+ str += "\n";
+ }
+ str += base;
+ }
+ // Add a newline at the end.
+ str += "\n";
+ return str;
+ }
+
+ template <typename Reg1, typename Reg2>
+ std::string RepeatTemplatedRegisters(void (Ass::*f)(Reg1, Reg2),
+ const std::vector<Reg1*> reg1_registers,
+ const std::vector<Reg2*> reg2_registers,
+ std::string (AssemblerTest::*GetName1)(const Reg1&),
+ std::string (AssemblerTest::*GetName2)(const Reg2&),
+ std::string fmt) {
+ std::string str;
+ for (auto reg1 : reg1_registers) {
+ for (auto reg2 : reg2_registers) {
+ (assembler_.get()->*f)(*reg1, *reg2);
+ std::string base = fmt;
+
+ std::string reg1_string = (this->*GetName1)(*reg1);
+ size_t reg1_index;
+ while ((reg1_index = base.find(REG1_TOKEN)) != std::string::npos) {
+ base.replace(reg1_index, ConstexprStrLen(REG1_TOKEN), reg1_string);
+ }
+
+ std::string reg2_string = (this->*GetName2)(*reg2);
+ size_t reg2_index;
+ while ((reg2_index = base.find(REG2_TOKEN)) != std::string::npos) {
+ base.replace(reg2_index, ConstexprStrLen(REG2_TOKEN), reg2_string);
+ }
+
+ if (str.size() > 0) {
+ str += "\n";
+ }
+ str += base;
+ }
+ }
+ // Add a newline at the end.
+ str += "\n";
+ return str;
+ }
+
private:
+ template <RegisterView kRegView>
+ std::string GetRegName(const Reg& reg) {
+ std::ostringstream sreg;
+ switch (kRegView) {
+ case RegisterView::kUsePrimaryName:
+ sreg << reg;
+ break;
+
+ case RegisterView::kUseSecondaryName:
+ sreg << GetSecondaryRegisterName(reg);
+ break;
+ }
+ return sreg.str();
+ }
+
+ std::string GetFPRegName(const FPReg& reg) {
+ std::ostringstream sreg;
+ sreg << reg;
+ return sreg.str();
+ }
+
+ template <RegisterView kRegView>
+ std::string RepeatRegisterImm(void (Ass::*f)(Reg, const Imm&), size_t imm_bytes,
+ std::string fmt) {
+ const std::vector<Reg*> registers = GetRegisters();
+ std::string str;
+ std::vector<int64_t> imms = CreateImmediateValues(imm_bytes);
+ for (auto reg : registers) {
+ for (int64_t imm : imms) {
+ Imm new_imm = CreateImmediate(imm);
+ (assembler_.get()->*f)(*reg, new_imm);
+ std::string base = fmt;
+
+ std::string reg_string = GetRegName<kRegView>(*reg);
+ size_t reg_index;
+ while ((reg_index = base.find(REG_TOKEN)) != std::string::npos) {
+ base.replace(reg_index, ConstexprStrLen(REG_TOKEN), reg_string);
+ }
+
+ size_t imm_index = base.find(IMM_TOKEN);
+ if (imm_index != std::string::npos) {
+ std::ostringstream sreg;
+ sreg << imm;
+ std::string imm_string = sreg.str();
+ base.replace(imm_index, ConstexprStrLen(IMM_TOKEN), imm_string);
+ }
+
+ if (str.size() > 0) {
+ str += "\n";
+ }
+ str += base;
+ }
+ }
+ // Add a newline at the end.
+ str += "\n";
+ return str;
+ }
+
// Driver() assembles and compares the results. If the results are not equal and we have a
// disassembler, disassemble both and check whether they have the same mnemonics (in which case
// we just warn).
@@ -394,10 +555,19 @@
std::vector<std::string> args;
+ // Encapsulate the whole command line in a single string passed to
+ // the shell, so that GetAssemblerCommand() may contain arguments
+ // in addition to the program name.
args.push_back(GetAssemblerCommand());
args.push_back("-o");
args.push_back(to_file);
args.push_back(from_file);
+ std::string cmd = Join(args, ' ');
+
+ args.clear();
+ args.push_back("/bin/sh");
+ args.push_back("-c");
+ args.push_back(cmd);
return Exec(args, error_msg);
}
@@ -414,6 +584,9 @@
std::string error_msg;
std::vector<std::string> args;
+ // Encapsulate the whole command line in a single string passed to
+ // the shell, so that GetObjdumpCommand() may contain arguments
+ // in addition to the program name.
args.push_back(GetObjdumpCommand());
args.push_back(file);
args.push_back(">");
@@ -477,12 +650,12 @@
bool result = CompareFiles(data_name + ".dis", as_name + ".dis");
- if (result) {
- std::remove(data_name.c_str());
- std::remove(as_name.c_str());
- std::remove((data_name + ".dis").c_str());
- std::remove((as_name + ".dis").c_str());
- }
+ // If you want to take a look at the differences between the ART assembler and GCC, comment
+ // out the removal code.
+ std::remove(data_name.c_str());
+ std::remove(as_name.c_str());
+ std::remove((data_name + ".dis").c_str());
+ std::remove((as_name + ".dis").c_str());
return result;
}
@@ -490,6 +663,9 @@
bool DisassembleBinary(std::string file, std::string* error_msg) {
std::vector<std::string> args;
+ // Encapsulate the whole command line in a single string passed to
+ // the shell, so that GetDisassembleCommand() may contain arguments
+ // in addition to the program name.
args.push_back(GetDisassembleCommand());
args.push_back(file);
args.push_back("| sed -n \'/<.data>/,$p\' | sed -e \'s/.*://\'");
@@ -686,6 +862,13 @@
return tmpnam_;
}
+ static constexpr size_t OBJDUMP_SECTION_LINE_MIN_TOKENS = 6;
+
+ static constexpr const char* REG_TOKEN = "{reg}";
+ static constexpr const char* REG1_TOKEN = "{reg1}";
+ static constexpr const char* REG2_TOKEN = "{reg2}";
+ static constexpr const char* IMM_TOKEN = "{imm}";
+
std::unique_ptr<Ass> assembler_;
std::string resolved_assembler_cmd_;
@@ -694,7 +877,7 @@
std::string android_data_;
- static constexpr size_t OBJDUMP_SECTION_LINE_MIN_TOKENS = 6;
+ DISALLOW_COPY_AND_ASSIGN(AssemblerTest);
};
} // namespace art
diff --git a/compiler/utils/stack_checks.h b/compiler/utils/stack_checks.h
index e762f7d..c348f2c 100644
--- a/compiler/utils/stack_checks.h
+++ b/compiler/utils/stack_checks.h
@@ -17,7 +17,7 @@
#ifndef ART_COMPILER_UTILS_STACK_CHECKS_H_
#define ART_COMPILER_UTILS_STACK_CHECKS_H_
-#include "instruction_set.h"
+#include "arch/instruction_set.h"
namespace art {
@@ -34,8 +34,7 @@
// stack overflow check on method entry.
//
// A frame is considered large when it's above kLargeFrameSize.
-static inline bool FrameNeedsStackCheck(size_t size, InstructionSet isa) {
- UNUSED(isa);
+static inline bool FrameNeedsStackCheck(size_t size, InstructionSet isa ATTRIBUTE_UNUSED) {
return size >= kLargeFrameSize;
}
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 4ddf979..8ebb40e 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -873,6 +873,13 @@
}
+void X86Assembler::andl(Register reg, const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x23);
+ EmitOperand(reg, address);
+}
+
+
void X86Assembler::andl(Register dst, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitComplex(4, Operand(dst), imm);
@@ -886,6 +893,13 @@
}
+void X86Assembler::orl(Register reg, const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0B);
+ EmitOperand(reg, address);
+}
+
+
void X86Assembler::orl(Register dst, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitComplex(1, Operand(dst), imm);
@@ -898,11 +912,20 @@
EmitOperand(dst, Operand(src));
}
+
+void X86Assembler::xorl(Register reg, const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x33);
+ EmitOperand(reg, address);
+}
+
+
void X86Assembler::xorl(Register dst, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitComplex(6, Operand(dst), imm);
}
+
void X86Assembler::addl(Register reg, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitComplex(0, Operand(reg), imm);
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index de4e6de..8aed934 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -350,12 +350,15 @@
void andl(Register dst, const Immediate& imm);
void andl(Register dst, Register src);
+ void andl(Register dst, const Address& address);
void orl(Register dst, const Immediate& imm);
void orl(Register dst, Register src);
+ void orl(Register dst, const Address& address);
void xorl(Register dst, Register src);
void xorl(Register dst, const Immediate& imm);
+ void xorl(Register dst, const Address& address);
void addl(Register dst, Register src);
void addl(Register reg, const Immediate& imm);
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 5b70658..2bb2ed8 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -120,6 +120,7 @@
void X86_64Assembler::movl(CpuRegister dst, const Immediate& imm) {
+ CHECK(imm.is_int32());
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(dst);
EmitUint8(0xB8 + dst.LowBits());
@@ -344,7 +345,7 @@
void X86_64Assembler::movss(XmmRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xF3);
- EmitOptionalRex32(dst, src);
+ EmitOptionalRex32(src, dst); // Movss is MR encoding instead of the usual RM.
EmitUint8(0x0F);
EmitUint8(0x11);
EmitXmmRegisterOperand(src.LowBits(), dst);
@@ -353,7 +354,7 @@
void X86_64Assembler::movsxd(CpuRegister dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitRex64(dst);
+ EmitRex64(dst, src);
EmitUint8(0x63);
EmitRegisterOperand(dst.LowBits(), src.LowBits());
}
@@ -504,7 +505,7 @@
void X86_64Assembler::movsd(XmmRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xF2);
- EmitOptionalRex32(dst, src);
+ EmitOptionalRex32(src, dst); // Movsd is MR encoding instead of the usual RM.
EmitUint8(0x0F);
EmitUint8(0x11);
EmitXmmRegisterOperand(src.LowBits(), dst);
@@ -855,17 +856,46 @@
void X86_64Assembler::xchgl(CpuRegister dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitOptionalRex32(dst, src);
+ // There is a short version for rax.
+ // It's a bit awkward, as CpuRegister has a const field, so assignment and thus swapping doesn't
+ // work.
+ const bool src_rax = src.AsRegister() == RAX;
+ const bool dst_rax = dst.AsRegister() == RAX;
+ if (src_rax || dst_rax) {
+ EmitOptionalRex32(src_rax ? dst : src);
+ EmitUint8(0x90 + (src_rax ? dst.LowBits() : src.LowBits()));
+ return;
+ }
+
+ // General case.
+ EmitOptionalRex32(src, dst);
EmitUint8(0x87);
- EmitRegisterOperand(dst.LowBits(), src.LowBits());
+ EmitRegisterOperand(src.LowBits(), dst.LowBits());
}
void X86_64Assembler::xchgq(CpuRegister dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitRex64(dst, src);
+ // There is a short version for rax.
+ // It's a bit awkward, as CpuRegister has a const field, so assignment and thus swapping doesn't
+ // work.
+ const bool src_rax = src.AsRegister() == RAX;
+ const bool dst_rax = dst.AsRegister() == RAX;
+ if (src_rax || dst_rax) {
+ // If src == dst (i.e. both are RAX), emit a nop instead.
+ if (src_rax && dst_rax) {
+ EmitUint8(0x90);
+ } else {
+ EmitRex64(src_rax ? dst : src);
+ EmitUint8(0x90 + (src_rax ? dst.LowBits() : src.LowBits()));
+ }
+ return;
+ }
+
+ // General case.
+ EmitRex64(src, dst);
EmitUint8(0x87);
- EmitOperand(dst.LowBits(), Operand(src));
+ EmitRegisterOperand(src.LowBits(), dst.LowBits());
}
@@ -908,6 +938,21 @@
}
+void X86_64Assembler::cmpl(const Address& address, CpuRegister reg) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(reg, address);
+ EmitUint8(0x39);
+ EmitOperand(reg.LowBits(), address);
+}
+
+
+void X86_64Assembler::cmpl(const Address& address, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(address);
+ EmitComplex(7, address, imm);
+}
+
+
void X86_64Assembler::cmpq(CpuRegister reg0, CpuRegister reg1) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitRex64(reg0, reg1);
@@ -932,6 +977,14 @@
}
+void X86_64Assembler::cmpq(const Address& address, const Immediate& imm) {
+ CHECK(imm.is_int32()); // cmpq only supports 32b immediate.
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64(address);
+ EmitComplex(7, address, imm);
+}
+
+
void X86_64Assembler::addl(CpuRegister dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(dst, src);
@@ -948,21 +1001,6 @@
}
-void X86_64Assembler::cmpl(const Address& address, CpuRegister reg) {
- AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitOptionalRex32(reg, address);
- EmitUint8(0x39);
- EmitOperand(reg.LowBits(), address);
-}
-
-
-void X86_64Assembler::cmpl(const Address& address, const Immediate& imm) {
- AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitOptionalRex32(address);
- EmitComplex(7, address, imm);
-}
-
-
void X86_64Assembler::testl(CpuRegister reg1, CpuRegister reg2) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(reg1, reg2);
@@ -997,6 +1035,14 @@
}
+void X86_64Assembler::testq(CpuRegister reg1, CpuRegister reg2) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64(reg1, reg2);
+ EmitUint8(0x85);
+ EmitRegisterOperand(reg1.LowBits(), reg2.LowBits());
+}
+
+
void X86_64Assembler::testq(CpuRegister reg, const Address& address) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitRex64(reg);
@@ -1013,6 +1059,14 @@
}
+void X86_64Assembler::andl(CpuRegister reg, const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(reg, address);
+ EmitUint8(0x23);
+ EmitOperand(reg.LowBits(), address);
+}
+
+
void X86_64Assembler::andl(CpuRegister dst, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(dst);
@@ -1028,6 +1082,14 @@
}
+void X86_64Assembler::andq(CpuRegister dst, CpuRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64(dst, src);
+ EmitUint8(0x23);
+ EmitOperand(dst.LowBits(), Operand(src));
+}
+
+
void X86_64Assembler::orl(CpuRegister dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(dst, src);
@@ -1036,6 +1098,14 @@
}
+void X86_64Assembler::orl(CpuRegister reg, const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(reg, address);
+ EmitUint8(0x0B);
+ EmitOperand(reg.LowBits(), address);
+}
+
+
void X86_64Assembler::orl(CpuRegister dst, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(dst);
@@ -1043,6 +1113,14 @@
}
+void X86_64Assembler::orq(CpuRegister dst, CpuRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64(dst, src);
+ EmitUint8(0x0B);
+ EmitOperand(dst.LowBits(), Operand(src));
+}
+
+
void X86_64Assembler::xorl(CpuRegister dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(dst, src);
@@ -1051,6 +1129,21 @@
}
+void X86_64Assembler::xorl(CpuRegister reg, const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(reg, address);
+ EmitUint8(0x33);
+ EmitOperand(reg.LowBits(), address);
+}
+
+
+void X86_64Assembler::xorl(CpuRegister dst, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(dst);
+ EmitComplex(6, Operand(dst), imm);
+}
+
+
void X86_64Assembler::xorq(CpuRegister dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitRex64(dst, src);
@@ -1219,6 +1312,13 @@
}
+void X86_64Assembler::cqo() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64();
+ EmitUint8(0x99);
+}
+
+
void X86_64Assembler::idivl(CpuRegister reg) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(reg);
@@ -1227,6 +1327,14 @@
}
+void X86_64Assembler::idivq(CpuRegister reg) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64(reg);
+ EmitUint8(0xF7);
+ EmitUint8(0xF8 | reg.LowBits());
+}
+
+
void X86_64Assembler::imull(CpuRegister dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(dst, src);
@@ -1235,13 +1343,25 @@
EmitOperand(dst.LowBits(), Operand(src));
}
-
void X86_64Assembler::imull(CpuRegister reg, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ CHECK(imm.is_int32()); // imull only supports 32b immediate.
+
EmitOptionalRex32(reg, reg);
- EmitUint8(0x69);
- EmitOperand(reg.LowBits(), Operand(reg));
- EmitImmediate(imm);
+
+ // See whether imm can be represented as a sign-extended 8bit value.
+ int32_t v32 = static_cast<int32_t>(imm.value());
+ if (IsInt32(8, v32)) {
+ // Sign-extension works.
+ EmitUint8(0x6B);
+ EmitOperand(reg.LowBits(), Operand(reg));
+ EmitUint8(static_cast<uint8_t>(v32 & 0xFF));
+ } else {
+ // Not representable, use full immediate.
+ EmitUint8(0x69);
+ EmitOperand(reg.LowBits(), Operand(reg));
+ EmitImmediate(imm);
+ }
}
@@ -1266,10 +1386,22 @@
void X86_64Assembler::imulq(CpuRegister reg, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
CHECK(imm.is_int32()); // imulq only supports 32b immediate.
- EmitRex64(reg);
- EmitUint8(0x69);
- EmitOperand(reg.LowBits(), Operand(reg));
- EmitImmediate(imm);
+
+ EmitRex64(reg, reg);
+
+ // See whether imm can be represented as a sign-extended 8bit value.
+ int64_t v64 = imm.value();
+ if (IsInt64(8, v64)) {
+ // Sign-extension works.
+ EmitUint8(0x6B);
+ EmitOperand(reg.LowBits(), Operand(reg));
+ EmitUint8(static_cast<uint8_t>(v64 & 0xFF));
+ } else {
+ // Not representable, use full immediate.
+ EmitUint8(0x69);
+ EmitOperand(reg.LowBits(), Operand(reg));
+ EmitImmediate(imm);
+ }
}
@@ -1680,6 +1812,8 @@
CHECK(imm.is_int8());
if (wide) {
EmitRex64(reg);
+ } else {
+ EmitOptionalRex32(reg);
}
if (imm.value() == 1) {
EmitUint8(0xD1);
@@ -1697,6 +1831,7 @@
CpuRegister shifter) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
CHECK_EQ(shifter.AsRegister(), RCX);
+ EmitOptionalRex32(operand);
EmitUint8(0xD3);
EmitOperand(reg_or_opcode, Operand(operand));
}
@@ -1772,10 +1907,20 @@
}
}
+void X86_64Assembler::EmitRex64() {
+ EmitOptionalRex(false, true, false, false, false);
+}
+
void X86_64Assembler::EmitRex64(CpuRegister reg) {
EmitOptionalRex(false, true, false, false, reg.NeedsRex());
}
+void X86_64Assembler::EmitRex64(const Operand& operand) {
+ uint8_t rex = operand.rex();
+ rex |= 0x48; // REX.W000
+ EmitUint8(rex);
+}
+
void X86_64Assembler::EmitRex64(CpuRegister dst, CpuRegister src) {
EmitOptionalRex(false, true, dst.NeedsRex(), false, src.NeedsRex());
}
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 42d774a..4dd70e2 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -328,17 +328,17 @@
void divsd(XmmRegister dst, XmmRegister src);
void divsd(XmmRegister dst, const Address& src);
- void cvtsi2ss(XmmRegister dst, CpuRegister src);
- void cvtsi2sd(XmmRegister dst, CpuRegister src);
+ void cvtsi2ss(XmmRegister dst, CpuRegister src); // Note: this is the r/m32 version.
+ void cvtsi2sd(XmmRegister dst, CpuRegister src); // Note: this is the r/m32 version.
- void cvtss2si(CpuRegister dst, XmmRegister src);
+ void cvtss2si(CpuRegister dst, XmmRegister src); // Note: this is the r32 version.
void cvtss2sd(XmmRegister dst, XmmRegister src);
- void cvtsd2si(CpuRegister dst, XmmRegister src);
+ void cvtsd2si(CpuRegister dst, XmmRegister src); // Note: this is the r32 version.
void cvtsd2ss(XmmRegister dst, XmmRegister src);
- void cvttss2si(CpuRegister dst, XmmRegister src);
- void cvttsd2si(CpuRegister dst, XmmRegister src);
+ void cvttss2si(CpuRegister dst, XmmRegister src); // Note: this is the r32 version.
+ void cvttsd2si(CpuRegister dst, XmmRegister src); // Note: this is the r32 version.
void cvtdq2pd(XmmRegister dst, XmmRegister src);
@@ -390,20 +390,28 @@
void cmpq(CpuRegister reg0, CpuRegister reg1);
void cmpq(CpuRegister reg0, const Immediate& imm);
void cmpq(CpuRegister reg0, const Address& address);
+ void cmpq(const Address& address, const Immediate& imm);
void testl(CpuRegister reg1, CpuRegister reg2);
void testl(CpuRegister reg, const Immediate& imm);
+ void testq(CpuRegister reg1, CpuRegister reg2);
void testq(CpuRegister reg, const Address& address);
void andl(CpuRegister dst, const Immediate& imm);
void andl(CpuRegister dst, CpuRegister src);
+ void andl(CpuRegister reg, const Address& address);
void andq(CpuRegister dst, const Immediate& imm);
+ void andq(CpuRegister dst, CpuRegister src);
void orl(CpuRegister dst, const Immediate& imm);
void orl(CpuRegister dst, CpuRegister src);
+ void orl(CpuRegister reg, const Address& address);
+ void orq(CpuRegister dst, CpuRegister src);
void xorl(CpuRegister dst, CpuRegister src);
+ void xorl(CpuRegister dst, const Immediate& imm);
+ void xorl(CpuRegister reg, const Address& address);
void xorq(CpuRegister dst, const Immediate& imm);
void xorq(CpuRegister dst, CpuRegister src);
@@ -426,8 +434,10 @@
void subq(CpuRegister dst, const Address& address);
void cdq();
+ void cqo();
void idivl(CpuRegister reg);
+ void idivq(CpuRegister reg);
void imull(CpuRegister dst, CpuRegister src);
void imull(CpuRegister reg, const Immediate& imm);
@@ -663,7 +673,9 @@
void EmitOptionalRex32(XmmRegister dst, const Operand& operand);
// Emit a REX.W prefix plus necessary register bit encodings.
+ void EmitRex64();
void EmitRex64(CpuRegister reg);
+ void EmitRex64(const Operand& operand);
void EmitRex64(CpuRegister dst, CpuRegister src);
void EmitRex64(CpuRegister dst, const Operand& operand);
void EmitRex64(XmmRegister dst, CpuRegister src);
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index 0e8ea5b..af389e6 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -16,8 +16,13 @@
#include "assembler_x86_64.h"
+#include <inttypes.h>
+#include <map>
+#include <random>
+
#include "base/stl_util.h"
#include "utils/assembler_test.h"
+#include "utils.h"
namespace art {
@@ -30,8 +35,88 @@
ASSERT_EQ(static_cast<size_t>(5), buffer.Size());
}
+#ifdef HAVE_ANDROID_OS
+static constexpr size_t kRandomIterations = 1000; // Devices might be puny, don't stress them...
+#else
+static constexpr size_t kRandomIterations = 100000; // Hosts are pretty powerful.
+#endif
+
+TEST(AssemblerX86_64, SignExtension) {
+ // 32bit.
+ for (int32_t i = 0; i < 128; i++) {
+ EXPECT_TRUE(IsInt32(8, i)) << i;
+ }
+ for (int32_t i = 128; i < 255; i++) {
+ EXPECT_FALSE(IsInt32(8, i)) << i;
+ }
+ // Do some higher ones randomly.
+ std::random_device rd;
+ std::default_random_engine e1(rd());
+ std::uniform_int_distribution<int32_t> uniform_dist(256, INT32_MAX);
+ for (size_t i = 0; i < kRandomIterations; i++) {
+ int32_t value = uniform_dist(e1);
+ EXPECT_FALSE(IsInt32(8, value)) << value;
+ }
+
+ // Negative ones.
+ for (int32_t i = -1; i >= -128; i--) {
+ EXPECT_TRUE(IsInt32(8, i)) << i;
+ }
+
+ for (int32_t i = -129; i > -256; i--) {
+ EXPECT_FALSE(IsInt32(8, i)) << i;
+ }
+
+ // Do some lower ones randomly.
+ std::uniform_int_distribution<int32_t> uniform_dist2(INT32_MIN, -256);
+ for (size_t i = 0; i < 100; i++) {
+ int32_t value = uniform_dist2(e1);
+ EXPECT_FALSE(IsInt32(8, value)) << value;
+ }
+
+ // 64bit.
+ for (int64_t i = 0; i < 128; i++) {
+ EXPECT_TRUE(IsInt64(8, i)) << i;
+ }
+ for (int32_t i = 128; i < 255; i++) {
+ EXPECT_FALSE(IsInt64(8, i)) << i;
+ }
+ // Do some higher ones randomly.
+ std::uniform_int_distribution<int64_t> uniform_dist3(256, INT64_MAX);
+ for (size_t i = 0; i < 100; i++) {
+ int64_t value = uniform_dist3(e1);
+ EXPECT_FALSE(IsInt64(8, value)) << value;
+ }
+
+ // Negative ones.
+ for (int64_t i = -1; i >= -128; i--) {
+ EXPECT_TRUE(IsInt64(8, i)) << i;
+ }
+
+ for (int64_t i = -129; i > -256; i--) {
+ EXPECT_FALSE(IsInt64(8, i)) << i;
+ }
+
+ // Do some lower ones randomly.
+ std::uniform_int_distribution<int64_t> uniform_dist4(INT64_MIN, -256);
+ for (size_t i = 0; i < kRandomIterations; i++) {
+ int64_t value = uniform_dist4(e1);
+ EXPECT_FALSE(IsInt64(8, value)) << value;
+ }
+}
+
+struct X86_64CpuRegisterCompare {
+ bool operator()(const x86_64::CpuRegister& a, const x86_64::CpuRegister& b) const {
+ return a.AsRegister() < b.AsRegister();
+ }
+};
+
class AssemblerX86_64Test : public AssemblerTest<x86_64::X86_64Assembler, x86_64::CpuRegister,
- x86_64::Immediate> {
+ x86_64::XmmRegister, x86_64::Immediate> {
+ public:
+ typedef AssemblerTest<x86_64::X86_64Assembler, x86_64::CpuRegister,
+ x86_64::XmmRegister, x86_64::Immediate> Base;
+
protected:
// Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
std::string GetArchitectureString() OVERRIDE {
@@ -60,24 +145,71 @@
registers_.push_back(new x86_64::CpuRegister(x86_64::R13));
registers_.push_back(new x86_64::CpuRegister(x86_64::R14));
registers_.push_back(new x86_64::CpuRegister(x86_64::R15));
+
+ secondary_register_names_.emplace(x86_64::CpuRegister(x86_64::RAX), "eax");
+ secondary_register_names_.emplace(x86_64::CpuRegister(x86_64::RBX), "ebx");
+ secondary_register_names_.emplace(x86_64::CpuRegister(x86_64::RCX), "ecx");
+ secondary_register_names_.emplace(x86_64::CpuRegister(x86_64::RDX), "edx");
+ secondary_register_names_.emplace(x86_64::CpuRegister(x86_64::RBP), "ebp");
+ secondary_register_names_.emplace(x86_64::CpuRegister(x86_64::RSP), "esp");
+ secondary_register_names_.emplace(x86_64::CpuRegister(x86_64::RSI), "esi");
+ secondary_register_names_.emplace(x86_64::CpuRegister(x86_64::RDI), "edi");
+ secondary_register_names_.emplace(x86_64::CpuRegister(x86_64::R8), "r8d");
+ secondary_register_names_.emplace(x86_64::CpuRegister(x86_64::R9), "r9d");
+ secondary_register_names_.emplace(x86_64::CpuRegister(x86_64::R10), "r10d");
+ secondary_register_names_.emplace(x86_64::CpuRegister(x86_64::R11), "r11d");
+ secondary_register_names_.emplace(x86_64::CpuRegister(x86_64::R12), "r12d");
+ secondary_register_names_.emplace(x86_64::CpuRegister(x86_64::R13), "r13d");
+ secondary_register_names_.emplace(x86_64::CpuRegister(x86_64::R14), "r14d");
+ secondary_register_names_.emplace(x86_64::CpuRegister(x86_64::R15), "r15d");
+
+ fp_registers_.push_back(new x86_64::XmmRegister(x86_64::XMM0));
+ fp_registers_.push_back(new x86_64::XmmRegister(x86_64::XMM1));
+ fp_registers_.push_back(new x86_64::XmmRegister(x86_64::XMM2));
+ fp_registers_.push_back(new x86_64::XmmRegister(x86_64::XMM3));
+ fp_registers_.push_back(new x86_64::XmmRegister(x86_64::XMM4));
+ fp_registers_.push_back(new x86_64::XmmRegister(x86_64::XMM5));
+ fp_registers_.push_back(new x86_64::XmmRegister(x86_64::XMM6));
+ fp_registers_.push_back(new x86_64::XmmRegister(x86_64::XMM7));
+ fp_registers_.push_back(new x86_64::XmmRegister(x86_64::XMM8));
+ fp_registers_.push_back(new x86_64::XmmRegister(x86_64::XMM9));
+ fp_registers_.push_back(new x86_64::XmmRegister(x86_64::XMM10));
+ fp_registers_.push_back(new x86_64::XmmRegister(x86_64::XMM11));
+ fp_registers_.push_back(new x86_64::XmmRegister(x86_64::XMM12));
+ fp_registers_.push_back(new x86_64::XmmRegister(x86_64::XMM13));
+ fp_registers_.push_back(new x86_64::XmmRegister(x86_64::XMM14));
+ fp_registers_.push_back(new x86_64::XmmRegister(x86_64::XMM15));
}
}
void TearDown() OVERRIDE {
AssemblerTest::TearDown();
STLDeleteElements(®isters_);
+ STLDeleteElements(&fp_registers_);
}
std::vector<x86_64::CpuRegister*> GetRegisters() OVERRIDE {
return registers_;
}
+ std::vector<x86_64::XmmRegister*> GetFPRegisters() OVERRIDE {
+ return fp_registers_;
+ }
+
x86_64::Immediate CreateImmediate(int64_t imm_value) OVERRIDE {
return x86_64::Immediate(imm_value);
}
+ std::string GetSecondaryRegisterName(const x86_64::CpuRegister& reg) OVERRIDE {
+ CHECK(secondary_register_names_.find(reg) != secondary_register_names_.end());
+ return secondary_register_names_[reg];
+ }
+
private:
std::vector<x86_64::CpuRegister*> registers_;
+ std::map<x86_64::CpuRegister, std::string, X86_64CpuRegisterCompare> secondary_register_names_;
+
+ std::vector<x86_64::XmmRegister*> fp_registers_;
};
@@ -94,7 +226,6 @@
DriverStr(RepeatI(&x86_64::X86_64Assembler::pushq, 4U, "pushq ${imm}"), "pushqi");
}
-
TEST_F(AssemblerX86_64Test, MovqRegs) {
DriverStr(RepeatRR(&x86_64::X86_64Assembler::movq, "movq %{reg2}, %{reg1}"), "movq");
}
@@ -103,6 +234,13 @@
DriverStr(RepeatRI(&x86_64::X86_64Assembler::movq, 8U, "movq ${imm}, %{reg}"), "movqi");
}
+TEST_F(AssemblerX86_64Test, MovlRegs) {
+ DriverStr(Repeatrr(&x86_64::X86_64Assembler::movl, "mov %{reg2}, %{reg1}"), "movl");
+}
+
+TEST_F(AssemblerX86_64Test, MovlImm) {
+ DriverStr(Repeatri(&x86_64::X86_64Assembler::movl, 4U, "mov ${imm}, %{reg}"), "movli");
+}
TEST_F(AssemblerX86_64Test, AddqRegs) {
DriverStr(RepeatRR(&x86_64::X86_64Assembler::addq, "addq %{reg2}, %{reg1}"), "addq");
@@ -112,10 +250,36 @@
DriverStr(RepeatRI(&x86_64::X86_64Assembler::addq, 4U, "addq ${imm}, %{reg}"), "addqi");
}
+TEST_F(AssemblerX86_64Test, AddlRegs) {
+ DriverStr(Repeatrr(&x86_64::X86_64Assembler::addl, "add %{reg2}, %{reg1}"), "addl");
+}
+
+TEST_F(AssemblerX86_64Test, AddlImm) {
+ DriverStr(Repeatri(&x86_64::X86_64Assembler::addl, 4U, "add ${imm}, %{reg}"), "addli");
+}
+
TEST_F(AssemblerX86_64Test, ImulqRegs) {
DriverStr(RepeatRR(&x86_64::X86_64Assembler::imulq, "imulq %{reg2}, %{reg1}"), "imulq");
}
+TEST_F(AssemblerX86_64Test, ImulqImm) {
+ DriverStr(RepeatRI(&x86_64::X86_64Assembler::imulq, 4U, "imulq ${imm}, %{reg}, %{reg}"),
+ "imulqi");
+}
+
+TEST_F(AssemblerX86_64Test, ImullRegs) {
+ DriverStr(Repeatrr(&x86_64::X86_64Assembler::imull, "imul %{reg2}, %{reg1}"), "imull");
+}
+
+TEST_F(AssemblerX86_64Test, ImullImm) {
+ DriverStr(Repeatri(&x86_64::X86_64Assembler::imull, 4U, "imull ${imm}, %{reg}, %{reg}"),
+ "imulli");
+}
+
+TEST_F(AssemblerX86_64Test, Mull) {
+ DriverStr(Repeatr(&x86_64::X86_64Assembler::mull, "mull %{reg}"), "mull");
+}
+
TEST_F(AssemblerX86_64Test, SubqRegs) {
DriverStr(RepeatRR(&x86_64::X86_64Assembler::subq, "subq %{reg2}, %{reg1}"), "subq");
}
@@ -124,45 +288,178 @@
DriverStr(RepeatRI(&x86_64::X86_64Assembler::subq, 4U, "subq ${imm}, %{reg}"), "subqi");
}
+TEST_F(AssemblerX86_64Test, SublRegs) {
+ DriverStr(Repeatrr(&x86_64::X86_64Assembler::subl, "sub %{reg2}, %{reg1}"), "subl");
+}
+
+TEST_F(AssemblerX86_64Test, SublImm) {
+ DriverStr(Repeatri(&x86_64::X86_64Assembler::subl, 4U, "sub ${imm}, %{reg}"), "subli");
+}
+
+// Shll only allows CL as the shift register.
+std::string shll_fn(AssemblerX86_64Test::Base* assembler_test, x86_64::X86_64Assembler* assembler) {
+ std::ostringstream str;
+
+ std::vector<x86_64::CpuRegister*> registers = assembler_test->GetRegisters();
+
+ x86_64::CpuRegister shifter(x86_64::RCX);
+ for (auto reg : registers) {
+ assembler->shll(*reg, shifter);
+ str << "shll %cl, %" << assembler_test->GetSecondaryRegisterName(*reg) << "\n";
+ }
+
+ return str.str();
+}
+
+TEST_F(AssemblerX86_64Test, ShllReg) {
+ DriverFn(&shll_fn, "shll");
+}
+
+TEST_F(AssemblerX86_64Test, ShllImm) {
+ DriverStr(Repeatri(&x86_64::X86_64Assembler::shll, 1U, "shll ${imm}, %{reg}"), "shlli");
+}
+
+// Shrl only allows CL as the shift register.
+std::string shrl_fn(AssemblerX86_64Test::Base* assembler_test, x86_64::X86_64Assembler* assembler) {
+ std::ostringstream str;
+
+ std::vector<x86_64::CpuRegister*> registers = assembler_test->GetRegisters();
+
+ x86_64::CpuRegister shifter(x86_64::RCX);
+ for (auto reg : registers) {
+ assembler->shrl(*reg, shifter);
+ str << "shrl %cl, %" << assembler_test->GetSecondaryRegisterName(*reg) << "\n";
+ }
+
+ return str.str();
+}
+
+TEST_F(AssemblerX86_64Test, ShrlReg) {
+ DriverFn(&shrl_fn, "shrl");
+}
+
+TEST_F(AssemblerX86_64Test, ShrlImm) {
+ DriverStr(Repeatri(&x86_64::X86_64Assembler::shrl, 1U, "shrl ${imm}, %{reg}"), "shrli");
+}
+
+// Sarl only allows CL as the shift register.
+std::string sarl_fn(AssemblerX86_64Test::Base* assembler_test, x86_64::X86_64Assembler* assembler) {
+ std::ostringstream str;
+
+ std::vector<x86_64::CpuRegister*> registers = assembler_test->GetRegisters();
+
+ x86_64::CpuRegister shifter(x86_64::RCX);
+ for (auto reg : registers) {
+ assembler->sarl(*reg, shifter);
+ str << "sarl %cl, %" << assembler_test->GetSecondaryRegisterName(*reg) << "\n";
+ }
+
+ return str.str();
+}
+
+TEST_F(AssemblerX86_64Test, SarlReg) {
+ DriverFn(&sarl_fn, "sarl");
+}
+
+TEST_F(AssemblerX86_64Test, SarlImm) {
+ DriverStr(Repeatri(&x86_64::X86_64Assembler::sarl, 1U, "sarl ${imm}, %{reg}"), "sarli");
+}
TEST_F(AssemblerX86_64Test, CmpqRegs) {
DriverStr(RepeatRR(&x86_64::X86_64Assembler::cmpq, "cmpq %{reg2}, %{reg1}"), "cmpq");
}
+TEST_F(AssemblerX86_64Test, CmpqImm) {
+ DriverStr(RepeatRI(&x86_64::X86_64Assembler::cmpq, 4U /* cmpq only supports 32b imm */,
+ "cmpq ${imm}, %{reg}"), "cmpqi");
+}
+
+TEST_F(AssemblerX86_64Test, CmplRegs) {
+ DriverStr(Repeatrr(&x86_64::X86_64Assembler::cmpl, "cmp %{reg2}, %{reg1}"), "cmpl");
+}
+
+TEST_F(AssemblerX86_64Test, CmplImm) {
+ DriverStr(Repeatri(&x86_64::X86_64Assembler::cmpl, 4U, "cmpl ${imm}, %{reg}"), "cmpli");
+}
+
+TEST_F(AssemblerX86_64Test, Testl) {
+ // Note: uses different order for GCC than usual. This makes GCC happy, and doesn't have an
+ // impact on functional correctness.
+ DriverStr(Repeatrr(&x86_64::X86_64Assembler::testl, "testl %{reg1}, %{reg2}"), "testl");
+}
+
+TEST_F(AssemblerX86_64Test, Negq) {
+ DriverStr(RepeatR(&x86_64::X86_64Assembler::negq, "negq %{reg}"), "negq");
+}
+
+TEST_F(AssemblerX86_64Test, Negl) {
+ DriverStr(Repeatr(&x86_64::X86_64Assembler::negl, "negl %{reg}"), "negl");
+}
+
+TEST_F(AssemblerX86_64Test, Notq) {
+ DriverStr(RepeatR(&x86_64::X86_64Assembler::notq, "notq %{reg}"), "notq");
+}
+
+TEST_F(AssemblerX86_64Test, Notl) {
+ DriverStr(Repeatr(&x86_64::X86_64Assembler::notl, "notl %{reg}"), "notl");
+}
+
+TEST_F(AssemblerX86_64Test, AndqRegs) {
+ DriverStr(RepeatRR(&x86_64::X86_64Assembler::andq, "andq %{reg2}, %{reg1}"), "andq");
+}
+
+TEST_F(AssemblerX86_64Test, AndqImm) {
+ DriverStr(RepeatRI(&x86_64::X86_64Assembler::andq, 4U /* andq only supports 32b imm */,
+ "andq ${imm}, %{reg}"), "andqi");
+}
+
+TEST_F(AssemblerX86_64Test, AndlRegs) {
+ DriverStr(Repeatrr(&x86_64::X86_64Assembler::andl, "andl %{reg2}, %{reg1}"), "andl");
+}
+
+TEST_F(AssemblerX86_64Test, AndlImm) {
+ DriverStr(Repeatri(&x86_64::X86_64Assembler::andl, 4U, "andl ${imm}, %{reg}"), "andli");
+}
+
+TEST_F(AssemblerX86_64Test, OrqRegs) {
+ DriverStr(RepeatRR(&x86_64::X86_64Assembler::orq, "orq %{reg2}, %{reg1}"), "orq");
+}
+
+TEST_F(AssemblerX86_64Test, OrlRegs) {
+ DriverStr(Repeatrr(&x86_64::X86_64Assembler::orl, "orl %{reg2}, %{reg1}"), "orl");
+}
+
+TEST_F(AssemblerX86_64Test, OrlImm) {
+ DriverStr(Repeatri(&x86_64::X86_64Assembler::orl, 4U, "orl ${imm}, %{reg}"), "orli");
+}
+
+TEST_F(AssemblerX86_64Test, XorqRegs) {
+ DriverStr(RepeatRR(&x86_64::X86_64Assembler::xorq, "xorq %{reg2}, %{reg1}"), "xorq");
+}
TEST_F(AssemblerX86_64Test, XorqImm) {
DriverStr(RepeatRI(&x86_64::X86_64Assembler::xorq, 4U, "xorq ${imm}, %{reg}"), "xorqi");
}
-TEST_F(AssemblerX86_64Test, Movaps) {
- GetAssembler()->movaps(x86_64::XmmRegister(x86_64::XMM0), x86_64::XmmRegister(x86_64::XMM8));
- DriverStr("movaps %xmm8, %xmm0", "movaps");
+TEST_F(AssemblerX86_64Test, XorlRegs) {
+ DriverStr(Repeatrr(&x86_64::X86_64Assembler::xorl, "xor %{reg2}, %{reg1}"), "xorl");
}
-TEST_F(AssemblerX86_64Test, Movd) {
- GetAssembler()->movd(x86_64::XmmRegister(x86_64::XMM0), x86_64::CpuRegister(x86_64::R11));
- GetAssembler()->movd(x86_64::XmmRegister(x86_64::XMM0), x86_64::CpuRegister(x86_64::RAX));
- GetAssembler()->movd(x86_64::XmmRegister(x86_64::XMM8), x86_64::CpuRegister(x86_64::R11));
- GetAssembler()->movd(x86_64::XmmRegister(x86_64::XMM8), x86_64::CpuRegister(x86_64::RAX));
- GetAssembler()->movd(x86_64::CpuRegister(x86_64::R11), x86_64::XmmRegister(x86_64::XMM0));
- GetAssembler()->movd(x86_64::CpuRegister(x86_64::RAX), x86_64::XmmRegister(x86_64::XMM0));
- GetAssembler()->movd(x86_64::CpuRegister(x86_64::R11), x86_64::XmmRegister(x86_64::XMM8));
- GetAssembler()->movd(x86_64::CpuRegister(x86_64::RAX), x86_64::XmmRegister(x86_64::XMM8));
- const char* expected =
- "movd %r11, %xmm0\n"
- "movd %rax, %xmm0\n"
- "movd %r11, %xmm8\n"
- "movd %rax, %xmm8\n"
- "movd %xmm0, %r11\n"
- "movd %xmm0, %rax\n"
- "movd %xmm8, %r11\n"
- "movd %xmm8, %rax\n";
- DriverStr(expected, "movd");
+TEST_F(AssemblerX86_64Test, XorlImm) {
+ DriverStr(Repeatri(&x86_64::X86_64Assembler::xorl, 4U, "xor ${imm}, %{reg}"), "xorli");
+}
+
+TEST_F(AssemblerX86_64Test, Xchgq) {
+ DriverStr(RepeatRR(&x86_64::X86_64Assembler::xchgq, "xchgq %{reg2}, %{reg1}"), "xchgq");
+}
+
+TEST_F(AssemblerX86_64Test, Xchgl) {
+ // Test is disabled because GCC generates 0x87 0xC0 for xchgl eax, eax. All other cases are the
+ // same. Anyone know why it doesn't emit a simple 0x90? It does so for xchgq rax, rax...
+ // DriverStr(Repeatrr(&x86_64::X86_64Assembler::xchgl, "xchgl %{reg2}, %{reg1}"), "xchgl");
}
TEST_F(AssemblerX86_64Test, Movl) {
- GetAssembler()->movl(x86_64::CpuRegister(x86_64::R8), x86_64::CpuRegister(x86_64::R11));
- GetAssembler()->movl(x86_64::CpuRegister(x86_64::RAX), x86_64::CpuRegister(x86_64::R11));
GetAssembler()->movl(x86_64::CpuRegister(x86_64::RAX), x86_64::Address(
x86_64::CpuRegister(x86_64::RDI), x86_64::CpuRegister(x86_64::RBX), x86_64::TIMES_4, 12));
GetAssembler()->movl(x86_64::CpuRegister(x86_64::RAX), x86_64::Address(
@@ -170,8 +467,6 @@
GetAssembler()->movl(x86_64::CpuRegister(x86_64::R8), x86_64::Address(
x86_64::CpuRegister(x86_64::RDI), x86_64::CpuRegister(x86_64::R9), x86_64::TIMES_4, 12));
const char* expected =
- "movl %R11d, %R8d\n"
- "movl %R11d, %EAX\n"
"movl 0xc(%RDI,%RBX,4), %EAX\n"
"movl 0xc(%RDI,%R9,4), %EAX\n"
"movl 0xc(%RDI,%R9,4), %R8d\n";
@@ -186,17 +481,201 @@
DriverStr(expected, "movw");
}
-TEST_F(AssemblerX86_64Test, IMulImmediate) {
- GetAssembler()->imull(x86_64::CpuRegister(x86_64::RAX), x86_64::Immediate(0x40000));
- GetAssembler()->imull(x86_64::CpuRegister(x86_64::R8), x86_64::Immediate(0x40000));
- const char* expected =
- "imull $0x40000,%eax,%eax\n"
- "imull $0x40000,%r8d,%r8d\n";
- DriverStr(expected, "imul");
+TEST_F(AssemblerX86_64Test, Movsxd) {
+ DriverStr(RepeatRr(&x86_64::X86_64Assembler::movsxd, "movsxd %{reg2}, %{reg1}"), "movsxd");
+}
+
+///////////////////
+// FP Operations //
+///////////////////
+
+TEST_F(AssemblerX86_64Test, Movaps) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::movaps, "movaps %{reg2}, %{reg1}"), "movaps");
+}
+
+TEST_F(AssemblerX86_64Test, Movss) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::movss, "movss %{reg2}, %{reg1}"), "movss");
+}
+
+TEST_F(AssemblerX86_64Test, Movsd) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::movsd, "movsd %{reg2}, %{reg1}"), "movsd");
+}
+
+TEST_F(AssemblerX86_64Test, Movd1) {
+ DriverStr(RepeatFR(&x86_64::X86_64Assembler::movd, "movd %{reg2}, %{reg1}"), "movd.1");
+}
+
+TEST_F(AssemblerX86_64Test, Movd2) {
+ DriverStr(RepeatRF(&x86_64::X86_64Assembler::movd, "movd %{reg2}, %{reg1}"), "movd.2");
+}
+
+TEST_F(AssemblerX86_64Test, Addss) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::addss, "addss %{reg2}, %{reg1}"), "addss");
+}
+
+TEST_F(AssemblerX86_64Test, Addsd) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::addsd, "addsd %{reg2}, %{reg1}"), "addsd");
+}
+
+TEST_F(AssemblerX86_64Test, Subss) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::subss, "subss %{reg2}, %{reg1}"), "subss");
+}
+
+TEST_F(AssemblerX86_64Test, Subsd) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::subsd, "subsd %{reg2}, %{reg1}"), "subsd");
+}
+
+TEST_F(AssemblerX86_64Test, Mulss) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::mulss, "mulss %{reg2}, %{reg1}"), "mulss");
+}
+
+TEST_F(AssemblerX86_64Test, Mulsd) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::mulsd, "mulsd %{reg2}, %{reg1}"), "mulsd");
+}
+
+TEST_F(AssemblerX86_64Test, Divss) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::divss, "divss %{reg2}, %{reg1}"), "divss");
+}
+
+TEST_F(AssemblerX86_64Test, Divsd) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::divsd, "divsd %{reg2}, %{reg1}"), "divsd");
+}
+
+TEST_F(AssemblerX86_64Test, Cvtsi2ss) {
+ DriverStr(RepeatFr(&x86_64::X86_64Assembler::cvtsi2ss, "cvtsi2ss %{reg2}, %{reg1}"), "cvtsi2ss");
+}
+
+TEST_F(AssemblerX86_64Test, Cvtsi2sd) {
+ DriverStr(RepeatFr(&x86_64::X86_64Assembler::cvtsi2sd, "cvtsi2sd %{reg2}, %{reg1}"), "cvtsi2sd");
}
-std::string setcc_test_fn(x86_64::X86_64Assembler* assembler) {
+TEST_F(AssemblerX86_64Test, Cvtss2si) {
+ DriverStr(RepeatrF(&x86_64::X86_64Assembler::cvtss2si, "cvtss2si %{reg2}, %{reg1}"), "cvtss2si");
+}
+
+
+TEST_F(AssemblerX86_64Test, Cvtss2sd) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::cvtss2sd, "cvtss2sd %{reg2}, %{reg1}"), "cvtss2sd");
+}
+
+
+TEST_F(AssemblerX86_64Test, Cvtsd2si) {
+ DriverStr(RepeatrF(&x86_64::X86_64Assembler::cvtsd2si, "cvtsd2si %{reg2}, %{reg1}"), "cvtsd2si");
+}
+
+TEST_F(AssemblerX86_64Test, Cvttss2si) {
+ DriverStr(RepeatrF(&x86_64::X86_64Assembler::cvttss2si, "cvttss2si %{reg2}, %{reg1}"),
+ "cvttss2si");
+}
+
+TEST_F(AssemblerX86_64Test, Cvttsd2si) {
+ DriverStr(RepeatrF(&x86_64::X86_64Assembler::cvttsd2si, "cvttsd2si %{reg2}, %{reg1}"),
+ "cvttsd2si");
+}
+
+TEST_F(AssemblerX86_64Test, Cvtsd2ss) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::cvtsd2ss, "cvtsd2ss %{reg2}, %{reg1}"), "cvtsd2ss");
+}
+
+TEST_F(AssemblerX86_64Test, Cvtdq2pd) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::cvtdq2pd, "cvtdq2pd %{reg2}, %{reg1}"), "cvtdq2pd");
+}
+
+TEST_F(AssemblerX86_64Test, Comiss) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::comiss, "comiss %{reg2}, %{reg1}"), "comiss");
+}
+
+TEST_F(AssemblerX86_64Test, Comisd) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::comisd, "comisd %{reg2}, %{reg1}"), "comisd");
+}
+
+TEST_F(AssemblerX86_64Test, Sqrtss) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::sqrtss, "sqrtss %{reg2}, %{reg1}"), "sqrtss");
+}
+
+TEST_F(AssemblerX86_64Test, Sqrtsd) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::sqrtsd, "sqrtsd %{reg2}, %{reg1}"), "sqrtsd");
+}
+
+TEST_F(AssemblerX86_64Test, Xorps) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::xorps, "xorps %{reg2}, %{reg1}"), "xorps");
+}
+
+TEST_F(AssemblerX86_64Test, Xorpd) {
+ DriverStr(RepeatFF(&x86_64::X86_64Assembler::xorpd, "xorpd %{reg2}, %{reg1}"), "xorpd");
+}
+
+// X87
+
+std::string x87_fn(AssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
+ x86_64::X86_64Assembler* assembler) {
+ std::ostringstream str;
+
+ assembler->fincstp();
+ str << "fincstp\n";
+
+ assembler->fsin();
+ str << "fsin\n";
+
+ assembler->fcos();
+ str << "fcos\n";
+
+ assembler->fptan();
+ str << "fptan\n";
+
+ return str.str();
+}
+
+TEST_F(AssemblerX86_64Test, X87) {
+ DriverFn(&x87_fn, "x87");
+}
+
+////////////////
+// CALL / JMP //
+////////////////
+
+TEST_F(AssemblerX86_64Test, Call) {
+ DriverStr(RepeatR(&x86_64::X86_64Assembler::call, "call *%{reg}"), "call");
+}
+
+TEST_F(AssemblerX86_64Test, Jmp) {
+ DriverStr(RepeatR(&x86_64::X86_64Assembler::jmp, "jmp *%{reg}"), "jmp");
+}
+
+TEST_F(AssemblerX86_64Test, Enter) {
+ DriverStr(RepeatI(&x86_64::X86_64Assembler::enter, 2U /* 16b immediate */, "enter ${imm}, $0",
+ true /* Only non-negative number */), "enter");
+}
+
+TEST_F(AssemblerX86_64Test, RetImm) {
+ DriverStr(RepeatI(&x86_64::X86_64Assembler::ret, 2U /* 16b immediate */, "ret ${imm}",
+ true /* Only non-negative number */), "reti");
+}
+
+std::string ret_and_leave_fn(AssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
+ x86_64::X86_64Assembler* assembler) {
+ std::ostringstream str;
+
+ assembler->ret();
+ str << "ret\n";
+
+ assembler->leave();
+ str << "leave\n";
+
+ return str.str();
+}
+
+TEST_F(AssemblerX86_64Test, RetAndLeave) {
+ DriverFn(&ret_and_leave_fn, "retleave");
+}
+
+//////////
+// MISC //
+//////////
+
+std::string setcc_test_fn(AssemblerX86_64Test::Base* assembler_test,
+ x86_64::X86_64Assembler* assembler) {
// From Condition
/*
kOverflow = 0,
@@ -218,23 +697,7 @@
std::string suffixes[15] = { "o", "no", "b", "ae", "e", "ne", "be", "a", "s", "ns", "pe", "po",
"l", "ge", "le" };
- std::vector<x86_64::CpuRegister*> registers;
- registers.push_back(new x86_64::CpuRegister(x86_64::RAX));
- registers.push_back(new x86_64::CpuRegister(x86_64::RBX));
- registers.push_back(new x86_64::CpuRegister(x86_64::RCX));
- registers.push_back(new x86_64::CpuRegister(x86_64::RDX));
- registers.push_back(new x86_64::CpuRegister(x86_64::RBP));
- registers.push_back(new x86_64::CpuRegister(x86_64::RSP));
- registers.push_back(new x86_64::CpuRegister(x86_64::RSI));
- registers.push_back(new x86_64::CpuRegister(x86_64::RDI));
- registers.push_back(new x86_64::CpuRegister(x86_64::R8));
- registers.push_back(new x86_64::CpuRegister(x86_64::R9));
- registers.push_back(new x86_64::CpuRegister(x86_64::R10));
- registers.push_back(new x86_64::CpuRegister(x86_64::R11));
- registers.push_back(new x86_64::CpuRegister(x86_64::R12));
- registers.push_back(new x86_64::CpuRegister(x86_64::R13));
- registers.push_back(new x86_64::CpuRegister(x86_64::R14));
- registers.push_back(new x86_64::CpuRegister(x86_64::R15));
+ std::vector<x86_64::CpuRegister*> registers = assembler_test->GetRegisters();
std::string byte_regs[16];
byte_regs[x86_64::RAX] = "al";
@@ -263,7 +726,6 @@
}
}
- STLDeleteElements(®isters);
return str.str();
}
@@ -279,7 +741,8 @@
return x86_64::X86_64ManagedRegister::FromXmmRegister(r);
}
-std::string buildframe_test_fn(x86_64::X86_64Assembler* assembler) {
+std::string buildframe_test_fn(AssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
+ x86_64::X86_64Assembler* assembler) {
// TODO: more interesting spill registers / entry spills.
// Two random spill regs.
@@ -323,7 +786,8 @@
DriverFn(&buildframe_test_fn, "BuildFrame");
}
-std::string removeframe_test_fn(x86_64::X86_64Assembler* assembler) {
+std::string removeframe_test_fn(AssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
+ x86_64::X86_64Assembler* assembler) {
// TODO: more interesting spill registers / entry spills.
// Two random spill regs.
@@ -351,7 +815,8 @@
DriverFn(&removeframe_test_fn, "RemoveFrame");
}
-std::string increaseframe_test_fn(x86_64::X86_64Assembler* assembler) {
+std::string increaseframe_test_fn(AssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
+ x86_64::X86_64Assembler* assembler) {
assembler->IncreaseFrameSize(0U);
assembler->IncreaseFrameSize(kStackAlignment);
assembler->IncreaseFrameSize(10 * kStackAlignment);
@@ -369,7 +834,8 @@
DriverFn(&increaseframe_test_fn, "IncreaseFrame");
}
-std::string decreaseframe_test_fn(x86_64::X86_64Assembler* assembler) {
+std::string decreaseframe_test_fn(AssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
+ x86_64::X86_64Assembler* assembler) {
assembler->DecreaseFrameSize(0U);
assembler->DecreaseFrameSize(kStackAlignment);
assembler->DecreaseFrameSize(10 * kStackAlignment);
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index d87faeb..2d2a82e 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -31,8 +31,9 @@
#endif
#define ATRACE_TAG ATRACE_TAG_DALVIK
-#include "cutils/trace.h"
+#include <cutils/trace.h>
+#include "arch/instruction_set_features.h"
#include "base/dumpable.h"
#include "base/stl_util.h"
#include "base/stringpiece.h"
@@ -430,6 +431,8 @@
image_base_(0U),
image_classes_zip_filename_(nullptr),
image_classes_filename_(nullptr),
+ compiled_classes_zip_filename_(nullptr),
+ compiled_classes_filename_(nullptr),
image_(false),
is_host_(false),
dump_stats_(false),
@@ -540,6 +543,10 @@
image_classes_filename_ = option.substr(strlen("--image-classes=")).data();
} else if (option.starts_with("--image-classes-zip=")) {
image_classes_zip_filename_ = option.substr(strlen("--image-classes-zip=")).data();
+ } else if (option.starts_with("--compiled-classes=")) {
+ compiled_classes_filename_ = option.substr(strlen("--compiled-classes=")).data();
+ } else if (option.starts_with("--compiled-classes-zip=")) {
+ compiled_classes_zip_filename_ = option.substr(strlen("--compiled-classes-zip=")).data();
} else if (option.starts_with("--base=")) {
const char* image_base_str = option.substr(strlen("--base=")).data();
char* end;
@@ -571,11 +578,18 @@
}
} else if (option.starts_with("--instruction-set-features=")) {
StringPiece str = option.substr(strlen("--instruction-set-features=")).data();
- instruction_set_features_.reset(
- InstructionSetFeatures::FromFeatureString(instruction_set_, str.as_string(),
- &error_msg));
if (instruction_set_features_.get() == nullptr) {
- Usage("%s", error_msg.c_str());
+ instruction_set_features_.reset(
+ InstructionSetFeatures::FromVariant(instruction_set_, "default", &error_msg));
+ if (instruction_set_features_.get() == nullptr) {
+ Usage("Problem initializing default instruction set features variant: %s",
+ error_msg.c_str());
+ }
+ }
+ instruction_set_features_.reset(
+ instruction_set_features_->AddFeaturesFromString(str.as_string(), &error_msg));
+ if (instruction_set_features_.get() == nullptr) {
+ Usage("Error parsing '%s': %s", option.data(), error_msg.c_str());
}
} else if (option.starts_with("--compiler-backend=")) {
StringPiece backend_str = option.substr(strlen("--compiler-backend=")).data();
@@ -583,6 +597,7 @@
compiler_kind_ = Compiler::kQuick;
} else if (backend_str == "Optimizing") {
compiler_kind_ = Compiler::kOptimizing;
+ compile_pic = true;
} else if (backend_str == "Portable") {
compiler_kind_ = Compiler::kPortable;
} else {
@@ -742,6 +757,18 @@
Usage("--image-classes-zip should be used with --image-classes");
}
+ if (compiled_classes_filename_ != nullptr && !image_) {
+ Usage("--compiled-classes should only be used with --image");
+ }
+
+ if (compiled_classes_filename_ != nullptr && !boot_image_option_.empty()) {
+ Usage("--compiled-classes should not be used with --boot-image");
+ }
+
+ if (compiled_classes_zip_filename_ != nullptr && compiled_classes_filename_ == nullptr) {
+ Usage("--compiled-classes-zip should be used with --compiled-classes");
+ }
+
if (dex_filenames_.empty() && zip_fd_ == -1) {
Usage("Input must be supplied with either --dex-file or --zip-fd");
}
@@ -783,7 +810,11 @@
// instruction set.
if (instruction_set_features_.get() == nullptr) {
instruction_set_features_.reset(
- InstructionSetFeatures::FromFeatureString(instruction_set_, "default", &error_msg));
+ InstructionSetFeatures::FromVariant(instruction_set_, "default", &error_msg));
+ if (instruction_set_features_.get() == nullptr) {
+ Usage("Problem initializing default instruction set features variant: %s",
+ error_msg.c_str());
+ }
}
if (instruction_set_ == kRuntimeISA) {
@@ -985,6 +1016,25 @@
} else if (image_) {
image_classes_.reset(new std::set<std::string>);
}
+ // If --compiled-classes was specified, calculate the full list of classes to compile in the
+ // image.
+ if (compiled_classes_filename_ != nullptr) {
+ std::string error_msg;
+ if (compiled_classes_zip_filename_ != nullptr) {
+ compiled_classes_.reset(ReadImageClassesFromZip(compiled_classes_zip_filename_,
+ compiled_classes_filename_,
+ &error_msg));
+ } else {
+ compiled_classes_.reset(ReadImageClassesFromFile(compiled_classes_filename_));
+ }
+ if (compiled_classes_.get() == nullptr) {
+ LOG(ERROR) << "Failed to create list of compiled classes from '"
+ << compiled_classes_filename_ << "': " << error_msg;
+ return false;
+ }
+ } else if (image_) {
+ compiled_classes_.reset(nullptr); // By default compile everything.
+ }
if (boot_image_option_.empty()) {
dex_files_ = Runtime::Current()->GetClassLinker()->GetBootClassPath();
@@ -1088,6 +1138,7 @@
instruction_set_features_.get(),
image_,
image_classes_.release(),
+ compiled_classes_.release(),
thread_count_,
dump_stats_,
dump_passes_,
@@ -1513,7 +1564,10 @@
uintptr_t image_base_;
const char* image_classes_zip_filename_;
const char* image_classes_filename_;
+ const char* compiled_classes_zip_filename_;
+ const char* compiled_classes_filename_;
std::unique_ptr<std::set<std::string>> image_classes_;
+ std::unique_ptr<std::set<std::string>> compiled_classes_;
bool image_;
std::unique_ptr<ImageWriter> image_writer_;
bool is_host_;
diff --git a/disassembler/disassembler.h b/disassembler/disassembler.h
index 487f433..9cd631c 100644
--- a/disassembler/disassembler.h
+++ b/disassembler/disassembler.h
@@ -21,8 +21,8 @@
#include <iosfwd>
+#include "arch/instruction_set.h"
#include "base/macros.h"
-#include "instruction_set.h"
namespace art {
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index cdf48c3..d6309f7 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -23,6 +23,7 @@
#include <unordered_map>
#include <vector>
+#include "arch/instruction_set_features.h"
#include "base/stringpiece.h"
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
@@ -107,6 +108,10 @@
" --no-disassemble may be used to disable disassembly.\n"
" Example: --no-disassemble\n"
"\n");
+ fprintf(stderr,
+ " --method-filter=<method name>: only dumps methods that contain the filter.\n"
+ " Example: --method-filter=foo\n"
+ "\n");
}
const char* image_roots_descriptions_[] = {
@@ -356,12 +361,14 @@
bool dump_vmap,
bool disassemble_code,
bool absolute_addresses,
+ const char* method_filter,
Handle<mirror::ClassLoader>* class_loader)
: dump_raw_mapping_table_(dump_raw_mapping_table),
dump_raw_gc_map_(dump_raw_gc_map),
dump_vmap_(dump_vmap),
disassemble_code_(disassemble_code),
absolute_addresses_(absolute_addresses),
+ method_filter_(method_filter),
class_loader_(class_loader) {}
const bool dump_raw_mapping_table_;
@@ -369,6 +376,7 @@
const bool dump_vmap_;
const bool disassemble_code_;
const bool absolute_addresses_;
+ const char* const method_filter_;
Handle<mirror::ClassLoader>* class_loader_;
};
@@ -686,8 +694,13 @@
uint32_t dex_method_idx, const DexFile::CodeItem* code_item,
uint32_t method_access_flags) {
bool success = true;
+ std::string pretty_method = PrettyMethod(dex_method_idx, dex_file, true);
+ if (pretty_method.find(options_->method_filter_) == std::string::npos) {
+ return success;
+ }
+
os << StringPrintf("%d: %s (dex_method_idx=%d)\n",
- class_method_index, PrettyMethod(dex_method_idx, dex_file, true).c_str(),
+ class_method_index, pretty_method.c_str(),
dex_method_idx);
Indenter indent1_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
std::unique_ptr<std::ostream> indent1_os(new std::ostream(&indent1_filter));
@@ -2179,6 +2192,8 @@
} else if (option.starts_with("--symbolize=")) {
oat_filename_ = option.substr(strlen("--symbolize=")).data();
symbolize_ = true;
+ } else if (option.starts_with("--method-filter=")) {
+ method_filter_ = option.substr(strlen("--method-filter=")).data();
} else {
fprintf(stderr, "Unknown argument %s\n", option.data());
usage();
@@ -2200,6 +2215,7 @@
}
const char* oat_filename_ = nullptr;
+ const char* method_filter_ = "";
const char* image_location_ = nullptr;
const char* boot_image_location_ = nullptr;
InstructionSet instruction_set_ = kRuntimeISA;
@@ -2231,6 +2247,7 @@
args.dump_vmap_,
args.disassemble_code_,
absolute_addresses,
+ args.method_filter_,
nullptr));
std::unique_ptr<Runtime> runtime;
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 75160ca..6b6d11e 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -34,12 +34,8 @@
#include "elf_file_impl.h"
#include "gc/space/image_space.h"
#include "image.h"
-#include "instruction_set.h"
-#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
-#include "mirror/art_method.h"
#include "mirror/art_method-inl.h"
-#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "mirror/reference.h"
#include "noop_compiler_callbacks.h"
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
index 0ceef64..5a3545b 100644
--- a/patchoat/patchoat.h
+++ b/patchoat/patchoat.h
@@ -17,14 +17,14 @@
#ifndef ART_PATCHOAT_PATCHOAT_H_
#define ART_PATCHOAT_PATCHOAT_H_
+#include "arch/instruction_set.h"
#include "base/macros.h"
#include "base/mutex.h"
-#include "instruction_set.h"
-#include "os.h"
#include "elf_file.h"
#include "elf_utils.h"
#include "gc/accounting/space_bitmap.h"
#include "gc/heap.h"
+#include "os.h"
#include "utils.h"
namespace art {
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 4505b8e..25fe45f 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -73,7 +73,6 @@
hprof/hprof.cc \
image.cc \
indirect_reference_table.cc \
- instruction_set.cc \
instrumentation.cc \
intern_table.cc \
interpreter/interpreter.cc \
@@ -165,11 +164,18 @@
LIBART_COMMON_SRC_FILES += \
arch/context.cc \
+ arch/instruction_set.cc \
+ arch/instruction_set_features.cc \
arch/memcmp16.cc \
+ arch/arm/instruction_set_features_arm.cc \
arch/arm/registers_arm.cc \
+ arch/arm64/instruction_set_features_arm64.cc \
arch/arm64/registers_arm64.cc \
- arch/x86/registers_x86.cc \
+ arch/mips/instruction_set_features_mips.cc \
arch/mips/registers_mips.cc \
+ arch/x86/instruction_set_features_x86.cc \
+ arch/x86/registers_x86.cc \
+ arch/x86_64/registers_x86_64.cc \
entrypoints/entrypoint_utils.cc \
entrypoints/interpreter/interpreter_entrypoints.cc \
entrypoints/jni/jni_entrypoints.cc \
@@ -216,7 +222,7 @@
LIBART_TARGET_SRC_FILES_arm := \
arch/arm/context_arm.cc.arm \
arch/arm/entrypoints_init_arm.cc \
- arch/arm/instruction_set_features_arm.S \
+ arch/arm/instruction_set_features_assembly_tests.S \
arch/arm/jni_entrypoints_arm.S \
arch/arm/memcmp16_arm.S \
arch/arm/portable_entrypoints_arm.S \
@@ -292,7 +298,7 @@
$(LIBART_SRC_FILES_x86_64)
LIBART_ENUM_OPERATOR_OUT_HEADER_FILES := \
- arch/x86_64/registers_x86_64.h \
+ arch/instruction_set.h \
base/allocator.h \
base/mutex.h \
debugger.h \
@@ -306,7 +312,6 @@
gc/heap.h \
instrumentation.h \
indirect_reference_table.h \
- instruction_set.h \
invoke_type.h \
jdwp/jdwp.h \
jdwp/jdwp_constants.h \
@@ -339,7 +344,7 @@
2ND_LIBART_TARGET_DEFAULT_INSTRUCTION_SET_FEATURES := default
ifeq ($(DEX2OAT_TARGET_ARCH),arm)
ifneq (,$(filter $(DEX2OAT_TARGET_CPU_VARIANT),cortex-a15 krait denver))
- LIBART_TARGET_DEFAULT_INSTRUCTION_SET_FEATURES := lpae,div
+ LIBART_TARGET_DEFAULT_INSTRUCTION_SET_FEATURES := atomic_ldrd_strd,div
else
ifneq (,$(filter $(DEX2OAT_TARGET_CPU_VARIANT),cortex-a7))
LIBART_TARGET_DEFAULT_INSTRUCTION_SET_FEATURES := div
@@ -348,7 +353,7 @@
endif
ifeq ($(2ND_DEX2OAT_TARGET_ARCH),arm)
ifneq (,$(filter $(DEX2OAT_TARGET_CPU_VARIANT),cortex-a15 krait denver))
- 2ND_LIBART_TARGET_DEFAULT_INSTRUCTION_SET_FEATURES := lpae,div
+ 2ND_LIBART_TARGET_DEFAULT_INSTRUCTION_SET_FEATURES := atomic_ldrd_strd,div
else
ifneq (,$(filter $(DEX2OAT_TARGET_CPU_VARIANT),cortex-a7))
2ND_LIBART_TARGET_DEFAULT_INSTRUCTION_SET_FEATURES := div
@@ -465,7 +470,7 @@
LOCAL_SHARED_LIBRARIES := libnativehelper libnativebridge libsigchain
include external/libcxx/libcxx.mk
- LOCAL_SHARED_LIBRARIES += libbacktrace_libc++
+ LOCAL_SHARED_LIBRARIES += libbacktrace
ifeq ($$(art_target_or_host),target)
LOCAL_SHARED_LIBRARIES += libdl
# ZipArchive support, the order matters here to get all symbols.
diff --git a/runtime/arch/arm/fault_handler_arm.cc b/runtime/arch/arm/fault_handler_arm.cc
index 480190a..325b283 100644
--- a/runtime/arch/arm/fault_handler_arm.cc
+++ b/runtime/arch/arm/fault_handler_arm.cc
@@ -23,7 +23,6 @@
#include "globals.h"
#include "base/logging.h"
#include "base/hex_dump.h"
-#include "instruction_set.h"
#include "mirror/art_method.h"
#include "mirror/art_method-inl.h"
#include "thread.h"
diff --git a/runtime/arch/arm/instruction_set_features_arm.cc b/runtime/arch/arm/instruction_set_features_arm.cc
new file mode 100644
index 0000000..f49c037
--- /dev/null
+++ b/runtime/arch/arm/instruction_set_features_arm.cc
@@ -0,0 +1,297 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_set_features_arm.h"
+
+#if defined(HAVE_ANDROID_OS) && defined(__arm__)
+#include <sys/auxv.h>
+#include <asm/hwcap.h>
+#endif
+
+#include "signal.h"
+#include <fstream>
+
+#include "base/stringprintf.h"
+#include "utils.h" // For Trim.
+
+#if defined(__arm__)
+extern "C" bool artCheckForArmSdivInstruction();
+#endif
+
+namespace art {
+
+const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromVariant(
+ const std::string& variant, std::string* error_msg) {
+ // Assume all ARM processors are SMP.
+ // TODO: set the SMP support based on variant.
+ const bool smp = true;
+
+ // Look for variants that have divide support.
+ static const char* arm_variants_with_div[] = {
+ "cortex-a7", "cortex-a12", "cortex-a15", "cortex-a17", "cortex-a53", "cortex-a57",
+ "cortex-m3", "cortex-m4", "cortex-r4", "cortex-r5",
+ "cyclone", "denver", "krait", "swift"};
+
+ bool has_div = FindVariantInArray(arm_variants_with_div, arraysize(arm_variants_with_div),
+ variant);
+
+ // Look for variants that have LPAE support.
+ static const char* arm_variants_with_lpae[] = {
+ "cortex-a7", "cortex-a15", "krait", "denver"
+ };
+ bool has_lpae = FindVariantInArray(arm_variants_with_lpae, arraysize(arm_variants_with_lpae),
+ variant);
+
+  if (!has_div && !has_lpae) {
+ // Avoid unsupported variants.
+ static const char* unsupported_arm_variants[] = {
+ // ARM processors that aren't ARMv7 compatible aren't supported.
+ "arm2", "arm250", "arm3", "arm6", "arm60", "arm600", "arm610", "arm620",
+ "cortex-m0", "cortex-m0plus", "cortex-m1",
+ "fa526", "fa626", "fa606te", "fa626te", "fmp626", "fa726te",
+ "iwmmxt", "iwmmxt2",
+ "strongarm", "strongarm110", "strongarm1100", "strongarm1110",
+ "xscale"
+ };
+ if (FindVariantInArray(unsupported_arm_variants, arraysize(unsupported_arm_variants),
+ variant)) {
+ *error_msg = StringPrintf("Attempt to use unsupported ARM variant: %s", variant.c_str());
+ return nullptr;
+ }
+ // Warn if the variant is unknown.
+ // TODO: some of the variants below may have feature support, but that support is currently
+ // unknown so we'll choose conservative (sub-optimal) defaults without warning.
+ // TODO: some of the architectures may not support all features required by ART and should be
+ // moved to unsupported_arm_variants[] above.
+ static const char* arm_variants_without_known_features[] = {
+ "default",
+ "arm7", "arm7m", "arm7d", "arm7dm", "arm7di", "arm7dmi", "arm70", "arm700", "arm700i",
+ "arm710", "arm710c", "arm7100", "arm720", "arm7500", "arm7500fe", "arm7tdmi", "arm7tdmi-s",
+ "arm710t", "arm720t", "arm740t",
+ "arm8", "arm810",
+ "arm9", "arm9e", "arm920", "arm920t", "arm922t", "arm946e-s", "arm966e-s", "arm968e-s",
+ "arm926ej-s", "arm940t", "arm9tdmi",
+ "arm10tdmi", "arm1020t", "arm1026ej-s", "arm10e", "arm1020e", "arm1022e",
+ "arm1136j-s", "arm1136jf-s",
+ "arm1156t2-s", "arm1156t2f-s", "arm1176jz-s", "arm1176jzf-s",
+ "cortex-a5", "cortex-a8", "cortex-a9", "cortex-a9-mp", "cortex-r4f",
+ "marvell-pj4", "mpcore", "mpcorenovfp"
+ };
+ if (!FindVariantInArray(arm_variants_without_known_features,
+ arraysize(arm_variants_without_known_features),
+ variant)) {
+ LOG(WARNING) << "Unknown instruction set features for ARM CPU variant (" << variant
+ << ") using conservative defaults";
+ }
+ }
+ return new ArmInstructionSetFeatures(smp, has_div, has_lpae);
+}
+
+const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromBitmap(uint32_t bitmap) {
+ bool smp = (bitmap & kSmpBitfield) != 0;
+ bool has_div = (bitmap & kDivBitfield) != 0;
+ bool has_atomic_ldrd_strd = (bitmap & kAtomicLdrdStrdBitfield) != 0;
+ return new ArmInstructionSetFeatures(smp, has_div, has_atomic_ldrd_strd);
+}
+
+const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromCppDefines() {
+#if defined(HAVE_ANDROID_OS) && (ANDROID_SMP == 0)
+ const bool smp = false;
+#else
+ const bool smp = true;
+#endif
+
+#if defined(__ARM_ARCH_EXT_IDIV__)
+ const bool has_div = true;
+#else
+ const bool has_div = false;
+#endif
+#if defined(__ARM_FEATURE_LPAE)
+ const bool has_lpae = true;
+#else
+ const bool has_lpae = false;
+#endif
+ return new ArmInstructionSetFeatures(smp, has_div, has_lpae);
+}
+
+const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromCpuInfo() {
+ // Look in /proc/cpuinfo for features we need. Only use this when we can guarantee that
+ // the kernel puts the appropriate feature flags in here. Sometimes it doesn't.
+ bool smp = false;
+ bool has_lpae = false;
+ bool has_div = false;
+
+ std::ifstream in("/proc/cpuinfo");
+ if (!in.fail()) {
+ while (!in.eof()) {
+ std::string line;
+ std::getline(in, line);
+ if (!in.eof()) {
+ LOG(INFO) << "cpuinfo line: " << line;
+ if (line.find("Features") != std::string::npos) {
+ LOG(INFO) << "found features";
+ if (line.find("idivt") != std::string::npos) {
+ // We always expect both ARM and Thumb divide instructions to be available or not
+ // available.
+ CHECK_NE(line.find("idiva"), std::string::npos);
+ has_div = true;
+ }
+ if (line.find("lpae") != std::string::npos) {
+ has_lpae = true;
+ }
+ } else if (line.find("processor") != std::string::npos &&
+ line.find(": 1") != std::string::npos) {
+ smp = true;
+ }
+ }
+ }
+ in.close();
+ } else {
+ LOG(ERROR) << "Failed to open /proc/cpuinfo";
+ }
+ return new ArmInstructionSetFeatures(smp, has_div, has_lpae);
+}
+
+const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromHwcap() {
+ bool smp = sysconf(_SC_NPROCESSORS_CONF) > 1;
+
+ bool has_div = false;
+ bool has_lpae = false;
+
+#if defined(HAVE_ANDROID_OS) && defined(__arm__)
+ uint64_t hwcaps = getauxval(AT_HWCAP);
+ LOG(INFO) << "hwcaps=" << hwcaps;
+ if ((hwcaps & HWCAP_IDIVT) != 0) {
+ // We always expect both ARM and Thumb divide instructions to be available or not
+ // available.
+ CHECK_NE(hwcaps & HWCAP_IDIVA, 0U);
+ has_div = true;
+ }
+ if ((hwcaps & HWCAP_LPAE) != 0) {
+ has_lpae = true;
+ }
+#endif
+
+ return new ArmInstructionSetFeatures(smp, has_div, has_lpae);
+}
+
+// A signal handler called by a fault for an illegal instruction. We record the fact in r0
+// and then increment the PC in the signal context to return to the next instruction. We know the
+// instruction is an sdiv (4 bytes long).
+static void bad_divide_inst_handle(int signo ATTRIBUTE_UNUSED, siginfo_t* si ATTRIBUTE_UNUSED,
+ void* data) {
+#if defined(__arm__)
+ struct ucontext *uc = (struct ucontext *)data;
+ struct sigcontext *sc = &uc->uc_mcontext;
+ sc->arm_r0 = 0; // Set R0 to #0 to signal error.
+ sc->arm_pc += 4; // Skip offending instruction.
+#else
+ UNUSED(data);
+#endif
+}
+
+const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromAssembly() {
+#if defined(HAVE_ANDROID_OS) && (ANDROID_SMP == 0)
+ const bool smp = false;
+#else
+ const bool smp = true;
+#endif
+ // See if have a sdiv instruction. Register a signal handler and try to execute an sdiv
+ // instruction. If we get a SIGILL then it's not supported.
+ struct sigaction sa, osa;
+ sa.sa_flags = SA_ONSTACK | SA_RESTART | SA_SIGINFO;
+ sa.sa_sigaction = bad_divide_inst_handle;
+ sigaction(SIGILL, &sa, &osa);
+
+ bool has_div = false;
+#if defined(__arm__)
+ if (artCheckForArmSdivInstruction()) {
+ has_div = true;
+ }
+#endif
+
+ // Restore the signal handler.
+ sigaction(SIGILL, &osa, nullptr);
+
+ // Use compile time features to "detect" LPAE support.
+ // TODO: write an assembly LPAE support test.
+#if defined(__ARM_FEATURE_LPAE)
+ const bool has_lpae = true;
+#else
+ const bool has_lpae = false;
+#endif
+ return new ArmInstructionSetFeatures(smp, has_div, has_lpae);
+}
+
+bool ArmInstructionSetFeatures::Equals(const InstructionSetFeatures* other) const {
+ if (kArm != other->GetInstructionSet()) {
+ return false;
+ }
+ const ArmInstructionSetFeatures* other_as_arm = other->AsArmInstructionSetFeatures();
+ return IsSmp() == other_as_arm->IsSmp() &&
+ has_div_ == other_as_arm->has_div_ &&
+ has_atomic_ldrd_strd_ == other_as_arm->has_atomic_ldrd_strd_;
+}
+
+uint32_t ArmInstructionSetFeatures::AsBitmap() const {
+ return (IsSmp() ? kSmpBitfield : 0) |
+ (has_div_ ? kDivBitfield : 0) |
+ (has_atomic_ldrd_strd_ ? kAtomicLdrdStrdBitfield : 0);
+}
+
+std::string ArmInstructionSetFeatures::GetFeatureString() const {
+ std::string result;
+ if (IsSmp()) {
+ result += "smp";
+ } else {
+ result += "-smp";
+ }
+ if (has_div_) {
+ result += ",div";
+ } else {
+ result += ",-div";
+ }
+ if (has_atomic_ldrd_strd_) {
+ result += ",atomic_ldrd_strd";
+ } else {
+ result += ",-atomic_ldrd_strd";
+ }
+ return result;
+}
+
+const InstructionSetFeatures* ArmInstructionSetFeatures::AddFeaturesFromSplitString(
+ const bool smp, const std::vector<std::string>& features, std::string* error_msg) const {
+ bool has_atomic_ldrd_strd = has_atomic_ldrd_strd_;
+ bool has_div = has_div_;
+ for (auto i = features.begin(); i != features.end(); i++) {
+ std::string feature = Trim(*i);
+ if (feature == "div") {
+ has_div = true;
+ } else if (feature == "-div") {
+ has_div = false;
+ } else if (feature == "atomic_ldrd_strd") {
+ has_atomic_ldrd_strd = true;
+ } else if (feature == "-atomic_ldrd_strd") {
+ has_atomic_ldrd_strd = false;
+ } else {
+ *error_msg = StringPrintf("Unknown instruction set feature: '%s'", feature.c_str());
+ return nullptr;
+ }
+ }
+ return new ArmInstructionSetFeatures(smp, has_div, has_atomic_ldrd_strd);
+}
+
+} // namespace art
diff --git a/runtime/arch/arm/instruction_set_features_arm.h b/runtime/arch/arm/instruction_set_features_arm.h
new file mode 100644
index 0000000..221bf1f
--- /dev/null
+++ b/runtime/arch/arm/instruction_set_features_arm.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_ARM_INSTRUCTION_SET_FEATURES_ARM_H_
+#define ART_RUNTIME_ARCH_ARM_INSTRUCTION_SET_FEATURES_ARM_H_
+
+#include "arch/instruction_set_features.h"
+
+namespace art {
+
+// Instruction set features relevant to the ARM architecture.
+class ArmInstructionSetFeatures FINAL : public InstructionSetFeatures {
+ public:
+ // Process a CPU variant string like "krait" or "cortex-a15" and create InstructionSetFeatures.
+ static const ArmInstructionSetFeatures* FromVariant(const std::string& variant,
+ std::string* error_msg);
+
+ // Parse a bitmap and create an InstructionSetFeatures.
+ static const ArmInstructionSetFeatures* FromBitmap(uint32_t bitmap);
+
+ // Turn C pre-processor #defines into the equivalent instruction set features.
+ static const ArmInstructionSetFeatures* FromCppDefines();
+
+ // Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
+ static const ArmInstructionSetFeatures* FromCpuInfo();
+
+ // Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
+ // InstructionSetFeatures.
+ static const ArmInstructionSetFeatures* FromHwcap();
+
+ // Use assembly tests of the current runtime (ie kRuntimeISA) to determine the
+ // InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
+ static const ArmInstructionSetFeatures* FromAssembly();
+
+ bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
+
+ InstructionSet GetInstructionSet() const OVERRIDE {
+ return kArm;
+ }
+
+ uint32_t AsBitmap() const OVERRIDE;
+
+  // Return a string of the form "smp,div,atomic_ldrd_strd", with absent features prefixed by '-'.
+ std::string GetFeatureString() const OVERRIDE;
+
+ // Is the divide instruction feature enabled?
+ bool HasDivideInstruction() const {
+ return has_div_;
+ }
+
+ // Are the ldrd and strd instructions atomic? This is commonly true when the Large Physical
+ // Address Extension (LPAE) is present.
+ bool HasAtomicLdrdAndStrd() const {
+ return has_atomic_ldrd_strd_;
+ }
+
+ virtual ~ArmInstructionSetFeatures() {}
+
+ protected:
+  // Parse a vector of features ("div", "atomic_ldrd_strd") into a new ArmInstructionSetFeatures.
+ const InstructionSetFeatures*
+ AddFeaturesFromSplitString(const bool smp, const std::vector<std::string>& features,
+ std::string* error_msg) const OVERRIDE;
+
+ private:
+ ArmInstructionSetFeatures(bool smp, bool has_div, bool has_atomic_ldrd_strd)
+ : InstructionSetFeatures(smp),
+ has_div_(has_div), has_atomic_ldrd_strd_(has_atomic_ldrd_strd) {
+ }
+
+ // Bitmap positions for encoding features as a bitmap.
+ enum {
+ kSmpBitfield = 1,
+ kDivBitfield = 2,
+ kAtomicLdrdStrdBitfield = 4,
+ };
+
+ const bool has_div_;
+ const bool has_atomic_ldrd_strd_;
+
+ DISALLOW_COPY_AND_ASSIGN(ArmInstructionSetFeatures);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_ARCH_ARM_INSTRUCTION_SET_FEATURES_ARM_H_
diff --git a/runtime/arch/arm/instruction_set_features_arm_test.cc b/runtime/arch/arm/instruction_set_features_arm_test.cc
new file mode 100644
index 0000000..44b1640
--- /dev/null
+++ b/runtime/arch/arm/instruction_set_features_arm_test.cc
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_set_features_arm.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+TEST(ArmInstructionSetFeaturesTest, ArmFeaturesFromVariant) {
+ // Build features for a 32-bit ARM krait processor.
+ std::string error_msg;
+ std::unique_ptr<const InstructionSetFeatures> krait_features(
+ InstructionSetFeatures::FromVariant(kArm, "krait", &error_msg));
+ ASSERT_TRUE(krait_features.get() != nullptr) << error_msg;
+
+ ASSERT_EQ(krait_features->GetInstructionSet(), kArm);
+ EXPECT_TRUE(krait_features->Equals(krait_features.get()));
+ EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
+ EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
+ EXPECT_STREQ("smp,div,atomic_ldrd_strd", krait_features->GetFeatureString().c_str());
+ EXPECT_EQ(krait_features->AsBitmap(), 7U);
+
+ // Build features for a 32-bit ARM denver processor.
+ std::unique_ptr<const InstructionSetFeatures> denver_features(
+ InstructionSetFeatures::FromVariant(kArm, "denver", &error_msg));
+ ASSERT_TRUE(denver_features.get() != nullptr) << error_msg;
+
+ EXPECT_TRUE(denver_features->Equals(denver_features.get()));
+ EXPECT_TRUE(denver_features->Equals(krait_features.get()));
+ EXPECT_TRUE(krait_features->Equals(denver_features.get()));
+ EXPECT_TRUE(denver_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
+ EXPECT_TRUE(denver_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
+ EXPECT_STREQ("smp,div,atomic_ldrd_strd", denver_features->GetFeatureString().c_str());
+ EXPECT_EQ(denver_features->AsBitmap(), 7U);
+
+ // Build features for a 32-bit ARMv7 processor.
+ std::unique_ptr<const InstructionSetFeatures> arm7_features(
+ InstructionSetFeatures::FromVariant(kArm, "arm7", &error_msg));
+ ASSERT_TRUE(arm7_features.get() != nullptr) << error_msg;
+
+ EXPECT_TRUE(arm7_features->Equals(arm7_features.get()));
+ EXPECT_FALSE(arm7_features->Equals(krait_features.get()));
+ EXPECT_FALSE(krait_features->Equals(arm7_features.get()));
+ EXPECT_FALSE(arm7_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
+ EXPECT_FALSE(arm7_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
+ EXPECT_STREQ("smp,-div,-atomic_ldrd_strd", arm7_features->GetFeatureString().c_str());
+ EXPECT_EQ(arm7_features->AsBitmap(), 1U);
+
+ // ARM6 is not a supported architecture variant.
+ std::unique_ptr<const InstructionSetFeatures> arm6_features(
+ InstructionSetFeatures::FromVariant(kArm, "arm6", &error_msg));
+ EXPECT_TRUE(arm6_features.get() == nullptr);
+ EXPECT_NE(error_msg.size(), 0U);
+}
+
+TEST(ArmInstructionSetFeaturesTest, ArmAddFeaturesFromString) {
+ std::string error_msg;
+ std::unique_ptr<const InstructionSetFeatures> base_features(
+ InstructionSetFeatures::FromVariant(kArm, "arm7", &error_msg));
+ ASSERT_TRUE(base_features.get() != nullptr) << error_msg;
+
+ // Build features for a 32-bit ARM with LPAE and div processor.
+ std::unique_ptr<const InstructionSetFeatures> krait_features(
+ base_features->AddFeaturesFromString("atomic_ldrd_strd,div", &error_msg));
+ ASSERT_TRUE(krait_features.get() != nullptr) << error_msg;
+
+ ASSERT_EQ(krait_features->GetInstructionSet(), kArm);
+ EXPECT_TRUE(krait_features->Equals(krait_features.get()));
+ EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
+ EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
+ EXPECT_STREQ("smp,div,atomic_ldrd_strd", krait_features->GetFeatureString().c_str());
+ EXPECT_EQ(krait_features->AsBitmap(), 7U);
+
+ // Build features for a 32-bit ARM processor with LPAE and div flipped.
+ std::unique_ptr<const InstructionSetFeatures> denver_features(
+ base_features->AddFeaturesFromString("div,atomic_ldrd_strd", &error_msg));
+ ASSERT_TRUE(denver_features.get() != nullptr) << error_msg;
+
+ EXPECT_TRUE(denver_features->Equals(denver_features.get()));
+ EXPECT_TRUE(denver_features->Equals(krait_features.get()));
+ EXPECT_TRUE(krait_features->Equals(denver_features.get()));
+ EXPECT_TRUE(denver_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
+ EXPECT_TRUE(denver_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
+ EXPECT_STREQ("smp,div,atomic_ldrd_strd", denver_features->GetFeatureString().c_str());
+ EXPECT_EQ(denver_features->AsBitmap(), 7U);
+
+ // Build features for a 32-bit default ARM processor.
+ std::unique_ptr<const InstructionSetFeatures> arm7_features(
+ base_features->AddFeaturesFromString("default", &error_msg));
+ ASSERT_TRUE(arm7_features.get() != nullptr) << error_msg;
+
+ EXPECT_TRUE(arm7_features->Equals(arm7_features.get()));
+ EXPECT_FALSE(arm7_features->Equals(krait_features.get()));
+ EXPECT_FALSE(krait_features->Equals(arm7_features.get()));
+ EXPECT_FALSE(arm7_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
+ EXPECT_FALSE(arm7_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
+ EXPECT_STREQ("smp,-div,-atomic_ldrd_strd", arm7_features->GetFeatureString().c_str());
+ EXPECT_EQ(arm7_features->AsBitmap(), 1U);
+}
+
+} // namespace art
diff --git a/runtime/arch/arm/instruction_set_features_arm.S b/runtime/arch/arm/instruction_set_features_assembly_tests.S
similarity index 94%
rename from runtime/arch/arm/instruction_set_features_arm.S
rename to runtime/arch/arm/instruction_set_features_assembly_tests.S
index c26f2cd..c1086df 100644
--- a/runtime/arch/arm/instruction_set_features_arm.S
+++ b/runtime/arch/arm/instruction_set_features_assembly_tests.S
@@ -23,7 +23,7 @@
// caller must arrange for the signal handler to set the r0
// register to 0 and move the pc forward by 4 bytes (to skip
// the invalid instruction).
-ENTRY artCheckForARMSDIVInstruction
+ENTRY artCheckForArmSdivInstruction
mov r1,#1
// depending on the architecture, the assembler will not allow an
// sdiv instruction, so we will have to output the bytes directly.
@@ -35,4 +35,4 @@
// It will have 0 otherwise (set by the signal handler)
// the value is just returned from this function.
bx lr
-END artCheckForARMSDIVInstruction
+END artCheckForArmSdivInstruction
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.cc b/runtime/arch/arm64/instruction_set_features_arm64.cc
new file mode 100644
index 0000000..696dd94
--- /dev/null
+++ b/runtime/arch/arm64/instruction_set_features_arm64.cc
@@ -0,0 +1,137 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_set_features_arm64.h"
+
+#include <fstream>
+#include <sstream>
+
+#include "base/stringprintf.h"
+#include "utils.h" // For Trim.
+
+namespace art {
+
+const Arm64InstructionSetFeatures* Arm64InstructionSetFeatures::FromVariant(
+    const std::string& variant, std::string* error_msg) {
+ if (variant != "default" && variant != "generic") {
+ std::ostringstream os;
+ os << "Unexpected CPU variant for Arm64: " << variant;
+ *error_msg = os.str();
+ return nullptr;
+ }
+ const bool smp = true; // Conservative default.
+ const bool is_a53 = true; // Pessimistically assume all ARM64s are A53s.
+ return new Arm64InstructionSetFeatures(smp, is_a53);
+}
+
+const Arm64InstructionSetFeatures* Arm64InstructionSetFeatures::FromBitmap(uint32_t bitmap) {
+ bool smp = (bitmap & kSmpBitfield) != 0;
+ bool is_a53 = (bitmap & kA53Bitfield) != 0;
+ return new Arm64InstructionSetFeatures(smp, is_a53);
+}
+
+const Arm64InstructionSetFeatures* Arm64InstructionSetFeatures::FromCppDefines() {
+#if defined(HAVE_ANDROID_OS) && (ANDROID_SMP == 0)
+ const bool smp = false;
+#else
+ const bool smp = true;
+#endif
+
+ const bool is_a53 = true; // Pessimistically assume all ARM64s are A53s.
+ return new Arm64InstructionSetFeatures(smp, is_a53);
+}
+
+const Arm64InstructionSetFeatures* Arm64InstructionSetFeatures::FromCpuInfo() {
+ // Look in /proc/cpuinfo for features we need. Only use this when we can guarantee that
+ // the kernel puts the appropriate feature flags in here. Sometimes it doesn't.
+ bool smp = false;
+ const bool is_a53 = true; // Conservative default.
+
+ std::ifstream in("/proc/cpuinfo");
+ if (!in.fail()) {
+ while (!in.eof()) {
+ std::string line;
+ std::getline(in, line);
+ if (!in.eof()) {
+ LOG(INFO) << "cpuinfo line: " << line;
+ if (line.find("processor") != std::string::npos && line.find(": 1") != std::string::npos) {
+ smp = true;
+ }
+ }
+ }
+ in.close();
+ } else {
+ LOG(ERROR) << "Failed to open /proc/cpuinfo";
+ }
+ return new Arm64InstructionSetFeatures(smp, is_a53);
+}
+
+const Arm64InstructionSetFeatures* Arm64InstructionSetFeatures::FromHwcap() {
+ bool smp = sysconf(_SC_NPROCESSORS_CONF) > 1;
+ const bool is_a53 = true; // Pessimistically assume all ARM64s are A53s.
+ return new Arm64InstructionSetFeatures(smp, is_a53);
+}
+
+const Arm64InstructionSetFeatures* Arm64InstructionSetFeatures::FromAssembly() {
+ UNIMPLEMENTED(WARNING);
+ return FromCppDefines();
+}
+
+bool Arm64InstructionSetFeatures::Equals(const InstructionSetFeatures* other) const {
+ if (kArm64 != other->GetInstructionSet()) {
+ return false;
+ }
+ const Arm64InstructionSetFeatures* other_as_arm = other->AsArm64InstructionSetFeatures();
+  return IsSmp() == other_as_arm->IsSmp() && fix_cortex_a53_835769_ == other_as_arm->fix_cortex_a53_835769_;
+}
+
+uint32_t Arm64InstructionSetFeatures::AsBitmap() const {
+ return (IsSmp() ? kSmpBitfield : 0) | (fix_cortex_a53_835769_ ? kA53Bitfield : 0);
+}
+
+std::string Arm64InstructionSetFeatures::GetFeatureString() const {
+ std::string result;
+ if (IsSmp()) {
+ result += "smp";
+ } else {
+ result += "-smp";
+ }
+ if (fix_cortex_a53_835769_) {
+ result += ",a53";
+ } else {
+ result += ",-a53";
+ }
+ return result;
+}
+
+const InstructionSetFeatures* Arm64InstructionSetFeatures::AddFeaturesFromSplitString(
+ const bool smp, const std::vector<std::string>& features, std::string* error_msg) const {
+ bool is_a53 = fix_cortex_a53_835769_;
+ for (auto i = features.begin(); i != features.end(); i++) {
+ std::string feature = Trim(*i);
+ if (feature == "a53") {
+ is_a53 = true;
+ } else if (feature == "-a53") {
+ is_a53 = false;
+ } else {
+ *error_msg = StringPrintf("Unknown instruction set feature: '%s'", feature.c_str());
+ return nullptr;
+ }
+ }
+ return new Arm64InstructionSetFeatures(smp, is_a53);
+}
+
+} // namespace art
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.h b/runtime/arch/arm64/instruction_set_features_arm64.h
new file mode 100644
index 0000000..ee41536
--- /dev/null
+++ b/runtime/arch/arm64/instruction_set_features_arm64.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_ARM64_INSTRUCTION_SET_FEATURES_ARM64_H_
+#define ART_RUNTIME_ARCH_ARM64_INSTRUCTION_SET_FEATURES_ARM64_H_
+
+#include "arch/instruction_set_features.h"
+
+namespace art {
+
+// Instruction set features relevant to the ARM64 architecture.
+class Arm64InstructionSetFeatures FINAL : public InstructionSetFeatures {
+ public:
+ // Process a CPU variant string like "krait" or "cortex-a15" and create InstructionSetFeatures.
+ static const Arm64InstructionSetFeatures* FromVariant(const std::string& variant,
+ std::string* error_msg);
+
+ // Parse a bitmap and create an InstructionSetFeatures.
+ static const Arm64InstructionSetFeatures* FromBitmap(uint32_t bitmap);
+
+ // Turn C pre-processor #defines into the equivalent instruction set features.
+ static const Arm64InstructionSetFeatures* FromCppDefines();
+
+ // Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
+ static const Arm64InstructionSetFeatures* FromCpuInfo();
+
+ // Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
+ // InstructionSetFeatures.
+ static const Arm64InstructionSetFeatures* FromHwcap();
+
+ // Use assembly tests of the current runtime (ie kRuntimeISA) to determine the
+ // InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
+ static const Arm64InstructionSetFeatures* FromAssembly();
+
+ bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
+
+ InstructionSet GetInstructionSet() const OVERRIDE {
+ return kArm64;
+ }
+
+ uint32_t AsBitmap() const OVERRIDE;
+
+  // Return a string of the form "smp,a53", with absent features prefixed by '-'.
+ std::string GetFeatureString() const OVERRIDE;
+
+ // Generate code addressing Cortex-A53 erratum 835769?
+ bool NeedFixCortexA53_835769() const {
+ return fix_cortex_a53_835769_;
+ }
+
+ virtual ~Arm64InstructionSetFeatures() {}
+
+ protected:
+  // Parse a vector of the form "a53" adding these to a new Arm64InstructionSetFeatures.
+ const InstructionSetFeatures*
+ AddFeaturesFromSplitString(const bool smp, const std::vector<std::string>& features,
+ std::string* error_msg) const OVERRIDE;
+
+ private:
+  Arm64InstructionSetFeatures(bool smp, bool is_a53)
+ : InstructionSetFeatures(smp), fix_cortex_a53_835769_(is_a53) {
+ }
+
+ // Bitmap positions for encoding features as a bitmap.
+ enum {
+ kSmpBitfield = 1,
+ kA53Bitfield = 2,
+ };
+
+ const bool fix_cortex_a53_835769_;
+
+ DISALLOW_COPY_AND_ASSIGN(Arm64InstructionSetFeatures);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_ARCH_ARM64_INSTRUCTION_SET_FEATURES_ARM64_H_
diff --git a/runtime/arch/arm64/instruction_set_features_arm64_test.cc b/runtime/arch/arm64/instruction_set_features_arm64_test.cc
new file mode 100644
index 0000000..027e59c
--- /dev/null
+++ b/runtime/arch/arm64/instruction_set_features_arm64_test.cc
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_set_features_arm64.h"
+
+#include <gtest/gtest.h>
+
+namespace art {
+
+TEST(Arm64InstructionSetFeaturesTest, Arm64Features) {
+ // Build features for an ARM64 processor.
+ std::string error_msg;
+ std::unique_ptr<const InstructionSetFeatures> arm64_features(
+ InstructionSetFeatures::FromVariant(kArm64, "default", &error_msg));
+ ASSERT_TRUE(arm64_features.get() != nullptr) << error_msg;
+ EXPECT_EQ(arm64_features->GetInstructionSet(), kArm64);
+ EXPECT_TRUE(arm64_features->Equals(arm64_features.get()));
+ EXPECT_STREQ("smp,a53", arm64_features->GetFeatureString().c_str());
+ EXPECT_EQ(arm64_features->AsBitmap(), 3U);
+}
+
+} // namespace art
diff --git a/runtime/arch/instruction_set.cc b/runtime/arch/instruction_set.cc
new file mode 100644
index 0000000..92fa727
--- /dev/null
+++ b/runtime/arch/instruction_set.cc
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_set.h"
+
+#include "globals.h"
+
+namespace art {
+
+const char* GetInstructionSetString(const InstructionSet isa) {
+ switch (isa) {
+ case kArm:
+ case kThumb2:
+ return "arm";
+ case kArm64:
+ return "arm64";
+ case kX86:
+ return "x86";
+ case kX86_64:
+ return "x86_64";
+ case kMips:
+ return "mips";
+ case kMips64:
+ return "mips64";
+ case kNone:
+ return "none";
+ default:
+ LOG(FATAL) << "Unknown ISA " << isa;
+ UNREACHABLE();
+ }
+}
+
+InstructionSet GetInstructionSetFromString(const char* isa_str) {
+  CHECK(isa_str != nullptr);
+
+  if (strcmp("arm", isa_str) == 0) {
+    return kArm;
+  } else if (strcmp("arm64", isa_str) == 0) {
+    return kArm64;
+  } else if (strcmp("x86", isa_str) == 0) {
+    return kX86;
+  } else if (strcmp("x86_64", isa_str) == 0) {
+    return kX86_64;
+  } else if (strcmp("mips", isa_str) == 0) {
+    return kMips;
+  } else if (strcmp("mips64", isa_str) == 0) {
+    return kMips64;  // Fixed: previously returned kMips by copy/paste error.
+  }
+
+  return kNone;
+}
+
+size_t GetInstructionSetAlignment(InstructionSet isa) {
+ switch (isa) {
+ case kArm:
+ // Fall-through.
+ case kThumb2:
+ return kArmAlignment;
+ case kArm64:
+ return kArm64Alignment;
+ case kX86:
+ // Fall-through.
+ case kX86_64:
+ return kX86Alignment;
+ case kMips:
+ return kMipsAlignment;
+ case kNone:
+ LOG(FATAL) << "ISA kNone does not have alignment.";
+ UNREACHABLE();
+ default:
+ LOG(FATAL) << "Unknown ISA " << isa;
+ UNREACHABLE();
+ }
+}
+
+static constexpr size_t kDefaultStackOverflowReservedBytes = 16 * KB;
+static constexpr size_t kMipsStackOverflowReservedBytes = kDefaultStackOverflowReservedBytes;
+
+static constexpr size_t kArmStackOverflowReservedBytes = 8 * KB;
+static constexpr size_t kArm64StackOverflowReservedBytes = 8 * KB;
+static constexpr size_t kX86StackOverflowReservedBytes = 8 * KB;
+static constexpr size_t kX86_64StackOverflowReservedBytes = 8 * KB;
+
+size_t GetStackOverflowReservedBytes(InstructionSet isa) {
+  switch (isa) {
+    case kArm:  // Intentional fall-through.
+    case kThumb2:
+      return kArmStackOverflowReservedBytes;
+
+    case kArm64:
+      return kArm64StackOverflowReservedBytes;
+
+    case kMips:
+      return kMipsStackOverflowReservedBytes;
+
+    case kX86:
+      return kX86StackOverflowReservedBytes;
+
+    case kX86_64:
+      return kX86_64StackOverflowReservedBytes;
+
+    case kNone:
+      LOG(FATAL) << "kNone has no stack overflow size";
+      UNREACHABLE();
+
+    default:
+      LOG(FATAL) << "Unknown instruction set " << isa;
+      UNREACHABLE();
+  }
+}
+
+} // namespace art
diff --git a/runtime/arch/instruction_set.h b/runtime/arch/instruction_set.h
new file mode 100644
index 0000000..46622eb
--- /dev/null
+++ b/runtime/arch/instruction_set.h
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_INSTRUCTION_SET_H_
+#define ART_RUNTIME_ARCH_INSTRUCTION_SET_H_
+
+#include <iosfwd>
+#include <string>
+
+#include "base/logging.h" // Logging is required for FATAL in the helper functions.
+
+namespace art {
+
+enum InstructionSet {
+ kNone,
+ kArm,
+ kArm64,
+ kThumb2,
+ kX86,
+ kX86_64,
+ kMips,
+ kMips64
+};
+std::ostream& operator<<(std::ostream& os, const InstructionSet& rhs);
+
+#if defined(__arm__)
+static constexpr InstructionSet kRuntimeISA = kArm;
+#elif defined(__aarch64__)
+static constexpr InstructionSet kRuntimeISA = kArm64;
+#elif defined(__mips__)
+static constexpr InstructionSet kRuntimeISA = kMips;
+#elif defined(__i386__)
+static constexpr InstructionSet kRuntimeISA = kX86;
+#elif defined(__x86_64__)
+static constexpr InstructionSet kRuntimeISA = kX86_64;
+#else
+static constexpr InstructionSet kRuntimeISA = kNone;
+#endif
+
+// Architecture-specific pointer sizes
+static constexpr size_t kArmPointerSize = 4;
+static constexpr size_t kArm64PointerSize = 8;
+static constexpr size_t kMipsPointerSize = 4;
+static constexpr size_t kMips64PointerSize = 8;
+static constexpr size_t kX86PointerSize = 4;
+static constexpr size_t kX86_64PointerSize = 8;
+
+// ARM instruction alignment. ARM processors require code to be 4-byte aligned,
+// but ARM ELF requires 8.
+static constexpr size_t kArmAlignment = 8;
+
+// ARM64 instruction alignment. This is the recommended alignment for maximum performance.
+static constexpr size_t kArm64Alignment = 16;
+
+// MIPS instruction alignment. MIPS processors require code to be 4-byte aligned.
+// TODO: Can this be 4?
+static constexpr size_t kMipsAlignment = 8;
+
+// X86 instruction alignment. This is the recommended alignment for maximum performance.
+static constexpr size_t kX86Alignment = 16;
+
+
+const char* GetInstructionSetString(InstructionSet isa);
+
+// Note: Returns kNone when the string cannot be parsed to a known value.
+InstructionSet GetInstructionSetFromString(const char* instruction_set);
+
+static inline size_t GetInstructionSetPointerSize(InstructionSet isa) {
+ switch (isa) {
+ case kArm:
+ // Fall-through.
+ case kThumb2:
+ return kArmPointerSize;
+ case kArm64:
+ return kArm64PointerSize;
+ case kX86:
+ return kX86PointerSize;
+ case kX86_64:
+ return kX86_64PointerSize;
+ case kMips:
+ return kMipsPointerSize;
+ case kMips64:
+ return kMips64PointerSize;
+ case kNone:
+ LOG(FATAL) << "ISA kNone does not have pointer size.";
+ UNREACHABLE();
+ default:
+ LOG(FATAL) << "Unknown ISA " << isa;
+ UNREACHABLE();
+ }
+}
+
+size_t GetInstructionSetAlignment(InstructionSet isa);
+
+static inline bool Is64BitInstructionSet(InstructionSet isa) {
+ switch (isa) {
+ case kArm:
+ case kThumb2:
+ case kX86:
+ case kMips:
+ return false;
+
+ case kArm64:
+ case kX86_64:
+ case kMips64:
+ return true;
+
+ case kNone:
+ LOG(FATAL) << "ISA kNone does not have bit width.";
+ UNREACHABLE();
+ default:
+ LOG(FATAL) << "Unknown ISA " << isa;
+ UNREACHABLE();
+ }
+}
+
+static inline size_t GetBytesPerGprSpillLocation(InstructionSet isa) {
+ switch (isa) {
+ case kArm:
+ // Fall-through.
+ case kThumb2:
+ return 4;
+ case kArm64:
+ return 8;
+ case kX86:
+ return 4;
+ case kX86_64:
+ return 8;
+ case kMips:
+ return 4;
+ case kNone:
+ LOG(FATAL) << "ISA kNone does not have spills.";
+ UNREACHABLE();
+ default:
+ LOG(FATAL) << "Unknown ISA " << isa;
+ UNREACHABLE();
+ }
+}
+
+static inline size_t GetBytesPerFprSpillLocation(InstructionSet isa) {
+ switch (isa) {
+ case kArm:
+ // Fall-through.
+ case kThumb2:
+ return 4;
+ case kArm64:
+ return 8;
+ case kX86:
+ return 8;
+ case kX86_64:
+ return 8;
+ case kMips:
+ return 4;
+ case kNone:
+ LOG(FATAL) << "ISA kNone does not have spills.";
+ UNREACHABLE();
+ default:
+ LOG(FATAL) << "Unknown ISA " << isa;
+ UNREACHABLE();
+ }
+}
+
+size_t GetStackOverflowReservedBytes(InstructionSet isa);
+
+// The following definitions create return types for two word-sized entities that will be passed
+// in registers so that memory operations for the interface trampolines can be avoided. The entities
+// are the resolved method and the pointer to the code to be invoked.
+//
+// On x86, ARM32 and MIPS, this is given for a *scalar* 64bit value. The definition thus *must* be
+// uint64_t or long long int.
+//
+// On x86_64 and ARM64, structs are decomposed for allocation, so we can create a structs of two
+// size_t-sized values.
+//
+// We need two operations:
+//
+// 1) A flag value that signals failure. The assembly stubs expect the lower part to be "0".
+// GetTwoWordFailureValue() will return a value that has lower part == 0.
+//
+// 2) A value that combines two word-sized values.
+// GetTwoWordSuccessValue() constructs this.
+//
+// IMPORTANT: If you use this to transfer object pointers, it is your responsibility to ensure
+// that the object does not move or the value is updated. Simple use of this is NOT SAFE
+// when the garbage collector can move objects concurrently. Ensure that required locks
+// are held when using!
+
+#if defined(__i386__) || defined(__arm__) || defined(__mips__)
+typedef uint64_t TwoWordReturn;
+
+// Encodes method_ptr==nullptr and code_ptr==nullptr
+static inline constexpr TwoWordReturn GetTwoWordFailureValue() {
+ return 0;
+}
+
+// Use the lower 32b for the method pointer and the upper 32b for the code pointer.
+static inline TwoWordReturn GetTwoWordSuccessValue(uintptr_t hi, uintptr_t lo) {
+ static_assert(sizeof(uint32_t) == sizeof(uintptr_t), "Unexpected size difference");
+ uint32_t lo32 = lo;
+ uint64_t hi64 = static_cast<uint64_t>(hi);
+ return ((hi64 << 32) | lo32);
+}
+
+#elif defined(__x86_64__) || defined(__aarch64__)
+struct TwoWordReturn {
+ uintptr_t lo;
+ uintptr_t hi;
+};
+
+// Encodes method_ptr==nullptr. Leaves random value in code pointer.
+static inline TwoWordReturn GetTwoWordFailureValue() {
+ TwoWordReturn ret;
+ ret.lo = 0;
+ return ret;
+}
+
+// Write values into their respective members.
+static inline TwoWordReturn GetTwoWordSuccessValue(uintptr_t hi, uintptr_t lo) {
+ TwoWordReturn ret;
+ ret.lo = lo;
+ ret.hi = hi;
+ return ret;
+}
+#else
+#error "Unsupported architecture"
+#endif
+
+} // namespace art
+
+#endif // ART_RUNTIME_ARCH_INSTRUCTION_SET_H_
diff --git a/runtime/arch/instruction_set_features.cc b/runtime/arch/instruction_set_features.cc
new file mode 100644
index 0000000..1072562
--- /dev/null
+++ b/runtime/arch/instruction_set_features.cc
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_set_features.h"
+
+#include "base/casts.h"
+#include "utils.h"
+
+
+#include "arm/instruction_set_features_arm.h"
+#include "arm64/instruction_set_features_arm64.h"
+#include "mips/instruction_set_features_mips.h"
+#include "x86/instruction_set_features_x86.h"
+#include "x86_64/instruction_set_features_x86_64.h"
+
+namespace art {
+
+const InstructionSetFeatures* InstructionSetFeatures::FromVariant(InstructionSet isa,
+ const std::string& variant,
+ std::string* error_msg) {
+ const InstructionSetFeatures* result;
+ switch (isa) {
+ case kArm:
+ case kThumb2:
+ result = ArmInstructionSetFeatures::FromVariant(variant, error_msg);
+ break;
+ case kArm64:
+ result = Arm64InstructionSetFeatures::FromVariant(variant, error_msg);
+ break;
+ case kMips:
+ result = MipsInstructionSetFeatures::FromVariant(variant, error_msg);
+ break;
+ case kX86:
+ result = X86InstructionSetFeatures::FromVariant(variant, error_msg);
+ break;
+ case kX86_64:
+ result = X86_64InstructionSetFeatures::FromVariant(variant, error_msg);
+ break;
+ default:
+ UNIMPLEMENTED(FATAL) << isa;
+ UNREACHABLE();
+ }
+ CHECK_EQ(result == nullptr, error_msg->size() != 0);
+ return result;
+}
+
+const InstructionSetFeatures* InstructionSetFeatures::FromBitmap(InstructionSet isa,
+ uint32_t bitmap) {
+ const InstructionSetFeatures* result;
+ switch (isa) {
+ case kArm:
+ case kThumb2:
+ result = ArmInstructionSetFeatures::FromBitmap(bitmap);
+ break;
+ case kArm64:
+ result = Arm64InstructionSetFeatures::FromBitmap(bitmap);
+ break;
+ case kMips:
+ result = MipsInstructionSetFeatures::FromBitmap(bitmap);
+ break;
+ case kX86:
+ result = X86InstructionSetFeatures::FromBitmap(bitmap);
+ break;
+ case kX86_64:
+ result = X86_64InstructionSetFeatures::FromBitmap(bitmap);
+ break;
+ default:
+ UNIMPLEMENTED(FATAL) << isa;
+ UNREACHABLE();
+ }
+ CHECK_EQ(bitmap, result->AsBitmap());
+ return result;
+}
+
+const InstructionSetFeatures* InstructionSetFeatures::FromCppDefines() {
+ const InstructionSetFeatures* result;
+ switch (kRuntimeISA) {
+ case kArm:
+ case kThumb2:
+ result = ArmInstructionSetFeatures::FromCppDefines();
+ break;
+ case kArm64:
+ result = Arm64InstructionSetFeatures::FromCppDefines();
+ break;
+ case kMips:
+ result = MipsInstructionSetFeatures::FromCppDefines();
+ break;
+ case kX86:
+ result = X86InstructionSetFeatures::FromCppDefines();
+ break;
+ case kX86_64:
+ result = X86_64InstructionSetFeatures::FromCppDefines();
+ break;
+ default:
+ UNIMPLEMENTED(FATAL) << kRuntimeISA;
+ UNREACHABLE();
+ }
+ return result;
+}
+
+
+const InstructionSetFeatures* InstructionSetFeatures::FromCpuInfo() {
+ const InstructionSetFeatures* result;
+ switch (kRuntimeISA) {
+ case kArm:
+ case kThumb2:
+ result = ArmInstructionSetFeatures::FromCpuInfo();
+ break;
+ case kArm64:
+ result = Arm64InstructionSetFeatures::FromCpuInfo();
+ break;
+ case kMips:
+ result = MipsInstructionSetFeatures::FromCpuInfo();
+ break;
+ case kX86:
+ result = X86InstructionSetFeatures::FromCpuInfo();
+ break;
+ case kX86_64:
+ result = X86_64InstructionSetFeatures::FromCpuInfo();
+ break;
+ default:
+ UNIMPLEMENTED(FATAL) << kRuntimeISA;
+ UNREACHABLE();
+ }
+ return result;
+}
+
+const InstructionSetFeatures* InstructionSetFeatures::FromHwcap() {
+ const InstructionSetFeatures* result;
+ switch (kRuntimeISA) {
+ case kArm:
+ case kThumb2:
+ result = ArmInstructionSetFeatures::FromHwcap();
+ break;
+ case kArm64:
+ result = Arm64InstructionSetFeatures::FromHwcap();
+ break;
+ case kMips:
+ result = MipsInstructionSetFeatures::FromHwcap();
+ break;
+ case kX86:
+ result = X86InstructionSetFeatures::FromHwcap();
+ break;
+ case kX86_64:
+ result = X86_64InstructionSetFeatures::FromHwcap();
+ break;
+ default:
+ UNIMPLEMENTED(FATAL) << kRuntimeISA;
+ UNREACHABLE();
+ }
+ return result;
+}
+
+const InstructionSetFeatures* InstructionSetFeatures::FromAssembly() {
+ const InstructionSetFeatures* result;
+ switch (kRuntimeISA) {
+ case kArm:
+ case kThumb2:
+ result = ArmInstructionSetFeatures::FromAssembly();
+ break;
+ case kArm64:
+ result = Arm64InstructionSetFeatures::FromAssembly();
+ break;
+ case kMips:
+ result = MipsInstructionSetFeatures::FromAssembly();
+ break;
+ case kX86:
+ result = X86InstructionSetFeatures::FromAssembly();
+ break;
+ case kX86_64:
+ result = X86_64InstructionSetFeatures::FromAssembly();
+ break;
+ default:
+ UNIMPLEMENTED(FATAL) << kRuntimeISA;
+ UNREACHABLE();
+ }
+ return result;
+}
+
+const InstructionSetFeatures* InstructionSetFeatures::AddFeaturesFromString(
+ const std::string& feature_list, std::string* error_msg) const {
+ if (feature_list.empty()) {
+ *error_msg = "No instruction set features specified";
+ return nullptr;
+ }
+ std::vector<std::string> features;
+ Split(feature_list, ',', &features);
+ bool smp = smp_;
+ bool use_default = false; // Have we seen the 'default' feature?
+ bool first = false; // Is this first feature?
+ for (auto it = features.begin(); it != features.end();) {
+ if (use_default) {
+ *error_msg = "Unexpected instruction set features after 'default'";
+ return nullptr;
+ }
+ std::string feature = Trim(*it);
+ bool erase = false;
+ if (feature == "default") {
+ if (!first) {
+ use_default = true;
+ erase = true;
+ } else {
+ *error_msg = "Unexpected instruction set features before 'default'";
+ return nullptr;
+ }
+ } else if (feature == "smp") {
+ smp = true;
+ erase = true;
+ } else if (feature == "-smp") {
+ smp = false;
+ erase = true;
+ }
+ // Erase the smp feature once processed.
+ if (!erase) {
+ ++it;
+ } else {
+ it = features.erase(it);
+ }
+ first = true;
+ }
+ DCHECK_EQ(use_default, features.empty());
+ return AddFeaturesFromSplitString(smp, features, error_msg);
+}
+
+const ArmInstructionSetFeatures* InstructionSetFeatures::AsArmInstructionSetFeatures() const {
+ DCHECK_EQ(kArm, GetInstructionSet());
+ return down_cast<const ArmInstructionSetFeatures*>(this);
+}
+
+const Arm64InstructionSetFeatures* InstructionSetFeatures::AsArm64InstructionSetFeatures() const {
+ DCHECK_EQ(kArm64, GetInstructionSet());
+ return down_cast<const Arm64InstructionSetFeatures*>(this);
+}
+
+const MipsInstructionSetFeatures* InstructionSetFeatures::AsMipsInstructionSetFeatures() const {
+ DCHECK_EQ(kMips, GetInstructionSet());
+ return down_cast<const MipsInstructionSetFeatures*>(this);
+}
+
+const X86InstructionSetFeatures* InstructionSetFeatures::AsX86InstructionSetFeatures() const {
+ DCHECK(kX86 == GetInstructionSet() || kX86_64 == GetInstructionSet());
+ return down_cast<const X86InstructionSetFeatures*>(this);
+}
+
+const X86_64InstructionSetFeatures* InstructionSetFeatures::AsX86_64InstructionSetFeatures() const {
+ DCHECK_EQ(kX86_64, GetInstructionSet());
+ return down_cast<const X86_64InstructionSetFeatures*>(this);
+}
+
+bool InstructionSetFeatures::FindVariantInArray(const char* variants[], size_t num_variants,
+ const std::string& variant) {
+ const char** begin = variants;
+ const char** end = begin + num_variants;
+ return std::find(begin, end, variant) != end;
+}
+
+std::ostream& operator<<(std::ostream& os, const InstructionSetFeatures& rhs) {
+ os << "ISA: " << rhs.GetInstructionSet() << " Feature string: " << rhs.GetFeatureString();
+ return os;
+}
+
+} // namespace art
diff --git a/runtime/arch/instruction_set_features.h b/runtime/arch/instruction_set_features.h
new file mode 100644
index 0000000..2c6e699
--- /dev/null
+++ b/runtime/arch/instruction_set_features.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_INSTRUCTION_SET_FEATURES_H_
+#define ART_RUNTIME_ARCH_INSTRUCTION_SET_FEATURES_H_
+
+#include <ostream>
+#include <vector>
+
+#include "base/macros.h"
+#include "instruction_set.h"
+
+namespace art {
+
+class ArmInstructionSetFeatures;
+class Arm64InstructionSetFeatures;
+class MipsInstructionSetFeatures;
+class X86InstructionSetFeatures;
+class X86_64InstructionSetFeatures;
+
+// Abstraction used to describe features of different instruction sets.
+class InstructionSetFeatures {
+ public:
+ // Process a CPU variant string for the given ISA and create an InstructionSetFeatures.
+ static const InstructionSetFeatures* FromVariant(InstructionSet isa,
+ const std::string& variant,
+ std::string* error_msg);
+
+ // Parse a bitmap for the given isa and create an InstructionSetFeatures.
+ static const InstructionSetFeatures* FromBitmap(InstructionSet isa, uint32_t bitmap);
+
+ // Turn C pre-processor #defines into the equivalent instruction set features for kRuntimeISA.
+ static const InstructionSetFeatures* FromCppDefines();
+
+ // Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
+ static const InstructionSetFeatures* FromCpuInfo();
+
+ // Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
+ // InstructionSetFeatures.
+ static const InstructionSetFeatures* FromHwcap();
+
+ // Use assembly tests of the current runtime (ie kRuntimeISA) to determine the
+ // InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
+ static const InstructionSetFeatures* FromAssembly();
+
+ // Parse a string of the form "div,-atomic_ldrd_strd" adding and removing these features to
+ // create a new InstructionSetFeatures.
+ const InstructionSetFeatures* AddFeaturesFromString(const std::string& feature_list,
+ std::string* error_msg) const WARN_UNUSED;
+
+ // Are these features the same as the other given features?
+ virtual bool Equals(const InstructionSetFeatures* other) const = 0;
+
+ // Return the ISA these features relate to.
+ virtual InstructionSet GetInstructionSet() const = 0;
+
+ // Return a bitmap that represents the features. ISA specific.
+ virtual uint32_t AsBitmap() const = 0;
+
+ // Return a string of the form "div,lpae" or "none".
+ virtual std::string GetFeatureString() const = 0;
+
+ // Does the instruction set variant require instructions for correctness with SMP?
+ bool IsSmp() const {
+ return smp_;
+ }
+
+ // Down cast this ArmInstructionFeatures.
+ const ArmInstructionSetFeatures* AsArmInstructionSetFeatures() const;
+
+ // Down cast this Arm64InstructionFeatures.
+ const Arm64InstructionSetFeatures* AsArm64InstructionSetFeatures() const;
+
+ // Down cast this MipsInstructionFeatures.
+ const MipsInstructionSetFeatures* AsMipsInstructionSetFeatures() const;
+
+ // Down cast this X86InstructionFeatures.
+ const X86InstructionSetFeatures* AsX86InstructionSetFeatures() const;
+
+ // Down cast this X86_64InstructionFeatures.
+ const X86_64InstructionSetFeatures* AsX86_64InstructionSetFeatures() const;
+
+ virtual ~InstructionSetFeatures() {}
+
+ protected:
+ explicit InstructionSetFeatures(bool smp) : smp_(smp) {}
+
+ // Returns true if variant appears in the array variants.
+ static bool FindVariantInArray(const char* variants[], size_t num_variants,
+ const std::string& variant);
+
+ // Add architecture specific features in sub-classes.
+ virtual const InstructionSetFeatures*
+ AddFeaturesFromSplitString(bool smp, const std::vector<std::string>& features,
+ std::string* error_msg) const = 0;
+
+ private:
+ const bool smp_;
+
+ DISALLOW_COPY_AND_ASSIGN(InstructionSetFeatures);
+};
+std::ostream& operator<<(std::ostream& os, const InstructionSetFeatures& rhs);
+
+} // namespace art
+
+#endif // ART_RUNTIME_ARCH_INSTRUCTION_SET_FEATURES_H_
diff --git a/runtime/arch/instruction_set_features_test.cc b/runtime/arch/instruction_set_features_test.cc
new file mode 100644
index 0000000..83571cf
--- /dev/null
+++ b/runtime/arch/instruction_set_features_test.cc
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_set_features.h"
+
+#include <gtest/gtest.h>
+
+#ifdef HAVE_ANDROID_OS
+#include "cutils/properties.h"
+#endif
+
+#include "base/stringprintf.h"
+
+namespace art {
+
+#ifdef HAVE_ANDROID_OS
+TEST(InstructionSetFeaturesTest, FeaturesFromSystemPropertyVariant) {
+ // Take the default set of instruction features from the build.
+ std::unique_ptr<const InstructionSetFeatures> instruction_set_features(
+ InstructionSetFeatures::FromCppDefines());
+
+ // Read the features property.
+ std::string key = StringPrintf("dalvik.vm.isa.%s.variant", GetInstructionSetString(kRuntimeISA));
+ char dex2oat_isa_variant[PROPERTY_VALUE_MAX];
+ if (property_get(key.c_str(), dex2oat_isa_variant, nullptr) > 0) {
+ // Use features from property to build InstructionSetFeatures and check against build's
+ // features.
+ std::string error_msg;
+ std::unique_ptr<const InstructionSetFeatures> property_features(
+ InstructionSetFeatures::FromVariant(kRuntimeISA, dex2oat_isa_variant, &error_msg));
+ ASSERT_TRUE(property_features.get() != nullptr) << error_msg;
+
+ EXPECT_TRUE(property_features->Equals(instruction_set_features.get()))
+ << "System property features: " << *property_features.get()
+ << "\nFeatures from build: " << *instruction_set_features.get();
+ }
+}
+
+TEST(InstructionSetFeaturesTest, FeaturesFromSystemPropertyString) {
+ // Take the default set of instruction features from the build.
+ std::unique_ptr<const InstructionSetFeatures> instruction_set_features(
+ InstructionSetFeatures::FromCppDefines());
+
+ // Read the features property.
+ std::string key = StringPrintf("dalvik.vm.isa.%s.features", GetInstructionSetString(kRuntimeISA));
+ char dex2oat_isa_features[PROPERTY_VALUE_MAX];
+ if (property_get(key.c_str(), dex2oat_isa_features, nullptr) > 0) {
+ // Use features from property to build InstructionSetFeatures and check against build's
+ // features.
+ std::string error_msg;
+ std::unique_ptr<const InstructionSetFeatures> base_features(
+ InstructionSetFeatures::FromVariant(kRuntimeISA, "default", &error_msg));
+ ASSERT_TRUE(base_features.get() != nullptr) << error_msg;
+
+ std::unique_ptr<const InstructionSetFeatures> property_features(
+ base_features->AddFeaturesFromString(dex2oat_isa_features, &error_msg));
+ ASSERT_TRUE(property_features.get() != nullptr) << error_msg;
+
+ EXPECT_TRUE(property_features->Equals(instruction_set_features.get()))
+ << "System property features: " << *property_features.get()
+ << "\nFeatures from build: " << *instruction_set_features.get();
+ }
+}
+
+#if defined(__arm__)
+TEST(InstructionSetFeaturesTest, DISABLED_FeaturesFromCpuInfo) {
+ LOG(WARNING) << "Test disabled due to buggy ARM kernels";
+#else
+TEST(InstructionSetFeaturesTest, FeaturesFromCpuInfo) {
+#endif
+ // Take the default set of instruction features from the build.
+ std::unique_ptr<const InstructionSetFeatures> instruction_set_features(
+ InstructionSetFeatures::FromCppDefines());
+
+ // Check we get the same instruction set features using /proc/cpuinfo.
+ std::unique_ptr<const InstructionSetFeatures> cpuinfo_features(
+ InstructionSetFeatures::FromCpuInfo());
+ EXPECT_TRUE(cpuinfo_features->Equals(instruction_set_features.get()))
+ << "CPU Info features: " << *cpuinfo_features.get()
+ << "\nFeatures from build: " << *instruction_set_features.get();
+}
+#endif
+
+#ifndef HAVE_ANDROID_OS
+TEST(InstructionSetFeaturesTest, HostFeaturesFromCppDefines) {
+ std::string error_msg;
+ std::unique_ptr<const InstructionSetFeatures> default_features(
+ InstructionSetFeatures::FromVariant(kRuntimeISA, "default", &error_msg));
+ ASSERT_TRUE(error_msg.empty());
+
+ std::unique_ptr<const InstructionSetFeatures> cpp_features(
+ InstructionSetFeatures::FromCppDefines());
+ EXPECT_TRUE(default_features->Equals(cpp_features.get()))
+ << "Default variant features: " << *default_features.get()
+ << "\nFeatures from build: " << *cpp_features.get();
+}
+#endif
+
+#if defined(__arm__)
+TEST(InstructionSetFeaturesTest, DISABLED_FeaturesFromHwcap) {
+ LOG(WARNING) << "Test disabled due to buggy ARM kernels";
+#else
+TEST(InstructionSetFeaturesTest, FeaturesFromHwcap) {
+#endif
+ // Take the default set of instruction features from the build.
+ std::unique_ptr<const InstructionSetFeatures> instruction_set_features(
+ InstructionSetFeatures::FromCppDefines());
+
+ // Check we get the same instruction set features using AT_HWCAP.
+ std::unique_ptr<const InstructionSetFeatures> hwcap_features(
+ InstructionSetFeatures::FromHwcap());
+ EXPECT_TRUE(hwcap_features->Equals(instruction_set_features.get()))
+ << "Hwcap features: " << *hwcap_features.get()
+ << "\nFeatures from build: " << *instruction_set_features.get();
+}
+
+
+#if defined(__arm__)
+TEST(InstructionSetFeaturesTest, DISABLED_FeaturesFromAssembly) {
+ LOG(WARNING) << "Test disabled due to buggy ARM kernels";
+#else
+TEST(InstructionSetFeaturesTest, FeaturesFromAssembly) {
+#endif
+ // Take the default set of instruction features from the build.
+ std::unique_ptr<const InstructionSetFeatures> instruction_set_features(
+ InstructionSetFeatures::FromCppDefines());
+
+ // Check we get the same instruction set features using assembly tests.
+ std::unique_ptr<const InstructionSetFeatures> assembly_features(
+ InstructionSetFeatures::FromAssembly());
+ EXPECT_TRUE(assembly_features->Equals(instruction_set_features.get()))
+ << "Assembly features: " << *assembly_features.get()
+ << "\nFeatures from build: " << *instruction_set_features.get();
+}
+
+} // namespace art
diff --git a/runtime/arch/instruction_set_test.cc b/runtime/arch/instruction_set_test.cc
new file mode 100644
index 0000000..932ef32
--- /dev/null
+++ b/runtime/arch/instruction_set_test.cc
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_set.h"
+
+#include <gtest/gtest.h>
+
+#include "base/stringprintf.h"
+
+namespace art {
+
+TEST(InstructionSetTest, GetInstructionSetFromString) {
+ EXPECT_EQ(kArm, GetInstructionSetFromString("arm"));
+ EXPECT_EQ(kArm64, GetInstructionSetFromString("arm64"));
+ EXPECT_EQ(kX86, GetInstructionSetFromString("x86"));
+ EXPECT_EQ(kX86_64, GetInstructionSetFromString("x86_64"));
+ EXPECT_EQ(kMips, GetInstructionSetFromString("mips"));
+ EXPECT_EQ(kNone, GetInstructionSetFromString("none"));
+ EXPECT_EQ(kNone, GetInstructionSetFromString("random-string"));
+}
+
+TEST(InstructionSetTest, GetInstructionSetString) {
+ EXPECT_STREQ("arm", GetInstructionSetString(kArm));
+ EXPECT_STREQ("arm", GetInstructionSetString(kThumb2));
+ EXPECT_STREQ("arm64", GetInstructionSetString(kArm64));
+ EXPECT_STREQ("x86", GetInstructionSetString(kX86));
+ EXPECT_STREQ("x86_64", GetInstructionSetString(kX86_64));
+ EXPECT_STREQ("mips", GetInstructionSetString(kMips));
+ EXPECT_STREQ("none", GetInstructionSetString(kNone));
+}
+
+TEST(InstructionSetTest, TestRoundTrip) {
+ EXPECT_EQ(kRuntimeISA, GetInstructionSetFromString(GetInstructionSetString(kRuntimeISA)));
+}
+
+TEST(InstructionSetTest, PointerSize) {
+ EXPECT_EQ(sizeof(void*), GetInstructionSetPointerSize(kRuntimeISA));
+}
+
+} // namespace art
diff --git a/runtime/arch/mips/instruction_set_features_mips.cc b/runtime/arch/mips/instruction_set_features_mips.cc
new file mode 100644
index 0000000..efec993
--- /dev/null
+++ b/runtime/arch/mips/instruction_set_features_mips.cc
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_set_features_mips.h"
+
+#include <fstream>
+#include <sstream>
+
+#include "base/stringprintf.h"
+#include "utils.h" // For Trim.
+
+namespace art {
+
+const MipsInstructionSetFeatures* MipsInstructionSetFeatures::FromVariant(
+ const std::string& variant, std::string* error_msg ATTRIBUTE_UNUSED) {
+ if (variant != "default") {
+ // No variant-specific features yet; warn so unexpected variants are visible.
+ LOG(WARNING) << "Unexpected CPU variant for Mips using defaults: " << variant;
+ }
+ bool smp = true; // Conservative default.
+ bool fpu_32bit = true;
+ bool mips_isa_gte2 = true;
+ return new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2);
+}
+
+const MipsInstructionSetFeatures* MipsInstructionSetFeatures::FromBitmap(uint32_t bitmap) {
+ bool smp = (bitmap & kSmpBitfield) != 0;
+ bool fpu_32bit = (bitmap & kFpu32Bitfield) != 0;
+ bool mips_isa_gte2 = (bitmap & kIsaRevGte2Bitfield) != 0;
+ return new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2);
+}
+
+const MipsInstructionSetFeatures* MipsInstructionSetFeatures::FromCppDefines() {
+#if defined(HAVE_ANDROID_OS) && (ANDROID_SMP == 0)
+ const bool smp = false;
+#else
+ const bool smp = true;
+#endif
+
+ // TODO: here we assume the FPU is always 32-bit.
+ const bool fpu_32bit = true;
+
+#if __mips_isa_rev >= 2
+ const bool mips_isa_gte2 = true;
+#else
+ const bool mips_isa_gte2 = false;
+#endif
+
+ return new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2);
+}
+
+const MipsInstructionSetFeatures* MipsInstructionSetFeatures::FromCpuInfo() {
+ // Look in /proc/cpuinfo for features we need. Only use this when we can guarantee that
+ // the kernel puts the appropriate feature flags in here. Sometimes it doesn't.
+ bool smp = false;
+
+ // TODO: here we assume the FPU is always 32-bit.
+ const bool fpu_32bit = true;
+
+ // TODO: here we assume all MIPS processors are >= v2.
+#if __mips_isa_rev >= 2
+ const bool mips_isa_gte2 = true;
+#else
+ const bool mips_isa_gte2 = false;
+#endif
+
+ std::ifstream in("/proc/cpuinfo");
+ if (!in.fail()) {
+ while (!in.eof()) {
+ std::string line;
+ std::getline(in, line);
+ if (!in.eof()) {
+ LOG(INFO) << "cpuinfo line: " << line;
+ if (line.find("processor") != std::string::npos && line.find(": 1") != std::string::npos) {
+ smp = true;
+ }
+ }
+ }
+ in.close();
+ } else {
+ LOG(ERROR) << "Failed to open /proc/cpuinfo";
+ }
+ return new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2);
+}
+
+const MipsInstructionSetFeatures* MipsInstructionSetFeatures::FromHwcap() {
+ UNIMPLEMENTED(WARNING);
+ return FromCppDefines();
+}
+
+const MipsInstructionSetFeatures* MipsInstructionSetFeatures::FromAssembly() {
+ UNIMPLEMENTED(WARNING);
+ return FromCppDefines();
+}
+
+bool MipsInstructionSetFeatures::Equals(const InstructionSetFeatures* other) const {
+ if (kMips != other->GetInstructionSet()) {
+ return false;
+ }
+ const MipsInstructionSetFeatures* other_as_mips = other->AsMipsInstructionSetFeatures();
+ return (IsSmp() == other->IsSmp()) &&
+ (fpu_32bit_ == other_as_mips->fpu_32bit_) &&
+ (mips_isa_gte2_ == other_as_mips->mips_isa_gte2_);
+}
+
+uint32_t MipsInstructionSetFeatures::AsBitmap() const {
+ return (IsSmp() ? kSmpBitfield : 0) |
+ (fpu_32bit_ ? kFpu32Bitfield : 0) |
+ (mips_isa_gte2_ ? kIsaRevGte2Bitfield : 0);
+}
+
+std::string MipsInstructionSetFeatures::GetFeatureString() const {
+ std::string result;
+ if (IsSmp()) {
+ result += "smp";
+ } else {
+ result += "-smp";
+ }
+ if (fpu_32bit_) {
+ result += ",fpu32";
+ } else {
+ result += ",-fpu32";
+ }
+ if (mips_isa_gte2_) {
+ result += ",mips2";
+ } else {
+ result += ",-mips2";
+ }
+ return result;
+}
+
+const InstructionSetFeatures* MipsInstructionSetFeatures::AddFeaturesFromSplitString(
+ const bool smp, const std::vector<std::string>& features, std::string* error_msg) const {
+ bool fpu_32bit = fpu_32bit_;
+ bool mips_isa_gte2 = mips_isa_gte2_;
+ for (auto i = features.begin(); i != features.end(); i++) {
+ std::string feature = Trim(*i);
+ if (feature == "fpu32") {
+ fpu_32bit = true;
+ } else if (feature == "-fpu32") {
+ fpu_32bit = false;
+ } else if (feature == "mips2") {
+ mips_isa_gte2 = true;
+ } else if (feature == "-mips2") {
+ mips_isa_gte2 = false;
+ } else {
+ *error_msg = StringPrintf("Unknown instruction set feature: '%s'", feature.c_str());
+ return nullptr;
+ }
+ }
+ return new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2);
+}
+
+} // namespace art
diff --git a/runtime/arch/mips/instruction_set_features_mips.h b/runtime/arch/mips/instruction_set_features_mips.h
new file mode 100644
index 0000000..f7c64fe
--- /dev/null
+++ b/runtime/arch/mips/instruction_set_features_mips.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_MIPS_INSTRUCTION_SET_FEATURES_MIPS_H_
+#define ART_RUNTIME_ARCH_MIPS_INSTRUCTION_SET_FEATURES_MIPS_H_
+
+#include "arch/instruction_set_features.h"
+
+namespace art {
+
+// Instruction set features relevant to the MIPS architecture.
+class MipsInstructionSetFeatures FINAL : public InstructionSetFeatures {
+ public:
+ // Process a CPU variant string like "r4000" and create InstructionSetFeatures.
+ static const MipsInstructionSetFeatures* FromVariant(const std::string& variant,
+ std::string* error_msg);
+
+ // Parse a bitmap and create an InstructionSetFeatures.
+ static const MipsInstructionSetFeatures* FromBitmap(uint32_t bitmap);
+
+ // Turn C pre-processor #defines into the equivalent instruction set features.
+ static const MipsInstructionSetFeatures* FromCppDefines();
+
+ // Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
+ static const MipsInstructionSetFeatures* FromCpuInfo();
+
+ // Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
+ // InstructionSetFeatures.
+ static const MipsInstructionSetFeatures* FromHwcap();
+
+ // Use assembly tests of the current runtime (ie kRuntimeISA) to determine the
+ // InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
+ static const MipsInstructionSetFeatures* FromAssembly();
+
+ bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
+
+ InstructionSet GetInstructionSet() const OVERRIDE {
+ return kMips;
+ }
+
+ uint32_t AsBitmap() const OVERRIDE;
+
+ std::string GetFeatureString() const OVERRIDE;
+
+ // Is this an ISA revision greater than 2 opening up new opcodes.
+ bool IsMipsIsaRevGreaterThanEqual2() const {
+ return mips_isa_gte2_;
+ }
+
+ // Floating point double registers are encoded differently based on whether the Status.FR bit is
+ // set. When the FR bit is 0 then the FPU is 32-bit; when it is 1, 64-bit. Return true if the code should
+ // be generated assuming Status.FR is 0.
+ bool Is32BitFloatingPoint() const {
+ return fpu_32bit_;
+ }
+
+ virtual ~MipsInstructionSetFeatures() {}
+
+ protected:
+ // Parse a vector of the form "fpu32", "mips2" adding these to a new MipsInstructionSetFeatures.
+ virtual const InstructionSetFeatures*
+ AddFeaturesFromSplitString(const bool smp, const std::vector<std::string>& features,
+ std::string* error_msg) const OVERRIDE;
+
+ private:
+ MipsInstructionSetFeatures(bool smp, bool fpu_32bit, bool mips_isa_gte2)
+ : InstructionSetFeatures(smp), fpu_32bit_(fpu_32bit), mips_isa_gte2_(mips_isa_gte2) {
+ }
+
+ // Bitmap positions for encoding features as a bitmap.
+ enum {
+ kSmpBitfield = 1,
+ kFpu32Bitfield = 2,
+ kIsaRevGte2Bitfield = 4,
+ };
+
+ const bool fpu_32bit_;
+ const bool mips_isa_gte2_;
+
+ DISALLOW_COPY_AND_ASSIGN(MipsInstructionSetFeatures);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_ARCH_MIPS_INSTRUCTION_SET_FEATURES_MIPS_H_
diff --git a/runtime/arch/mips/instruction_set_features_mips_test.cc b/runtime/arch/mips/instruction_set_features_mips_test.cc
new file mode 100644
index 0000000..9b81ce2
--- /dev/null
+++ b/runtime/arch/mips/instruction_set_features_mips_test.cc
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_set_features_mips.h"
+
+#include <gtest/gtest.h>
+
+namespace art {
+
+TEST(MipsInstructionSetFeaturesTest, MipsFeatures) {
+ std::string error_msg;
+ std::unique_ptr<const InstructionSetFeatures> mips_features(
+ InstructionSetFeatures::FromVariant(kMips, "default", &error_msg));
+ ASSERT_TRUE(mips_features.get() != nullptr) << error_msg;
+ EXPECT_EQ(mips_features->GetInstructionSet(), kMips);
+ EXPECT_TRUE(mips_features->Equals(mips_features.get()));
+ EXPECT_STREQ("smp,fpu32,mips2", mips_features->GetFeatureString().c_str());
+ EXPECT_EQ(mips_features->AsBitmap(), 7U);
+}
+
+} // namespace art
diff --git a/runtime/arch/x86/instruction_set_features_x86.cc b/runtime/arch/x86/instruction_set_features_x86.cc
new file mode 100644
index 0000000..32cf909
--- /dev/null
+++ b/runtime/arch/x86/instruction_set_features_x86.cc
@@ -0,0 +1,280 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_set_features_x86.h"
+
+#include <fstream>
+#include <sstream>
+
+#include "arch/x86_64/instruction_set_features_x86_64.h"
+#include "base/stringprintf.h"
+#include "utils.h" // For Trim.
+
+namespace art {
+
+const X86InstructionSetFeatures* X86InstructionSetFeatures::FromVariant(
+ const std::string& variant, std::string* error_msg ATTRIBUTE_UNUSED,
+ bool x86_64) {
+ bool known_variant = false;
+ bool smp = true; // Conservative default.
+ static const char* x86_variants_with_ssse3[] = {
+ "atom"
+ };
+ bool has_SSSE3 = FindVariantInArray(x86_variants_with_ssse3, arraysize(x86_variants_with_ssse3),
+ variant);
+ bool has_SSE4_1 = false;
+ bool has_SSE4_2 = false;
+ bool has_AVX = false;
+ bool has_AVX2 = false;
+ if (!known_variant && variant != "default") {
+ // Unknown variants fall through to the conservative defaults above.
+ LOG(WARNING) << "Unexpected CPU variant for X86 using defaults: " << variant;
+ }
+
+ if (x86_64) {
+ return new X86_64InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
+ has_AVX2);
+ } else {
+ return new X86InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
+ has_AVX2);
+ }
+}
+
+const X86InstructionSetFeatures* X86InstructionSetFeatures::FromBitmap(uint32_t bitmap,
+ bool x86_64) {
+ bool smp = (bitmap & kSmpBitfield) != 0;
+ bool has_SSSE3 = (bitmap & kSsse3Bitfield) != 0;
+ bool has_SSE4_1 = (bitmap & kSse4_1Bitfield) != 0;
+ bool has_SSE4_2 = (bitmap & kSse4_2Bitfield) != 0;
+ bool has_AVX = (bitmap & kAvxBitfield) != 0;
+ bool has_AVX2 = (bitmap & kAvx2Bitfield) != 0; // Was kAvxBitfield: must test the AVX2 bit.
+ if (x86_64) {
+ return new X86_64InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, has_AVX2);
+ } else {
+ return new X86InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
+ has_AVX2);
+ }
+}
+
+const X86InstructionSetFeatures* X86InstructionSetFeatures::FromCppDefines(bool x86_64) {
+#if defined(HAVE_ANDROID_OS) && (ANDROID_SMP == 0)
+ const bool smp = false;
+#else
+ const bool smp = true;
+#endif
+
+#ifndef __SSSE3__
+ const bool has_SSSE3 = false;
+#else
+ const bool has_SSSE3 = true;
+#endif
+
+#ifndef __SSE4_1__
+ const bool has_SSE4_1 = false;
+#else
+ const bool has_SSE4_1 = true;
+#endif
+
+#ifndef __SSE4_2__
+ const bool has_SSE4_2 = false;
+#else
+ const bool has_SSE4_2 = true;
+#endif
+
+#ifndef __AVX__
+ const bool has_AVX = false;
+#else
+ const bool has_AVX = true;
+#endif
+
+#ifndef __AVX2__
+ const bool has_AVX2 = false;
+#else
+ const bool has_AVX2 = true;
+#endif
+
+ if (x86_64) {
+ return new X86_64InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, has_AVX2);
+ } else {
+ return new X86InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
+ has_AVX2);
+ }
+}
+
+const X86InstructionSetFeatures* X86InstructionSetFeatures::FromCpuInfo(bool x86_64) {
+ // Look in /proc/cpuinfo for features we need. Only use this when we can guarantee that
+ // the kernel puts the appropriate feature flags in here. Sometimes it doesn't.
+ bool smp = false;
+ bool has_SSSE3 = false;
+ bool has_SSE4_1 = false;
+ bool has_SSE4_2 = false;
+ bool has_AVX = false;
+ bool has_AVX2 = false;
+
+ std::ifstream in("/proc/cpuinfo");
+ if (!in.fail()) {
+ while (!in.eof()) {
+ std::string line;
+ std::getline(in, line);
+ if (!in.eof()) {
+ LOG(INFO) << "cpuinfo line: " << line;
+ if (line.find("flags") != std::string::npos) {
+ LOG(INFO) << "found flags";
+ if (line.find("ssse3") != std::string::npos) {
+ has_SSSE3 = true;
+ }
+ if (line.find("sse4_1") != std::string::npos) {
+ has_SSE4_1 = true;
+ }
+ if (line.find("sse4_2") != std::string::npos) {
+ has_SSE4_2 = true;
+ }
+ if (line.find("avx") != std::string::npos) {
+ has_AVX = true;
+ }
+ if (line.find("avx2") != std::string::npos) {
+ has_AVX2 = true;
+ }
+ } else if (line.find("processor") != std::string::npos &&
+ line.find(": 1") != std::string::npos) {
+ smp = true;
+ }
+ }
+ }
+ in.close();
+ } else {
+ LOG(ERROR) << "Failed to open /proc/cpuinfo";
+ }
+ if (x86_64) {
+ return new X86_64InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, has_AVX2);
+ } else {
+ return new X86InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
+ has_AVX2);
+ }
+}
+
+const X86InstructionSetFeatures* X86InstructionSetFeatures::FromHwcap(bool x86_64) {
+ UNIMPLEMENTED(WARNING);
+ return FromCppDefines(x86_64);
+}
+
+const X86InstructionSetFeatures* X86InstructionSetFeatures::FromAssembly(bool x86_64) {
+ UNIMPLEMENTED(WARNING);
+ return FromCppDefines(x86_64);
+}
+
+bool X86InstructionSetFeatures::Equals(const InstructionSetFeatures* other) const {
+ if (GetInstructionSet() != other->GetInstructionSet()) {
+ return false;
+ }
+ const X86InstructionSetFeatures* other_as_x86 = other->AsX86InstructionSetFeatures();
+ return (IsSmp() == other->IsSmp()) &&
+ (has_SSSE3_ == other_as_x86->has_SSSE3_) &&
+ (has_SSE4_1_ == other_as_x86->has_SSE4_1_) &&
+ (has_SSE4_2_ == other_as_x86->has_SSE4_2_) &&
+ (has_AVX_ == other_as_x86->has_AVX_) &&
+ (has_AVX2_ == other_as_x86->has_AVX2_);
+}
+
+uint32_t X86InstructionSetFeatures::AsBitmap() const {
+ return (IsSmp() ? kSmpBitfield : 0) |
+ (has_SSSE3_ ? kSsse3Bitfield : 0) |
+ (has_SSE4_1_ ? kSse4_1Bitfield : 0) |
+ (has_SSE4_2_ ? kSse4_2Bitfield : 0) |
+ (has_AVX_ ? kAvxBitfield : 0) |
+ (has_AVX2_ ? kAvx2Bitfield : 0);
+}
+
+std::string X86InstructionSetFeatures::GetFeatureString() const {
+ std::string result;
+ if (IsSmp()) {
+ result += "smp";
+ } else {
+ result += "-smp";
+ }
+ if (has_SSSE3_) {
+ result += ",ssse3";
+ } else {
+ result += ",-ssse3";
+ }
+ if (has_SSE4_1_) {
+ result += ",sse4.1";
+ } else {
+ result += ",-sse4.1";
+ }
+ if (has_SSE4_2_) {
+ result += ",sse4.2";
+ } else {
+ result += ",-sse4.2";
+ }
+ if (has_AVX_) {
+ result += ",avx";
+ } else {
+ result += ",-avx";
+ }
+ if (has_AVX2_) {
+ result += ",avx2";
+ } else {
+ result += ",-avx2";
+ }
+ return result;
+}
+
+const InstructionSetFeatures* X86InstructionSetFeatures::AddFeaturesFromSplitString(
+ const bool smp, const std::vector<std::string>& features, bool x86_64,
+ std::string* error_msg) const {
+ bool has_SSSE3 = has_SSSE3_;
+ bool has_SSE4_1 = has_SSE4_1_;
+ bool has_SSE4_2 = has_SSE4_2_;
+ bool has_AVX = has_AVX_;
+ bool has_AVX2 = has_AVX2_;
+ for (auto i = features.begin(); i != features.end(); i++) {
+ std::string feature = Trim(*i);
+ if (feature == "ssse3") {
+ has_SSSE3 = true;
+ } else if (feature == "-ssse3") {
+ has_SSSE3 = false;
+ } else if (feature == "sse4.1") {
+ has_SSE4_1 = true;
+ } else if (feature == "-sse4.1") {
+ has_SSE4_1 = false;
+ } else if (feature == "sse4.2") {
+ has_SSE4_2 = true;
+ } else if (feature == "-sse4.2") {
+ has_SSE4_2 = false;
+ } else if (feature == "avx") {
+ has_AVX = true;
+ } else if (feature == "-avx") {
+ has_AVX = false;
+ } else if (feature == "avx2") {
+ has_AVX2 = true;
+ } else if (feature == "-avx2") {
+ has_AVX2 = false;
+ } else {
+ *error_msg = StringPrintf("Unknown instruction set feature: '%s'", feature.c_str());
+ return nullptr;
+ }
+ }
+ if (x86_64) {
+ return new X86_64InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
+ has_AVX2);
+ } else {
+ return new X86InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
+ has_AVX2);
+ }
+}
+
+} // namespace art
diff --git a/runtime/arch/x86/instruction_set_features_x86.h b/runtime/arch/x86/instruction_set_features_x86.h
new file mode 100644
index 0000000..926fabb
--- /dev/null
+++ b/runtime/arch/x86/instruction_set_features_x86.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_X86_INSTRUCTION_SET_FEATURES_X86_H_
+#define ART_RUNTIME_ARCH_X86_INSTRUCTION_SET_FEATURES_X86_H_
+
+#include "arch/instruction_set_features.h"
+
+namespace art {
+
+// Instruction set features relevant to the X86 architecture.
+class X86InstructionSetFeatures : public InstructionSetFeatures {
+ public:
+ // Process a CPU variant string like "atom" or "nehalem" and create InstructionSetFeatures.
+ static const X86InstructionSetFeatures* FromVariant(const std::string& variant,
+ std::string* error_msg,
+ bool x86_64 = false);
+
+ // Parse a bitmap and create an InstructionSetFeatures.
+ static const X86InstructionSetFeatures* FromBitmap(uint32_t bitmap, bool x86_64 = false);
+
+ // Turn C pre-processor #defines into the equivalent instruction set features.
+ static const X86InstructionSetFeatures* FromCppDefines(bool x86_64 = false);
+
+ // Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
+ static const X86InstructionSetFeatures* FromCpuInfo(bool x86_64 = false);
+
+ // Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
+ // InstructionSetFeatures.
+ static const X86InstructionSetFeatures* FromHwcap(bool x86_64 = false);
+
+ // Use assembly tests of the current runtime (ie kRuntimeISA) to determine the
+ // InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
+ static const X86InstructionSetFeatures* FromAssembly(bool x86_64 = false);
+
+ bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
+
+ virtual InstructionSet GetInstructionSet() const OVERRIDE {
+ return kX86;
+ }
+
+ uint32_t AsBitmap() const OVERRIDE;
+
+ std::string GetFeatureString() const OVERRIDE;
+
+ virtual ~X86InstructionSetFeatures() {}
+
+ protected:
+ // Parse a string of the form "ssse3" adding these to a new InstructionSetFeatures.
+ virtual const InstructionSetFeatures*
+ AddFeaturesFromSplitString(const bool smp, const std::vector<std::string>& features,
+ std::string* error_msg) const OVERRIDE {
+ return AddFeaturesFromSplitString(smp, features, false, error_msg);
+ }
+
+ const InstructionSetFeatures*
+ AddFeaturesFromSplitString(const bool smp, const std::vector<std::string>& features,
+ bool x86_64, std::string* error_msg) const;
+
+ X86InstructionSetFeatures(bool smp, bool has_SSSE3, bool has_SSE4_1, bool has_SSE4_2,
+ bool has_AVX, bool has_AVX2)
+ : InstructionSetFeatures(smp), has_SSSE3_(has_SSSE3), has_SSE4_1_(has_SSE4_1),
+ has_SSE4_2_(has_SSE4_2), has_AVX_(has_AVX), has_AVX2_(has_AVX2) {
+ }
+
+ private:
+ // Bitmap positions for encoding features as a bitmap.
+ enum {
+ kSmpBitfield = 1,
+ kSsse3Bitfield = 2,
+ kSse4_1Bitfield = 4,
+ kSse4_2Bitfield = 8,
+ kAvxBitfield = 16,
+ kAvx2Bitfield = 32,
+ };
+
+ const bool has_SSSE3_; // x86 128bit SIMD - Supplemental SSE.
+ const bool has_SSE4_1_; // x86 128bit SIMD SSE4.1.
+ const bool has_SSE4_2_; // x86 128bit SIMD SSE4.2.
+ const bool has_AVX_; // x86 256bit SIMD AVX.
+ const bool has_AVX2_; // x86 256bit SIMD AVX 2.0.
+
+ DISALLOW_COPY_AND_ASSIGN(X86InstructionSetFeatures);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_ARCH_X86_INSTRUCTION_SET_FEATURES_X86_H_
diff --git a/runtime/arch/x86/instruction_set_features_x86_test.cc b/runtime/arch/x86/instruction_set_features_x86_test.cc
new file mode 100644
index 0000000..d231beb
--- /dev/null
+++ b/runtime/arch/x86/instruction_set_features_x86_test.cc
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_set_features_x86.h"
+
+#include <gtest/gtest.h>
+
+namespace art {
+
+TEST(X86InstructionSetFeaturesTest, X86FeaturesFromDefaultVariant) {
+ std::string error_msg;
+ std::unique_ptr<const InstructionSetFeatures> x86_features(
+ InstructionSetFeatures::FromVariant(kX86, "default", &error_msg));
+ ASSERT_TRUE(x86_features.get() != nullptr) << error_msg;
+ EXPECT_EQ(x86_features->GetInstructionSet(), kX86);
+ EXPECT_TRUE(x86_features->Equals(x86_features.get()));
+ EXPECT_STREQ("smp,-ssse3,-sse4.1,-sse4.2,-avx,-avx2", x86_features->GetFeatureString().c_str());
+ EXPECT_EQ(x86_features->AsBitmap(), 1U);
+}
+
+TEST(X86InstructionSetFeaturesTest, X86FeaturesFromAtomVariant) {
+ // Build features for a 32-bit x86 atom processor.
+ std::string error_msg;
+ std::unique_ptr<const InstructionSetFeatures> x86_features(
+ InstructionSetFeatures::FromVariant(kX86, "atom", &error_msg));
+ ASSERT_TRUE(x86_features.get() != nullptr) << error_msg;
+ EXPECT_EQ(x86_features->GetInstructionSet(), kX86);
+ EXPECT_TRUE(x86_features->Equals(x86_features.get()));
+ EXPECT_STREQ("smp,ssse3,-sse4.1,-sse4.2,-avx,-avx2", x86_features->GetFeatureString().c_str());
+ EXPECT_EQ(x86_features->AsBitmap(), 3U);
+
+ // Build features for a 32-bit x86 default processor.
+ std::unique_ptr<const InstructionSetFeatures> x86_default_features(
+ InstructionSetFeatures::FromVariant(kX86, "default", &error_msg));
+ ASSERT_TRUE(x86_default_features.get() != nullptr) << error_msg;
+ EXPECT_EQ(x86_default_features->GetInstructionSet(), kX86);
+ EXPECT_TRUE(x86_default_features->Equals(x86_default_features.get()));
+ EXPECT_STREQ("smp,-ssse3,-sse4.1,-sse4.2,-avx,-avx2",
+ x86_default_features->GetFeatureString().c_str());
+ EXPECT_EQ(x86_default_features->AsBitmap(), 1U);
+
+ // Build features for a 64-bit x86-64 atom processor.
+ std::unique_ptr<const InstructionSetFeatures> x86_64_features(
+ InstructionSetFeatures::FromVariant(kX86_64, "atom", &error_msg));
+ ASSERT_TRUE(x86_64_features.get() != nullptr) << error_msg;
+ EXPECT_EQ(x86_64_features->GetInstructionSet(), kX86_64);
+ EXPECT_TRUE(x86_64_features->Equals(x86_64_features.get()));
+ EXPECT_STREQ("smp,ssse3,-sse4.1,-sse4.2,-avx,-avx2",
+ x86_64_features->GetFeatureString().c_str());
+ EXPECT_EQ(x86_64_features->AsBitmap(), 3U);
+
+ EXPECT_FALSE(x86_64_features->Equals(x86_features.get()));
+ EXPECT_FALSE(x86_64_features->Equals(x86_default_features.get()));
+ EXPECT_FALSE(x86_features->Equals(x86_default_features.get()));
+}
+
+} // namespace art
diff --git a/runtime/arch/x86_64/instruction_set_features_x86_64.h b/runtime/arch/x86_64/instruction_set_features_x86_64.h
new file mode 100644
index 0000000..3280177
--- /dev/null
+++ b/runtime/arch/x86_64/instruction_set_features_x86_64.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_X86_64_INSTRUCTION_SET_FEATURES_X86_64_H_
+#define ART_RUNTIME_ARCH_X86_64_INSTRUCTION_SET_FEATURES_X86_64_H_
+
+#include "arch/x86/instruction_set_features_x86.h"
+
+namespace art {
+
+// Instruction set features relevant to the X86_64 architecture.
+class X86_64InstructionSetFeatures FINAL : public X86InstructionSetFeatures {
+ public:
+ // Process a CPU variant string like "atom" or "nehalem" and create InstructionSetFeatures.
+ static const X86_64InstructionSetFeatures* FromVariant(const std::string& variant,
+ std::string* error_msg) {
+ return X86InstructionSetFeatures::FromVariant(variant, error_msg, true)
+ ->AsX86_64InstructionSetFeatures();
+ }
+
+ // Parse a bitmap and create an InstructionSetFeatures.
+ static const X86_64InstructionSetFeatures* FromBitmap(uint32_t bitmap) {
+ return X86InstructionSetFeatures::FromBitmap(bitmap, true)->AsX86_64InstructionSetFeatures();
+ }
+
+ // Turn C pre-processor #defines into the equivalent instruction set features.
+ static const X86_64InstructionSetFeatures* FromCppDefines() {
+ return X86InstructionSetFeatures::FromCppDefines(true)->AsX86_64InstructionSetFeatures();
+ }
+
+ // Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
+ static const X86_64InstructionSetFeatures* FromCpuInfo() {
+ return X86InstructionSetFeatures::FromCpuInfo(true)->AsX86_64InstructionSetFeatures();
+ }
+
+ // Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
+ // InstructionSetFeatures.
+ static const X86_64InstructionSetFeatures* FromHwcap() {
+ return X86InstructionSetFeatures::FromHwcap(true)->AsX86_64InstructionSetFeatures();
+ }
+
+ // Use assembly tests of the current runtime (ie kRuntimeISA) to determine the
+ // InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
+ static const X86_64InstructionSetFeatures* FromAssembly() {
+ return X86InstructionSetFeatures::FromAssembly(true)->AsX86_64InstructionSetFeatures();
+ }
+
+ InstructionSet GetInstructionSet() const OVERRIDE {
+ return kX86_64;
+ }
+
+ virtual ~X86_64InstructionSetFeatures() {}
+
+ protected:
+ // Parse a string of the form "ssse3" adding these to a new InstructionSetFeatures.
+ const InstructionSetFeatures*
+ AddFeaturesFromSplitString(const bool smp, const std::vector<std::string>& features,
+ std::string* error_msg) const OVERRIDE {
+ return X86InstructionSetFeatures::AddFeaturesFromSplitString(smp, features, true, error_msg);
+ }
+
+ private:
+ X86_64InstructionSetFeatures(bool smp, bool has_SSSE3, bool has_SSE4_1, bool has_SSE4_2,
+ bool has_AVX, bool has_AVX2)
+ : X86InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, has_AVX2) {
+ }
+
+ friend class X86InstructionSetFeatures;
+
+ DISALLOW_COPY_AND_ASSIGN(X86_64InstructionSetFeatures);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_ARCH_X86_64_INSTRUCTION_SET_FEATURES_X86_64_H_
diff --git a/runtime/arch/x86_64/instruction_set_features_x86_64_test.cc b/runtime/arch/x86_64/instruction_set_features_x86_64_test.cc
new file mode 100644
index 0000000..5171080
--- /dev/null
+++ b/runtime/arch/x86_64/instruction_set_features_x86_64_test.cc
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_set_features_x86_64.h"
+
+#include <gtest/gtest.h>
+
+namespace art {
+
+TEST(X86_64InstructionSetFeaturesTest, X86Features) {
+ std::string error_msg;
+ std::unique_ptr<const InstructionSetFeatures> x86_64_features(
+ InstructionSetFeatures::FromVariant(kX86_64, "default", &error_msg));
+ ASSERT_TRUE(x86_64_features.get() != nullptr) << error_msg;
+ EXPECT_EQ(x86_64_features->GetInstructionSet(), kX86_64);
+ EXPECT_TRUE(x86_64_features->Equals(x86_64_features.get()));
+ EXPECT_STREQ("smp,-ssse3,-sse4.1,-sse4.2,-avx,-avx2",
+ x86_64_features->GetFeatureString().c_str());
+ EXPECT_EQ(x86_64_features->AsBitmap(), 1U);
+}
+
+} // namespace art
diff --git a/runtime/base/logging.cc b/runtime/base/logging.cc
index d3a2655..b781d60 100644
--- a/runtime/base/logging.cc
+++ b/runtime/base/logging.cc
@@ -236,4 +236,28 @@
#endif
}
+void LogMessage::LogLineLowStack(const char* file, unsigned int line, LogSeverity log_severity,
+ const char* message) {
+#ifdef HAVE_ANDROID_OS
+ // TODO: be more conservative on stack usage here.
+ LogLine(file, line, log_severity, message);
+#else
+ static const char* log_characters = "VDIWEFF";
+ CHECK_EQ(strlen(log_characters), INTERNAL_FATAL + 1U);
+
+ const char* program_name = ProgramInvocationShortName();
+ write(STDERR_FILENO, program_name, strlen(program_name));
+ write(STDERR_FILENO, " ", 1);
+ write(STDERR_FILENO, &log_characters[log_severity], 1);
+ write(STDERR_FILENO, " ", 1);
+ // TODO: pid and tid.
+ write(STDERR_FILENO, file, strlen(file));
+ // TODO: line.
+ UNUSED(line);
+ write(STDERR_FILENO, "] ", 2);
+ write(STDERR_FILENO, message, strlen(message));
+ write(STDERR_FILENO, "\n", 1);
+#endif
+}
+
} // namespace art
diff --git a/runtime/base/logging.h b/runtime/base/logging.h
index baa83e3..ae83e33 100644
--- a/runtime/base/logging.h
+++ b/runtime/base/logging.h
@@ -244,6 +244,10 @@
// The routine that performs the actual logging.
static void LogLine(const char* file, unsigned int line, LogSeverity severity, const char* msg);
+ // A variant of the above for use with little stack.
+ static void LogLineLowStack(const char* file, unsigned int line, LogSeverity severity,
+ const char* msg);
+
private:
const std::unique_ptr<LogMessageData> data_;
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 423ea77..4957988 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -57,7 +57,6 @@
Mutex* Locks::reference_queue_weak_references_lock_ = nullptr;
Mutex* Locks::runtime_shutdown_lock_ = nullptr;
Mutex* Locks::thread_list_lock_ = nullptr;
-Mutex* Locks::thread_list_suspend_thread_lock_ = nullptr;
Mutex* Locks::thread_suspend_count_lock_ = nullptr;
Mutex* Locks::trace_lock_ = nullptr;
Mutex* Locks::unexpected_signal_lock_ = nullptr;
@@ -202,7 +201,7 @@
if (i != level_) {
BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
// We expect waits to happen while holding the thread list suspend thread lock.
- if (held_mutex != NULL && i != kThreadListSuspendThreadLock) {
+ if (held_mutex != NULL) {
LOG(ERROR) << "Holding \"" << held_mutex->name_ << "\" "
<< "(level " << LockLevel(i) << ") while performing wait on "
<< "\"" << name_ << "\" (level " << level_ << ")";
@@ -918,16 +917,14 @@
DCHECK(mutator_lock_ != nullptr);
DCHECK(profiler_lock_ != nullptr);
DCHECK(thread_list_lock_ != nullptr);
- DCHECK(thread_list_suspend_thread_lock_ != nullptr);
DCHECK(thread_suspend_count_lock_ != nullptr);
DCHECK(trace_lock_ != nullptr);
DCHECK(unexpected_signal_lock_ != nullptr);
} else {
// Create global locks in level order from highest lock level to lowest.
- LockLevel current_lock_level = kThreadListSuspendThreadLock;
- DCHECK(thread_list_suspend_thread_lock_ == nullptr);
- thread_list_suspend_thread_lock_ =
- new Mutex("thread list suspend thread by .. lock", current_lock_level);
+ LockLevel current_lock_level = kInstrumentEntrypointsLock;
+ DCHECK(instrument_entrypoints_lock_ == nullptr);
+ instrument_entrypoints_lock_ = new Mutex("instrument entrypoint lock", current_lock_level);
#define UPDATE_CURRENT_LOCK_LEVEL(new_level) \
if (new_level >= current_lock_level) { \
@@ -938,10 +935,6 @@
} \
current_lock_level = new_level;
- UPDATE_CURRENT_LOCK_LEVEL(kInstrumentEntrypointsLock);
- DCHECK(instrument_entrypoints_lock_ == nullptr);
- instrument_entrypoints_lock_ = new Mutex("instrument entrypoint lock", current_lock_level);
-
UPDATE_CURRENT_LOCK_LEVEL(kMutatorLock);
DCHECK(mutator_lock_ == nullptr);
mutator_lock_ = new ReaderWriterMutex("mutator lock", current_lock_level);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index d589eb6..9c93cc6 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -101,7 +101,6 @@
kHeapBitmapLock,
kMutatorLock,
kInstrumentEntrypointsLock,
- kThreadListSuspendThreadLock,
kZygoteCreationLock,
kLockLevelCount // Must come last.
@@ -486,17 +485,8 @@
public:
static void Init();
- // There's a potential race for two threads to try to suspend each other and for both of them
- // to succeed and get blocked becoming runnable. This lock ensures that only one thread is
- // requesting suspension of another at any time. As the the thread list suspend thread logic
- // transitions to runnable, if the current thread were tried to be suspended then this thread
- // would block holding this lock until it could safely request thread suspension of the other
- // thread without that thread having a suspension request against this thread. This avoids a
- // potential deadlock cycle.
- static Mutex* thread_list_suspend_thread_lock_;
-
// Guards allocation entrypoint instrumenting.
- static Mutex* instrument_entrypoints_lock_ ACQUIRED_AFTER(thread_list_suspend_thread_lock_);
+ static Mutex* instrument_entrypoints_lock_;
// The mutator_lock_ is used to allow mutators to execute in a shared (reader) mode or to block
// mutators by having an exclusive (writer) owner. In normal execution each mutator thread holds
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index fb90b91..84cbcdc 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -185,15 +185,13 @@
}
// Shuffle fields forward, making use of gaps whenever possible.
template<int n>
-static void ShuffleForward(const size_t num_fields, size_t* current_field_idx,
+static void ShuffleForward(size_t* current_field_idx,
MemberOffset* field_offset,
- mirror::ObjectArray<mirror::ArtField>* fields,
std::deque<mirror::ArtField*>* grouped_and_sorted_fields,
FieldGaps* gaps)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(current_field_idx != nullptr);
DCHECK(grouped_and_sorted_fields != nullptr);
- DCHECK(fields != nullptr || (num_fields == 0 && grouped_and_sorted_fields->empty()));
DCHECK(gaps != nullptr);
DCHECK(field_offset != nullptr);
@@ -211,7 +209,6 @@
}
CHECK(type != Primitive::kPrimNot) << PrettyField(field); // should be primitive types
grouped_and_sorted_fields->pop_front();
- fields->Set<false>(*current_field_idx, field);
if (!gaps->empty() && gaps->top().size >= n) {
FieldGap gap = gaps->top();
gaps->pop();
@@ -5202,17 +5199,22 @@
Primitive::Type type1 = field1->GetTypeAsPrimitiveType();
Primitive::Type type2 = field2->GetTypeAsPrimitiveType();
if (type1 != type2) {
- bool is_primitive1 = type1 != Primitive::kPrimNot;
- bool is_primitive2 = type2 != Primitive::kPrimNot;
- if (type1 != type2) {
- if (is_primitive1 && is_primitive2) {
- // Larger primitive types go first.
- return Primitive::ComponentSize(type1) > Primitive::ComponentSize(type2);
- } else {
- // Reference always goes first.
- return !is_primitive1;
- }
+ if (type1 == Primitive::kPrimNot) {
+ // Reference always goes first.
+ return true;
}
+ if (type2 == Primitive::kPrimNot) {
+ // Reference always goes first.
+ return false;
+ }
+ size_t size1 = Primitive::ComponentSize(type1);
+ size_t size2 = Primitive::ComponentSize(type2);
+ if (size1 != size2) {
+ // Larger primitive types go first.
+ return size1 > size2;
+ }
+ // Primitive types differ but sizes match. Arbitrarily order by primitive type.
+ return type1 < type2;
}
// same basic group? then sort by string.
return strcmp(field1->GetName(), field2->GetName()) < 0;
@@ -5231,13 +5233,7 @@
// Initialize field_offset
MemberOffset field_offset(0);
if (is_static) {
- uint32_t base = sizeof(mirror::Class); // Static fields come after the class.
- if (klass->ShouldHaveEmbeddedImtAndVTable()) {
- // Static fields come after the embedded tables.
- base = mirror::Class::ComputeClassSize(true, klass->GetVTableDuringLinking()->GetLength(),
- 0, 0, 0, 0, 0);
- }
- field_offset = MemberOffset(base);
+ field_offset = klass->GetFirstReferenceStaticFieldOffsetDuringLinking();
} else {
mirror::Class* super_class = klass->GetSuperClass();
if (super_class != nullptr) {
@@ -5274,28 +5270,25 @@
if (isPrimitive) {
break; // past last reference, move on to the next phase
}
- if (UNLIKELY(!IsAligned<4>(field_offset.Uint32Value()))) {
+ if (UNLIKELY(!IsAligned<sizeof(mirror::HeapReference<mirror::Object>)>(
+ field_offset.Uint32Value()))) {
MemberOffset old_offset = field_offset;
field_offset = MemberOffset(RoundUp(field_offset.Uint32Value(), 4));
AddFieldGap(old_offset.Uint32Value(), field_offset.Uint32Value(), &gaps);
}
- DCHECK(IsAligned<4>(field_offset.Uint32Value()));
+ DCHECK(IsAligned<sizeof(mirror::HeapReference<mirror::Object>)>(field_offset.Uint32Value()));
grouped_and_sorted_fields.pop_front();
num_reference_fields++;
- fields->Set<false>(current_field, field);
field->SetOffset(field_offset);
- field_offset = MemberOffset(field_offset.Uint32Value() + sizeof(uint32_t));
+ field_offset = MemberOffset(field_offset.Uint32Value() +
+ sizeof(mirror::HeapReference<mirror::Object>));
}
// Gaps are stored as a max heap which means that we must shuffle from largest to smallest
// otherwise we could end up with suboptimal gap fills.
- ShuffleForward<8>(num_fields, ¤t_field, &field_offset,
- fields, &grouped_and_sorted_fields, &gaps);
- ShuffleForward<4>(num_fields, ¤t_field, &field_offset,
- fields, &grouped_and_sorted_fields, &gaps);
- ShuffleForward<2>(num_fields, ¤t_field, &field_offset,
- fields, &grouped_and_sorted_fields, &gaps);
- ShuffleForward<1>(num_fields, ¤t_field, &field_offset,
- fields, &grouped_and_sorted_fields, &gaps);
+ ShuffleForward<8>(¤t_field, &field_offset, &grouped_and_sorted_fields, &gaps);
+ ShuffleForward<4>(¤t_field, &field_offset, &grouped_and_sorted_fields, &gaps);
+ ShuffleForward<2>(¤t_field, &field_offset, &grouped_and_sorted_fields, &gaps);
+ ShuffleForward<1>(¤t_field, &field_offset, &grouped_and_sorted_fields, &gaps);
CHECK(grouped_and_sorted_fields.empty()) << "Missed " << grouped_and_sorted_fields.size() <<
" fields.";
self->EndAssertNoThreadSuspension(old_no_suspend_cause);
@@ -5309,39 +5302,6 @@
--num_reference_fields;
}
- if (kIsDebugBuild) {
- // Make sure that all reference fields appear before
- // non-reference fields, and all double-wide fields are aligned.
- bool seen_non_ref = false;
- for (size_t i = 0; i < num_fields; i++) {
- mirror::ArtField* field = fields->Get(i);
- if ((false)) { // enable to debug field layout
- LOG(INFO) << "LinkFields: " << (is_static ? "static" : "instance")
- << " class=" << PrettyClass(klass.Get())
- << " field=" << PrettyField(field)
- << " offset="
- << field->GetField32(mirror::ArtField::OffsetOffset());
- }
- Primitive::Type type = field->GetTypeAsPrimitiveType();
- bool is_primitive = type != Primitive::kPrimNot;
- if (klass->DescriptorEquals("Ljava/lang/ref/Reference;") &&
- strcmp("referent", field->GetName()) == 0) {
- is_primitive = true; // We lied above, so we have to expect a lie here.
- }
- if (is_primitive) {
- if (!seen_non_ref) {
- seen_non_ref = true;
- DCHECK_EQ(num_reference_fields, i) << PrettyField(field);
- }
- } else {
- DCHECK(!seen_non_ref) << PrettyField(field);
- }
- }
- if (!seen_non_ref) {
- DCHECK_EQ(num_fields, num_reference_fields) << PrettyClass(klass.Get());
- }
- }
-
size_t size = field_offset.Uint32Value();
// Update klass
if (is_static) {
@@ -5360,11 +5320,59 @@
klass->SetObjectSize(size);
}
}
+
+ if (kIsDebugBuild) {
+ // Make sure that the fields array is ordered by name but all reference
+ // offsets are at the beginning as far as alignment allows.
+ MemberOffset start_ref_offset = is_static
+ ? klass->GetFirstReferenceStaticFieldOffsetDuringLinking()
+ : klass->GetFirstReferenceInstanceFieldOffset();
+ MemberOffset end_ref_offset(start_ref_offset.Uint32Value() +
+ num_reference_fields *
+ sizeof(mirror::HeapReference<mirror::Object>));
+ MemberOffset current_ref_offset = start_ref_offset;
+ for (size_t i = 0; i < num_fields; i++) {
+ mirror::ArtField* field = fields->Get(i);
+ if ((false)) { // enable to debug field layout
+ LOG(INFO) << "LinkFields: " << (is_static ? "static" : "instance")
+ << " class=" << PrettyClass(klass.Get())
+ << " field=" << PrettyField(field)
+ << " offset="
+ << field->GetField32(mirror::ArtField::OffsetOffset());
+ }
+ if (i != 0) {
+ mirror::ArtField* prev_field = fields->Get(i - 1u);
+ CHECK_LT(strcmp(prev_field->GetName(), field->GetName()), 0);
+ }
+ Primitive::Type type = field->GetTypeAsPrimitiveType();
+ bool is_primitive = type != Primitive::kPrimNot;
+ if (klass->DescriptorEquals("Ljava/lang/ref/Reference;") &&
+ strcmp("referent", field->GetName()) == 0) {
+ is_primitive = true; // We lied above, so we have to expect a lie here.
+ }
+ MemberOffset offset = field->GetOffsetDuringLinking();
+ if (is_primitive) {
+ if (offset.Uint32Value() < end_ref_offset.Uint32Value()) {
+ // Shuffled before references.
+ size_t type_size = Primitive::ComponentSize(type);
+ CHECK_LT(type_size, sizeof(mirror::HeapReference<mirror::Object>));
+ CHECK_LT(offset.Uint32Value(), start_ref_offset.Uint32Value());
+ CHECK_LE(offset.Uint32Value() + type_size, start_ref_offset.Uint32Value());
+ CHECK(!IsAligned<sizeof(mirror::HeapReference<mirror::Object>)>(offset.Uint32Value()));
+ }
+ } else {
+ CHECK_EQ(current_ref_offset.Uint32Value(), offset.Uint32Value());
+ current_ref_offset = MemberOffset(current_ref_offset.Uint32Value() +
+ sizeof(mirror::HeapReference<mirror::Object>));
+ }
+ }
+ CHECK_EQ(current_ref_offset.Uint32Value(), end_ref_offset.Uint32Value());
+ }
+
return true;
}
-// Set the bitmap of reference offsets, refOffsets, from the ifields
-// list.
+// Set the bitmap of reference instance field offsets.
void ClassLinker::CreateReferenceInstanceOffsets(Handle<mirror::Class> klass) {
uint32_t reference_offsets = 0;
mirror::Class* super_class = klass->GetSuperClass();
@@ -5374,23 +5382,18 @@
// Compute reference offsets unless our superclass overflowed.
if (reference_offsets != mirror::Class::kClassWalkSuper) {
size_t num_reference_fields = klass->NumReferenceInstanceFieldsDuringLinking();
- mirror::ObjectArray<mirror::ArtField>* fields = klass->GetIFields();
- // All of the fields that contain object references are guaranteed
- // to be at the beginning of the fields list.
- for (size_t i = 0; i < num_reference_fields; ++i) {
- // Note that byte_offset is the offset from the beginning of
- // object, not the offset into instance data
- mirror::ArtField* field = fields->Get(i);
- MemberOffset byte_offset = field->GetOffsetDuringLinking();
- uint32_t displaced_bitmap_position =
- (byte_offset.Uint32Value() - mirror::kObjectHeaderSize) /
+ if (num_reference_fields != 0u) {
+ // All of the fields that contain object references are guaranteed be grouped in memory
+ // starting at an appropriately aligned address after super class object data.
+ uint32_t start_offset = RoundUp(super_class->GetObjectSize(),
+ sizeof(mirror::HeapReference<mirror::Object>));
+ uint32_t start_bit = (start_offset - mirror::kObjectHeaderSize) /
sizeof(mirror::HeapReference<mirror::Object>);
- if (displaced_bitmap_position >= 32) {
- // Can't encode offset so fall back on slow-path.
+ if (start_bit + num_reference_fields > 32) {
reference_offsets = mirror::Class::kClassWalkSuper;
- break;
} else {
- reference_offsets |= (1 << displaced_bitmap_position);
+ reference_offsets |= (0xffffffffu << start_bit) &
+ (0xffffffffu >> (32 - (start_bit + num_reference_fields)));
}
}
}
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index b257343..ba5aa3d 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -275,31 +275,42 @@
EXPECT_TRUE(field->IsStatic());
}
- // Confirm that all instances fields are packed together at the start
+ // Confirm that all instances field offsets are packed together at the start.
EXPECT_GE(klass->NumInstanceFields(), klass->NumReferenceInstanceFields());
StackHandleScope<1> hs(Thread::Current());
MutableHandle<mirror::ArtField> fhandle = hs.NewHandle<mirror::ArtField>(nullptr);
- for (size_t i = 0; i < klass->NumReferenceInstanceFields(); i++) {
- mirror::ArtField* field = klass->GetInstanceField(i);
- fhandle.Assign(field);
- FieldHelper fh(fhandle);
- ASSERT_TRUE(!field->IsPrimitiveType());
- mirror::Class* field_type = fh.GetType();
- ASSERT_TRUE(field_type != nullptr);
- ASSERT_TRUE(!field_type->IsPrimitive());
- }
- for (size_t i = klass->NumReferenceInstanceFields(); i < klass->NumInstanceFields(); i++) {
+ MemberOffset start_ref_offset = klass->GetFirstReferenceInstanceFieldOffset();
+ MemberOffset end_ref_offset(start_ref_offset.Uint32Value() +
+ klass->NumReferenceInstanceFields() *
+ sizeof(mirror::HeapReference<mirror::Object>));
+ MemberOffset current_ref_offset = start_ref_offset;
+ for (size_t i = 0; i < klass->NumInstanceFields(); i++) {
mirror::ArtField* field = klass->GetInstanceField(i);
fhandle.Assign(field);
FieldHelper fh(fhandle);
mirror::Class* field_type = fh.GetType();
ASSERT_TRUE(field_type != nullptr);
- if (!fh.GetField()->IsPrimitiveType() || !field_type->IsPrimitive()) {
- // While Reference.referent is not primitive, the ClassLinker
- // treats it as such so that the garbage collector won't scan it.
- EXPECT_EQ(PrettyField(fh.GetField()), "java.lang.Object java.lang.ref.Reference.referent");
+ if (!field->IsPrimitiveType()) {
+ ASSERT_TRUE(!field_type->IsPrimitive());
+ ASSERT_EQ(current_ref_offset.Uint32Value(), field->GetOffset().Uint32Value());
+ if (current_ref_offset.Uint32Value() == end_ref_offset.Uint32Value()) {
+ // While Reference.referent is not primitive, the ClassLinker
+ // treats it as such so that the garbage collector won't scan it.
+ EXPECT_EQ(PrettyField(fh.GetField()),
+ "java.lang.Object java.lang.ref.Reference.referent");
+ } else {
+ current_ref_offset = MemberOffset(current_ref_offset.Uint32Value() +
+ sizeof(mirror::HeapReference<mirror::Object>));
+ }
+ } else {
+ if (field->GetOffset().Uint32Value() < end_ref_offset.Uint32Value()) {
+ // Shuffled before references.
+ ASSERT_LT(field->GetOffset().Uint32Value(), start_ref_offset.Uint32Value());
+ CHECK(!IsAligned<4>(field->GetOffset().Uint32Value()));
+ }
}
}
+ ASSERT_EQ(end_ref_offset.Uint32Value(), current_ref_offset.Uint32Value());
uint32_t total_num_reference_instance_fields = 0;
mirror::Class* k = klass.Get();
@@ -461,10 +472,7 @@
struct ObjectOffsets : public CheckOffsets<mirror::Object> {
ObjectOffsets() : CheckOffsets<mirror::Object>(false, "Ljava/lang/Object;") {
- // alphabetical references
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Object, klass_), "shadow$_klass_"));
-
- // alphabetical 32-bit
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Object, monitor_), "shadow$_monitor_"));
#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Object, x_rb_ptr_), "shadow$_x_rb_ptr_"));
@@ -475,11 +483,8 @@
struct ArtFieldOffsets : public CheckOffsets<mirror::ArtField> {
ArtFieldOffsets() : CheckOffsets<mirror::ArtField>(false, "Ljava/lang/reflect/ArtField;") {
- // alphabetical references
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtField, declaring_class_), "declaringClass"));
-
- // alphabetical 32-bit
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtField, access_flags_), "accessFlags"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtField, declaring_class_), "declaringClass"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtField, field_dex_idx_), "fieldDexIndex"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtField, offset_), "offset"));
};
@@ -487,73 +492,61 @@
struct ArtMethodOffsets : public CheckOffsets<mirror::ArtMethod> {
ArtMethodOffsets() : CheckOffsets<mirror::ArtMethod>(false, "Ljava/lang/reflect/ArtMethod;") {
- // alphabetical references
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, access_flags_), "accessFlags"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, declaring_class_), "declaringClass"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_cache_resolved_methods_), "dexCacheResolvedMethods"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_cache_resolved_types_), "dexCacheResolvedTypes"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_cache_strings_), "dexCacheStrings"));
-
- // alphabetical 64-bit
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_code_item_offset_), "dexCodeItemOffset"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_method_index_), "dexMethodIndex"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, entry_point_from_interpreter_), "entryPointFromInterpreter"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, entry_point_from_jni_), "entryPointFromJni"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, entry_point_from_portable_compiled_code_), "entryPointFromPortableCompiledCode"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, entry_point_from_quick_compiled_code_), "entryPointFromQuickCompiledCode"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, gc_map_), "gcMap"));
-
- // alphabetical 32-bit
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, access_flags_), "accessFlags"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_code_item_offset_), "dexCodeItemOffset"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_method_index_), "dexMethodIndex"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, method_index_), "methodIndex"));
};
};
struct ClassOffsets : public CheckOffsets<mirror::Class> {
ClassOffsets() : CheckOffsets<mirror::Class>(false, "Ljava/lang/Class;") {
- // alphabetical references
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, access_flags_), "accessFlags"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, class_loader_), "classLoader"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, class_size_), "classSize"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, clinit_thread_id_), "clinitThreadId"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, component_type_), "componentType"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, dex_cache_), "dexCache"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, dex_class_def_idx_), "dexClassDefIndex"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, dex_type_idx_), "dexTypeIndex"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, direct_methods_), "directMethods"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, ifields_), "iFields"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, iftable_), "ifTable"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, name_), "name"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, sfields_), "sFields"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, super_class_), "superClass"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, verify_error_class_), "verifyErrorClass"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, virtual_methods_), "virtualMethods"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, vtable_), "vtable"));
-
- // alphabetical 32-bit
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, access_flags_), "accessFlags"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, class_size_), "classSize"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, clinit_thread_id_), "clinitThreadId"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, dex_class_def_idx_), "dexClassDefIndex"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, dex_type_idx_), "dexTypeIndex"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, num_reference_instance_fields_), "numReferenceInstanceFields"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, num_reference_static_fields_), "numReferenceStaticFields"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, object_size_), "objectSize"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, primitive_type_), "primitiveType"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, reference_instance_offsets_), "referenceInstanceOffsets"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, sfields_), "sFields"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, status_), "status"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, super_class_), "superClass"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, verify_error_class_), "verifyErrorClass"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, virtual_methods_), "virtualMethods"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, vtable_), "vtable"));
};
};
struct StringOffsets : public CheckOffsets<mirror::String> {
StringOffsets() : CheckOffsets<mirror::String>(false, "Ljava/lang/String;") {
- // alphabetical references
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::String, array_), "value"));
-
- // alphabetical 32-bit
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::String, count_), "count"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::String, hash_code_), "hashCode"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::String, offset_), "offset"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::String, array_), "value"));
};
};
struct ThrowableOffsets : public CheckOffsets<mirror::Throwable> {
ThrowableOffsets() : CheckOffsets<mirror::Throwable>(false, "Ljava/lang/Throwable;") {
- // alphabetical references
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Throwable, cause_), "cause"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Throwable, detail_message_), "detailMessage"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Throwable, stack_state_), "stackState"));
@@ -564,17 +557,15 @@
struct StackTraceElementOffsets : public CheckOffsets<mirror::StackTraceElement> {
StackTraceElementOffsets() : CheckOffsets<mirror::StackTraceElement>(false, "Ljava/lang/StackTraceElement;") {
- // alphabetical references
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::StackTraceElement, declaring_class_), "declaringClass"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::StackTraceElement, file_name_), "fileName"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::StackTraceElement, method_name_), "methodName"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::StackTraceElement, line_number_), "lineNumber"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::StackTraceElement, method_name_), "methodName"));
};
};
struct ClassLoaderOffsets : public CheckOffsets<mirror::ClassLoader> {
ClassLoaderOffsets() : CheckOffsets<mirror::ClassLoader>(false, "Ljava/lang/ClassLoader;") {
- // alphabetical references
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ClassLoader, packages_), "packages"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ClassLoader, parent_), "parent"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ClassLoader, proxyCache_), "proxyCache"));
@@ -583,27 +574,24 @@
struct ProxyOffsets : public CheckOffsets<mirror::Proxy> {
ProxyOffsets() : CheckOffsets<mirror::Proxy>(false, "Ljava/lang/reflect/Proxy;") {
- // alphabetical references
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Proxy, h_), "h"));
};
};
struct DexCacheOffsets : public CheckOffsets<mirror::DexCache> {
DexCacheOffsets() : CheckOffsets<mirror::DexCache>(false, "Ljava/lang/DexCache;") {
- // alphabetical references
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::DexCache, dex_), "dex"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::DexCache, dex_file_), "dexFile"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::DexCache, location_), "location"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::DexCache, resolved_fields_), "resolvedFields"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::DexCache, resolved_methods_), "resolvedMethods"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::DexCache, resolved_types_), "resolvedTypes"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::DexCache, strings_), "strings"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::DexCache, dex_file_), "dexFile"));
};
};
struct ReferenceOffsets : public CheckOffsets<mirror::Reference> {
ReferenceOffsets() : CheckOffsets<mirror::Reference>(false, "Ljava/lang/ref/Reference;") {
- // alphabetical references
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Reference, pending_next_), "pendingNext"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Reference, queue_), "queue"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Reference, queue_next_), "queueNext"));
@@ -613,7 +601,6 @@
struct FinalizerReferenceOffsets : public CheckOffsets<mirror::FinalizerReference> {
FinalizerReferenceOffsets() : CheckOffsets<mirror::FinalizerReference>(false, "Ljava/lang/ref/FinalizerReference;") {
- // alphabetical references
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::FinalizerReference, next_), "next"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::FinalizerReference, prev_), "prev"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::FinalizerReference, zombie_), "zombie"));
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 584743b..e2f6085 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -2408,9 +2408,7 @@
if (peer.get() == nullptr) {
return JDWP::ERR_THREAD_NOT_ALIVE;
}
- // Suspend thread to build stack trace. Take suspend thread lock to avoid races with threads
- // trying to suspend this one.
- MutexLock mu(self, *Locks::thread_list_suspend_thread_lock_);
+ // Suspend thread to build stack trace.
bool timed_out;
ThreadList* thread_list = Runtime::Current()->GetThreadList();
Thread* thread = thread_list->SuspendThreadByPeer(peer.get(), request_suspension, true,
@@ -3322,13 +3320,9 @@
soa.Self()->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
jobject thread_peer = Dbg::GetObjectRegistry()->GetJObject(thread_id);
bool timed_out;
- Thread* suspended_thread;
- {
- // Take suspend thread lock to avoid races with threads trying to suspend this one.
- MutexLock mu(soa.Self(), *Locks::thread_list_suspend_thread_lock_);
- ThreadList* thread_list = Runtime::Current()->GetThreadList();
- suspended_thread = thread_list->SuspendThreadByPeer(thread_peer, true, true, &timed_out);
- }
+ ThreadList* thread_list = Runtime::Current()->GetThreadList();
+ Thread* suspended_thread = thread_list->SuspendThreadByPeer(thread_peer, true, true,
+ &timed_out);
CHECK_EQ(soa.Self()->TransitionFromSuspendedToRunnable(), kWaitingForDebuggerSuspension);
if (suspended_thread == nullptr) {
// Thread terminated from under us while suspending.
diff --git a/runtime/dex_method_iterator_test.cc b/runtime/dex_method_iterator_test.cc
index c6f333f..2681ad0 100644
--- a/runtime/dex_method_iterator_test.cc
+++ b/runtime/dex_method_iterator_test.cc
@@ -18,6 +18,7 @@
#include "base/stl_util.h"
#include "common_runtime_test.h"
+#include "oat_file.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
@@ -29,9 +30,9 @@
TEST_F(DexMethodIteratorTest, Basic) {
ScopedObjectAccess soa(Thread::Current());
std::vector<const DexFile*> dex_files;
- const char* jars[] = { "core-libart", "conscrypt", "okhttp", "core-junit", "bouncycastle" };
- for (size_t i = 0; i < 5; ++i) {
- dex_files.push_back(LoadExpectSingleDexFile(GetDexFileName(jars[i]).c_str()));
+ CHECK_NE(boot_class_path_.size(), 0U);
+ for (size_t i = 0; i < boot_class_path_.size(); ++i) {
+ dex_files.push_back(boot_class_path_[i]);
}
DexMethodIterator it(dex_files);
while (it.HasNext()) {
@@ -43,7 +44,6 @@
}
it.Next();
}
- STLDeleteElements(&dex_files);
}
} // namespace art
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index b6cf921..37c5f9c 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -20,6 +20,7 @@
#include <sys/types.h>
#include <unistd.h>
+#include "arch/instruction_set.h"
#include "base/logging.h"
#include "base/stringprintf.h"
#include "base/stl_util.h"
@@ -29,7 +30,6 @@
#include "elf_utils.h"
#include "leb128.h"
#include "utils.h"
-#include "instruction_set.h"
namespace art {
diff --git a/runtime/entrypoints/quick/callee_save_frame.h b/runtime/entrypoints/quick/callee_save_frame.h
index 49357ad..9ffd199 100644
--- a/runtime/entrypoints/quick/callee_save_frame.h
+++ b/runtime/entrypoints/quick/callee_save_frame.h
@@ -17,8 +17,8 @@
#ifndef ART_RUNTIME_ENTRYPOINTS_QUICK_CALLEE_SAVE_FRAME_H_
#define ART_RUNTIME_ENTRYPOINTS_QUICK_CALLEE_SAVE_FRAME_H_
+#include "arch/instruction_set.h"
#include "base/mutex.h"
-#include "instruction_set.h"
#include "runtime.h"
#include "thread-inl.h"
diff --git a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
index f78273f..00f5cd5 100644
--- a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
@@ -16,7 +16,6 @@
#include "callee_save_frame.h"
#include "entrypoints/runtime_asm_entrypoints.h"
-#include "instruction_set.h"
#include "instrumentation.h"
#include "mirror/art_method-inl.h"
#include "mirror/object-inl.h"
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 4f61707..c4bc969 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -21,7 +21,6 @@
#include "entrypoints/entrypoint_utils-inl.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
-#include "instruction_set.h"
#include "interpreter/interpreter.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index c473684..ab3ec62 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -179,6 +179,10 @@
// Now set up the nested signal handler.
+ // TODO: add SIGSEGV back to the nested signals when we can handle running out of stack gracefully.
+ static const int handled_nested_signals[] = {SIGABRT};
+ constexpr size_t num_handled_nested_signals = arraysize(handled_nested_signals);
+
// Release the fault manager so that it will remove the signal chain for
// SIGSEGV and we call the real sigaction.
fault_manager.Release();
@@ -188,33 +192,40 @@
// Unblock the signals we allow so that they can be delivered in the signal handler.
sigset_t sigset;
sigemptyset(&sigset);
- sigaddset(&sigset, SIGSEGV);
- sigaddset(&sigset, SIGABRT);
+ for (int signal : handled_nested_signals) {
+ sigaddset(&sigset, signal);
+ }
pthread_sigmask(SIG_UNBLOCK, &sigset, nullptr);
// If we get a signal in this code we want to invoke our nested signal
// handler.
- struct sigaction action, oldsegvaction, oldabortaction;
+ struct sigaction action;
+ struct sigaction oldactions[num_handled_nested_signals];
action.sa_sigaction = art_nested_signal_handler;
// Explicitly mask out SIGSEGV and SIGABRT from the nested signal handler. This
// should be the default but we definitely don't want these happening in our
// nested signal handler.
sigemptyset(&action.sa_mask);
- sigaddset(&action.sa_mask, SIGSEGV);
- sigaddset(&action.sa_mask, SIGABRT);
+ for (int signal : handled_nested_signals) {
+ sigaddset(&action.sa_mask, signal);
+ }
action.sa_flags = SA_SIGINFO | SA_ONSTACK;
#if !defined(__APPLE__) && !defined(__mips__)
action.sa_restorer = nullptr;
#endif
- // Catch SIGSEGV and SIGABRT to invoke our nested handler
- int e1 = sigaction(SIGSEGV, &action, &oldsegvaction);
- int e2 = sigaction(SIGABRT, &action, &oldabortaction);
- if (e1 != 0 || e2 != 0) {
- LOG(ERROR) << "Unable to set up nested signal handler";
- } else {
+ // Catch handled signals to invoke our nested handler.
+ bool success = true;
+ for (size_t i = 0; i < num_handled_nested_signals; ++i) {
+ success = sigaction(handled_nested_signals[i], &action, &oldactions[i]) == 0;
+ if (!success) {
+ PLOG(ERROR) << "Unable to set up nested signal handler";
+ break;
+ }
+ }
+ if (success) {
// Save the current state and call the handlers. If anything causes a signal
// our nested signal handler will be invoked and this will longjmp to the saved
// state.
@@ -223,8 +234,12 @@
if (handler->Action(sig, info, context)) {
// Restore the signal handlers, reinit the fault manager and return. Signal was
// handled.
- sigaction(SIGSEGV, &oldsegvaction, nullptr);
- sigaction(SIGABRT, &oldabortaction, nullptr);
+ for (size_t i = 0; i < num_handled_nested_signals; ++i) {
+ success = sigaction(handled_nested_signals[i], &oldactions[i], nullptr) == 0;
+ if (!success) {
+ PLOG(ERROR) << "Unable to restore signal handler";
+ }
+ }
fault_manager.Init();
return;
}
@@ -234,8 +249,12 @@
}
// Restore the signal handlers.
- sigaction(SIGSEGV, &oldsegvaction, nullptr);
- sigaction(SIGABRT, &oldabortaction, nullptr);
+ for (size_t i = 0; i < num_handled_nested_signals; ++i) {
+ success = sigaction(handled_nested_signals[i], &oldactions[i], nullptr) == 0;
+ if (!success) {
+ PLOG(ERROR) << "Unable to restore signal handler";
+ }
+ }
}
// Now put the fault manager back in place.
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index cf7352e..69a573e 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -22,6 +22,7 @@
#include <vector>
#include "allocator_type.h"
+#include "arch/instruction_set.h"
#include "atomic.h"
#include "base/timing_logger.h"
#include "gc/accounting/atomic_stack.h"
@@ -32,7 +33,6 @@
#include "gc/collector_type.h"
#include "gc/space/large_object_space.h"
#include "globals.h"
-#include "instruction_set.h"
#include "jni.h"
#include "object_callbacks.h"
#include "offsets.h"
diff --git a/runtime/instruction_set.cc b/runtime/instruction_set.cc
deleted file mode 100644
index e165a75..0000000
--- a/runtime/instruction_set.cc
+++ /dev/null
@@ -1,507 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "instruction_set.h"
-
-#include <signal.h>
-#include <fstream>
-
-#include "base/casts.h"
-#include "base/stringprintf.h"
-#include "utils.h"
-
-namespace art {
-
-const char* GetInstructionSetString(const InstructionSet isa) {
- switch (isa) {
- case kArm:
- case kThumb2:
- return "arm";
- case kArm64:
- return "arm64";
- case kX86:
- return "x86";
- case kX86_64:
- return "x86_64";
- case kMips:
- return "mips";
- case kNone:
- return "none";
- default:
- LOG(FATAL) << "Unknown ISA " << isa;
- UNREACHABLE();
- }
-}
-
-InstructionSet GetInstructionSetFromString(const char* isa_str) {
- CHECK(isa_str != nullptr);
-
- if (strcmp("arm", isa_str) == 0) {
- return kArm;
- } else if (strcmp("arm64", isa_str) == 0) {
- return kArm64;
- } else if (strcmp("x86", isa_str) == 0) {
- return kX86;
- } else if (strcmp("x86_64", isa_str) == 0) {
- return kX86_64;
- } else if (strcmp("mips", isa_str) == 0) {
- return kMips;
- }
-
- return kNone;
-}
-
-size_t GetInstructionSetAlignment(InstructionSet isa) {
- switch (isa) {
- case kArm:
- // Fall-through.
- case kThumb2:
- return kArmAlignment;
- case kArm64:
- return kArm64Alignment;
- case kX86:
- // Fall-through.
- case kX86_64:
- return kX86Alignment;
- case kMips:
- return kMipsAlignment;
- case kNone:
- LOG(FATAL) << "ISA kNone does not have alignment.";
- return 0;
- default:
- LOG(FATAL) << "Unknown ISA " << isa;
- return 0;
- }
-}
-
-
-static constexpr size_t kDefaultStackOverflowReservedBytes = 16 * KB;
-static constexpr size_t kMipsStackOverflowReservedBytes = kDefaultStackOverflowReservedBytes;
-
-static constexpr size_t kArmStackOverflowReservedBytes = 8 * KB;
-static constexpr size_t kArm64StackOverflowReservedBytes = 8 * KB;
-static constexpr size_t kX86StackOverflowReservedBytes = 8 * KB;
-static constexpr size_t kX86_64StackOverflowReservedBytes = 8 * KB;
-
-size_t GetStackOverflowReservedBytes(InstructionSet isa) {
- switch (isa) {
- case kArm: // Intentional fall-through.
- case kThumb2:
- return kArmStackOverflowReservedBytes;
-
- case kArm64:
- return kArm64StackOverflowReservedBytes;
-
- case kMips:
- return kMipsStackOverflowReservedBytes;
-
- case kX86:
- return kX86StackOverflowReservedBytes;
-
- case kX86_64:
- return kX86_64StackOverflowReservedBytes;
-
- case kNone:
- LOG(FATAL) << "kNone has no stack overflow size";
- return 0;
-
- default:
- LOG(FATAL) << "Unknown instruction set" << isa;
- return 0;
- }
-}
-
-const InstructionSetFeatures* InstructionSetFeatures::FromVariant(InstructionSet isa,
- const std::string& variant,
- std::string* error_msg) {
- const InstructionSetFeatures* result;
- switch (isa) {
- case kArm:
- case kThumb2:
- result = ArmInstructionSetFeatures::FromVariant(variant, error_msg);
- break;
- default:
- result = UnknownInstructionSetFeatures::Unknown(isa);
- break;
- }
- CHECK_EQ(result == nullptr, error_msg->size() != 0);
- return result;
-}
-
-const InstructionSetFeatures* InstructionSetFeatures::FromFeatureString(InstructionSet isa,
- const std::string& feature_list,
- std::string* error_msg) {
- const InstructionSetFeatures* result;
- switch (isa) {
- case kArm:
- case kThumb2:
- result = ArmInstructionSetFeatures::FromFeatureString(feature_list, error_msg);
- break;
- default:
- result = UnknownInstructionSetFeatures::Unknown(isa);
- break;
- }
- // TODO: warn if feature_list doesn't agree with result's GetFeatureList().
- CHECK_EQ(result == nullptr, error_msg->size() != 0);
- return result;
-}
-
-const InstructionSetFeatures* InstructionSetFeatures::FromBitmap(InstructionSet isa,
- uint32_t bitmap) {
- const InstructionSetFeatures* result;
- switch (isa) {
- case kArm:
- case kThumb2:
- result = ArmInstructionSetFeatures::FromBitmap(bitmap);
- break;
- default:
- result = UnknownInstructionSetFeatures::Unknown(isa);
- break;
- }
- CHECK_EQ(bitmap, result->AsBitmap());
- return result;
-}
-
-const InstructionSetFeatures* InstructionSetFeatures::FromCppDefines() {
- const InstructionSetFeatures* result;
- switch (kRuntimeISA) {
- case kArm:
- case kThumb2:
- result = ArmInstructionSetFeatures::FromCppDefines();
- break;
- default:
- result = UnknownInstructionSetFeatures::Unknown(kRuntimeISA);
- break;
- }
- return result;
-}
-
-
-const InstructionSetFeatures* InstructionSetFeatures::FromCpuInfo() {
- const InstructionSetFeatures* result;
- switch (kRuntimeISA) {
- case kArm:
- case kThumb2:
- result = ArmInstructionSetFeatures::FromCpuInfo();
- break;
- default:
- result = UnknownInstructionSetFeatures::Unknown(kRuntimeISA);
- break;
- }
- return result;
-}
-
-const InstructionSetFeatures* InstructionSetFeatures::FromHwcap() {
- const InstructionSetFeatures* result;
- switch (kRuntimeISA) {
- case kArm:
- case kThumb2:
- result = ArmInstructionSetFeatures::FromHwcap();
- break;
- default:
- result = UnknownInstructionSetFeatures::Unknown(kRuntimeISA);
- break;
- }
- return result;
-}
-
-const InstructionSetFeatures* InstructionSetFeatures::FromAssembly() {
- const InstructionSetFeatures* result;
- switch (kRuntimeISA) {
- case kArm:
- case kThumb2:
- result = ArmInstructionSetFeatures::FromAssembly();
- break;
- default:
- result = UnknownInstructionSetFeatures::Unknown(kRuntimeISA);
- break;
- }
- return result;
-}
-
-const ArmInstructionSetFeatures* InstructionSetFeatures::AsArmInstructionSetFeatures() const {
- DCHECK_EQ(kArm, GetInstructionSet());
- return down_cast<const ArmInstructionSetFeatures*>(this);
-}
-
-std::ostream& operator<<(std::ostream& os, const InstructionSetFeatures& rhs) {
- os << "ISA: " << rhs.GetInstructionSet() << " Feature string: " << rhs.GetFeatureString();
- return os;
-}
-
-const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromFeatureString(
- const std::string& feature_list, std::string* error_msg) {
- std::vector<std::string> features;
- Split(feature_list, ',', &features);
- bool has_lpae = false;
- bool has_div = false;
- for (auto i = features.begin(); i != features.end(); i++) {
- std::string feature = Trim(*i);
- if (feature == "default" || feature == "none") {
- // Nothing to do.
- } else if (feature == "div") {
- has_div = true;
- } else if (feature == "nodiv") {
- has_div = false;
- } else if (feature == "lpae") {
- has_lpae = true;
- } else if (feature == "nolpae") {
- has_lpae = false;
- } else {
- *error_msg = StringPrintf("Unknown instruction set feature: '%s'", feature.c_str());
- return nullptr;
- }
- }
- return new ArmInstructionSetFeatures(has_lpae, has_div);
-}
-
-const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromVariant(
- const std::string& variant, std::string* error_msg) {
- // Look for variants that have divide support.
- bool has_div = false;
- {
- static const char* arm_variants_with_div[] = {
- "cortex-a7", "cortex-a12", "cortex-a15", "cortex-a17", "cortex-a53", "cortex-a57",
- "cortex-m3", "cortex-m4", "cortex-r4", "cortex-r5",
- "cyclone", "denver", "krait", "swift"
- };
- for (const char* div_variant : arm_variants_with_div) {
- if (variant == div_variant) {
- has_div = true;
- break;
- }
- }
- }
- // Look for variants that have LPAE support.
- bool has_lpae = false;
- {
- static const char* arm_variants_with_lpae[] = {
- "cortex-a7", "cortex-a15", "krait", "denver"
- };
- for (const char* lpae_variant : arm_variants_with_lpae) {
- if (variant == lpae_variant) {
- has_lpae = true;
- break;
- }
- }
- }
- if (has_div == false && has_lpae == false) {
- // Avoid unsupported variants.
- static const char* unsupported_arm_variants[] = {
- // ARM processors that aren't ARMv7 compatible aren't supported.
- "arm2", "arm250", "arm3", "arm6", "arm60", "arm600", "arm610", "arm620",
- "cortex-m0", "cortex-m0plus", "cortex-m1",
- "fa526", "fa626", "fa606te", "fa626te", "fmp626", "fa726te",
- "iwmmxt", "iwmmxt2",
- "strongarm", "strongarm110", "strongarm1100", "strongarm1110",
- "xscale"
- };
- for (const char* us_variant : unsupported_arm_variants) {
- if (variant == us_variant) {
- *error_msg = StringPrintf("Attempt to use unsupported ARM variant: %s", us_variant);
- return nullptr;
- }
- }
- // Warn if the variant is unknown.
- // TODO: some of the variants below may have feature support, but that support is currently
- // unknown so we'll choose conservative (sub-optimal) defaults without warning.
- // TODO: some of the architectures may not support all features required by ART and should be
- // moved to unsupported_arm_variants[] above.
- static const char* arm_variants_without_known_features[] = {
- "arm7", "arm7m", "arm7d", "arm7dm", "arm7di", "arm7dmi", "arm70", "arm700", "arm700i",
- "arm710", "arm710c", "arm7100", "arm720", "arm7500", "arm7500fe", "arm7tdmi", "arm7tdmi-s",
- "arm710t", "arm720t", "arm740t",
- "arm8", "arm810",
- "arm9", "arm9e", "arm920", "arm920t", "arm922t", "arm946e-s", "arm966e-s", "arm968e-s",
- "arm926ej-s", "arm940t", "arm9tdmi",
- "arm10tdmi", "arm1020t", "arm1026ej-s", "arm10e", "arm1020e", "arm1022e",
- "arm1136j-s", "arm1136jf-s",
- "arm1156t2-s", "arm1156t2f-s", "arm1176jz-s", "arm1176jzf-s",
- "cortex-a5", "cortex-a8", "cortex-a9", "cortex-a9-mp", "cortex-r4f",
- "marvell-pj4", "mpcore", "mpcorenovfp"
- };
- bool found = false;
- for (const char* ff_variant : arm_variants_without_known_features) {
- if (variant == ff_variant) {
- found = true;
- break;
- }
- }
- if (!found) {
- LOG(WARNING) << "Unknown instruction set features for ARM CPU variant (" << variant
- << ") using conservative defaults";
- }
- }
- return new ArmInstructionSetFeatures(has_lpae, has_div);
-}
-
-const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromBitmap(uint32_t bitmap) {
- bool has_lpae = (bitmap & kLpaeBitfield) != 0;
- bool has_div = (bitmap & kDivBitfield) != 0;
- return new ArmInstructionSetFeatures(has_lpae, has_div);
-}
-
-const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromCppDefines() {
-#if defined(__ARM_ARCH_EXT_IDIV__)
- bool has_div = true;
-#else
- bool has_div = false;
-#endif
-#if defined(__ARM_FEATURE_LPAE)
- bool has_lpae = true;
-#else
- bool has_lpae = false;
-#endif
- return new ArmInstructionSetFeatures(has_lpae, has_div);
-}
-
-const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromCpuInfo() {
- // Look in /proc/cpuinfo for features we need. Only use this when we can guarantee that
- // the kernel puts the appropriate feature flags in here. Sometimes it doesn't.
- bool has_lpae = false;
- bool has_div = false;
-
- std::ifstream in("/proc/cpuinfo");
- if (!in.fail()) {
- while (!in.eof()) {
- std::string line;
- std::getline(in, line);
- if (!in.eof()) {
- LOG(INFO) << "cpuinfo line: " << line;
- if (line.find("Features") != std::string::npos) {
- LOG(INFO) << "found features";
- if (line.find("idivt") != std::string::npos) {
- // We always expect both ARM and Thumb divide instructions to be available or not
- // available.
- CHECK_NE(line.find("idiva"), std::string::npos);
- has_div = true;
- }
- if (line.find("lpae") != std::string::npos) {
- has_lpae = true;
- }
- }
- }
- }
- in.close();
- } else {
- LOG(INFO) << "Failed to open /proc/cpuinfo";
- }
- return new ArmInstructionSetFeatures(has_lpae, has_div);
-}
-
-#if defined(HAVE_ANDROID_OS) && defined(__arm__)
-#include <sys/auxv.h>
-#include <asm/hwcap.h>
-#endif
-
-const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromHwcap() {
- bool has_lpae = false;
- bool has_div = false;
-
-#if defined(HAVE_ANDROID_OS) && defined(__arm__)
- uint64_t hwcaps = getauxval(AT_HWCAP);
- LOG(INFO) << "hwcaps=" << hwcaps;
- if ((hwcaps & HWCAP_IDIVT) != 0) {
- // We always expect both ARM and Thumb divide instructions to be available or not
- // available.
- CHECK_NE(hwcaps & HWCAP_IDIVA, 0U);
- has_div = true;
- }
- if ((hwcaps & HWCAP_LPAE) != 0) {
- has_lpae = true;
- }
-#endif
-
- return new ArmInstructionSetFeatures(has_lpae, has_div);
-}
-
-// A signal handler called by a fault for an illegal instruction. We record the fact in r0
-// and then increment the PC in the signal context to return to the next instruction. We know the
-// instruction is an sdiv (4 bytes long).
-static void bad_divide_inst_handle(int signo ATTRIBUTE_UNUSED, siginfo_t* si ATTRIBUTE_UNUSED,
- void* data) {
-#if defined(__arm__)
- struct ucontext *uc = (struct ucontext *)data;
- struct sigcontext *sc = &uc->uc_mcontext;
- sc->arm_r0 = 0; // Set R0 to #0 to signal error.
- sc->arm_pc += 4; // Skip offending instruction.
-#else
- UNUSED(data);
-#endif
-}
-
-#if defined(__arm__)
-extern "C" bool artCheckForARMSDIVInstruction();
-#endif
-
-const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromAssembly() {
- // See if have a sdiv instruction. Register a signal handler and try to execute an sdiv
- // instruction. If we get a SIGILL then it's not supported.
- struct sigaction sa, osa;
- sa.sa_flags = SA_ONSTACK | SA_RESTART | SA_SIGINFO;
- sa.sa_sigaction = bad_divide_inst_handle;
- sigaction(SIGILL, &sa, &osa);
-
- bool has_div = false;
-#if defined(__arm__)
- if (artCheckForARMSDIVInstruction()) {
- has_div = true;
- }
-#endif
-
- // Restore the signal handler.
- sigaction(SIGILL, &osa, nullptr);
-
- // Use compile time features to "detect" LPAE support.
- // TODO: write an assembly LPAE support test.
-#if defined(__ARM_FEATURE_LPAE)
- bool has_lpae = true;
-#else
- bool has_lpae = false;
-#endif
- return new ArmInstructionSetFeatures(has_lpae, has_div);
-}
-
-
-bool ArmInstructionSetFeatures::Equals(const InstructionSetFeatures* other) const {
- if (kArm != other->GetInstructionSet()) {
- return false;
- }
- const ArmInstructionSetFeatures* other_as_arm = other->AsArmInstructionSetFeatures();
- return has_lpae_ == other_as_arm->has_lpae_ && has_div_ == other_as_arm->has_div_;
-}
-
-uint32_t ArmInstructionSetFeatures::AsBitmap() const {
- return (has_lpae_ ? kLpaeBitfield : 0) | (has_div_ ? kDivBitfield : 0);
-}
-
-std::string ArmInstructionSetFeatures::GetFeatureString() const {
- std::string result;
- if (has_div_) {
- result += ",div";
- }
- if (has_lpae_) {
- result += ",lpae";
- }
- if (result.size() == 0) {
- return "none";
- } else {
- // Strip leading comma.
- return result.substr(1, result.size());
- }
-}
-
-} // namespace art
diff --git a/runtime/instruction_set.h b/runtime/instruction_set.h
deleted file mode 100644
index 84a3e80..0000000
--- a/runtime/instruction_set.h
+++ /dev/null
@@ -1,405 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_INSTRUCTION_SET_H_
-#define ART_RUNTIME_INSTRUCTION_SET_H_
-
-#include <iosfwd>
-#include <string>
-
-#include "base/logging.h" // Logging is required for FATAL in the helper functions.
-#include "base/macros.h"
-#include "base/value_object.h"
-#include "globals.h" // For KB.
-
-namespace art {
-
-enum InstructionSet {
- kNone,
- kArm,
- kArm64,
- kThumb2,
- kX86,
- kX86_64,
- kMips,
- kMips64
-};
-std::ostream& operator<<(std::ostream& os, const InstructionSet& rhs);
-
-#if defined(__arm__)
-static constexpr InstructionSet kRuntimeISA = kArm;
-#elif defined(__aarch64__)
-static constexpr InstructionSet kRuntimeISA = kArm64;
-#elif defined(__mips__)
-static constexpr InstructionSet kRuntimeISA = kMips;
-#elif defined(__i386__)
-static constexpr InstructionSet kRuntimeISA = kX86;
-#elif defined(__x86_64__)
-static constexpr InstructionSet kRuntimeISA = kX86_64;
-#else
-static constexpr InstructionSet kRuntimeISA = kNone;
-#endif
-
-// Architecture-specific pointer sizes
-static constexpr size_t kArmPointerSize = 4;
-static constexpr size_t kArm64PointerSize = 8;
-static constexpr size_t kMipsPointerSize = 4;
-static constexpr size_t kMips64PointerSize = 8;
-static constexpr size_t kX86PointerSize = 4;
-static constexpr size_t kX86_64PointerSize = 8;
-
-// ARM instruction alignment. ARM processors require code to be 4-byte aligned,
-// but ARM ELF requires 8..
-static constexpr size_t kArmAlignment = 8;
-
-// ARM64 instruction alignment. This is the recommended alignment for maximum performance.
-static constexpr size_t kArm64Alignment = 16;
-
-// MIPS instruction alignment. MIPS processors require code to be 4-byte aligned.
-// TODO: Can this be 4?
-static constexpr size_t kMipsAlignment = 8;
-
-// X86 instruction alignment. This is the recommended alignment for maximum performance.
-static constexpr size_t kX86Alignment = 16;
-
-
-const char* GetInstructionSetString(InstructionSet isa);
-
-// Note: Returns kNone when the string cannot be parsed to a known value.
-InstructionSet GetInstructionSetFromString(const char* instruction_set);
-
-static inline size_t GetInstructionSetPointerSize(InstructionSet isa) {
- switch (isa) {
- case kArm:
- // Fall-through.
- case kThumb2:
- return kArmPointerSize;
- case kArm64:
- return kArm64PointerSize;
- case kX86:
- return kX86PointerSize;
- case kX86_64:
- return kX86_64PointerSize;
- case kMips:
- return kMipsPointerSize;
- case kMips64:
- return kMips64PointerSize;
- case kNone:
- LOG(FATAL) << "ISA kNone does not have pointer size.";
- return 0;
- default:
- LOG(FATAL) << "Unknown ISA " << isa;
- return 0;
- }
-}
-
-size_t GetInstructionSetAlignment(InstructionSet isa);
-
-static inline bool Is64BitInstructionSet(InstructionSet isa) {
- switch (isa) {
- case kArm:
- case kThumb2:
- case kX86:
- case kMips:
- return false;
-
- case kArm64:
- case kX86_64:
- case kMips64:
- return true;
-
- case kNone:
- LOG(FATAL) << "ISA kNone does not have bit width.";
- return 0;
- default:
- LOG(FATAL) << "Unknown ISA " << isa;
- return 0;
- }
-}
-
-static inline size_t GetBytesPerGprSpillLocation(InstructionSet isa) {
- switch (isa) {
- case kArm:
- // Fall-through.
- case kThumb2:
- return 4;
- case kArm64:
- return 8;
- case kX86:
- return 4;
- case kX86_64:
- return 8;
- case kMips:
- return 4;
- case kNone:
- LOG(FATAL) << "ISA kNone does not have spills.";
- return 0;
- default:
- LOG(FATAL) << "Unknown ISA " << isa;
- return 0;
- }
-}
-
-static inline size_t GetBytesPerFprSpillLocation(InstructionSet isa) {
- switch (isa) {
- case kArm:
- // Fall-through.
- case kThumb2:
- return 4;
- case kArm64:
- return 8;
- case kX86:
- return 8;
- case kX86_64:
- return 8;
- case kMips:
- return 4;
- case kNone:
- LOG(FATAL) << "ISA kNone does not have spills.";
- return 0;
- default:
- LOG(FATAL) << "Unknown ISA " << isa;
- return 0;
- }
-}
-
-size_t GetStackOverflowReservedBytes(InstructionSet isa);
-
-class ArmInstructionSetFeatures;
-
-// Abstraction used to describe features of a different instruction sets.
-class InstructionSetFeatures {
- public:
- // Process a CPU variant string for the given ISA and create an InstructionSetFeatures.
- static const InstructionSetFeatures* FromVariant(InstructionSet isa,
- const std::string& variant,
- std::string* error_msg);
-
- // Parse a string of the form "div,lpae" and create an InstructionSetFeatures.
- static const InstructionSetFeatures* FromFeatureString(InstructionSet isa,
- const std::string& feature_list,
- std::string* error_msg);
-
- // Parse a bitmap for the given isa and create an InstructionSetFeatures.
- static const InstructionSetFeatures* FromBitmap(InstructionSet isa, uint32_t bitmap);
-
- // Turn C pre-processor #defines into the equivalent instruction set features for kRuntimeISA.
- static const InstructionSetFeatures* FromCppDefines();
-
- // Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
- static const InstructionSetFeatures* FromCpuInfo();
-
- // Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
- // InstructionSetFeatures.
- static const InstructionSetFeatures* FromHwcap();
-
- // Use assembly tests of the current runtime (ie kRuntimeISA) to determine the
- // InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
- static const InstructionSetFeatures* FromAssembly();
-
- // Are these features the same as the other given features?
- virtual bool Equals(const InstructionSetFeatures* other) const = 0;
-
- // Return the ISA these features relate to.
- virtual InstructionSet GetInstructionSet() const = 0;
-
- // Return a bitmap that represents the features. ISA specific.
- virtual uint32_t AsBitmap() const = 0;
-
- // Return a string of the form "div,lpae" or "none".
- virtual std::string GetFeatureString() const = 0;
-
- // Down cast this ArmInstructionFeatures.
- const ArmInstructionSetFeatures* AsArmInstructionSetFeatures() const;
-
- virtual ~InstructionSetFeatures() {}
-
- protected:
- InstructionSetFeatures() {}
-
- private:
- DISALLOW_COPY_AND_ASSIGN(InstructionSetFeatures);
-};
-std::ostream& operator<<(std::ostream& os, const InstructionSetFeatures& rhs);
-
-// Instruction set features relevant to the ARM architecture.
-class ArmInstructionSetFeatures FINAL : public InstructionSetFeatures {
- public:
- // Process a CPU variant string like "krait" or "cortex-a15" and create InstructionSetFeatures.
- static const ArmInstructionSetFeatures* FromVariant(const std::string& variant,
- std::string* error_msg);
-
- // Parse a string of the form "div,lpae" and create an InstructionSetFeatures.
- static const ArmInstructionSetFeatures* FromFeatureString(const std::string& feature_list,
- std::string* error_msg);
-
- // Parse a bitmap and create an InstructionSetFeatures.
- static const ArmInstructionSetFeatures* FromBitmap(uint32_t bitmap);
-
- // Turn C pre-processor #defines into the equivalent instruction set features.
- static const ArmInstructionSetFeatures* FromCppDefines();
-
- // Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
- static const ArmInstructionSetFeatures* FromCpuInfo();
-
- // Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
- // InstructionSetFeatures.
- static const ArmInstructionSetFeatures* FromHwcap();
-
- // Use assembly tests of the current runtime (ie kRuntimeISA) to determine the
- // InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
- static const ArmInstructionSetFeatures* FromAssembly();
-
- bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
-
- InstructionSet GetInstructionSet() const OVERRIDE {
- return kArm;
- }
-
- uint32_t AsBitmap() const OVERRIDE;
-
- // Return a string of the form "div,lpae" or "none".
- std::string GetFeatureString() const OVERRIDE;
-
- // Is the divide instruction feature enabled?
- bool HasDivideInstruction() const {
- return has_div_;
- }
-
- // Is the Large Physical Address Extension (LPAE) instruction feature enabled? When true code can
- // be used that assumes double register loads and stores (ldrd, strd) don't tear.
- bool HasLpae() const {
- return has_lpae_;
- }
-
- virtual ~ArmInstructionSetFeatures() {}
-
- private:
- ArmInstructionSetFeatures(bool has_lpae, bool has_div)
- : has_lpae_(has_lpae), has_div_(has_div) {
- }
-
- // Bitmap positions for encoding features as a bitmap.
- enum {
- kDivBitfield = 1,
- kLpaeBitfield = 2,
- };
-
- const bool has_lpae_;
- const bool has_div_;
-
- DISALLOW_COPY_AND_ASSIGN(ArmInstructionSetFeatures);
-};
-
-// A class used for instruction set features on ISAs that don't yet have any features defined.
-class UnknownInstructionSetFeatures FINAL : public InstructionSetFeatures {
- public:
- static const UnknownInstructionSetFeatures* Unknown(InstructionSet isa) {
- return new UnknownInstructionSetFeatures(isa);
- }
-
- bool Equals(const InstructionSetFeatures* other) const OVERRIDE {
- return isa_ == other->GetInstructionSet();
- }
-
- InstructionSet GetInstructionSet() const OVERRIDE {
- return isa_;
- }
-
- uint32_t AsBitmap() const OVERRIDE {
- return 0;
- }
-
- std::string GetFeatureString() const OVERRIDE {
- return "none";
- }
-
- virtual ~UnknownInstructionSetFeatures() {}
-
- private:
- explicit UnknownInstructionSetFeatures(InstructionSet isa) : isa_(isa) {}
-
- const InstructionSet isa_;
-
- DISALLOW_COPY_AND_ASSIGN(UnknownInstructionSetFeatures);
-};
-
-// The following definitions create return types for two word-sized entities that will be passed
-// in registers so that memory operations for the interface trampolines can be avoided. The entities
-// are the resolved method and the pointer to the code to be invoked.
-//
-// On x86, ARM32 and MIPS, this is given for a *scalar* 64bit value. The definition thus *must* be
-// uint64_t or long long int.
-//
-// On x86_64 and ARM64, structs are decomposed for allocation, so we can create a structs of two
-// size_t-sized values.
-//
-// We need two operations:
-//
-// 1) A flag value that signals failure. The assembly stubs expect the lower part to be "0".
-// GetTwoWordFailureValue() will return a value that has lower part == 0.
-//
-// 2) A value that combines two word-sized values.
-// GetTwoWordSuccessValue() constructs this.
-//
-// IMPORTANT: If you use this to transfer object pointers, it is your responsibility to ensure
-// that the object does not move or the value is updated. Simple use of this is NOT SAFE
-// when the garbage collector can move objects concurrently. Ensure that required locks
-// are held when using!
-
-#if defined(__i386__) || defined(__arm__) || defined(__mips__)
-typedef uint64_t TwoWordReturn;
-
-// Encodes method_ptr==nullptr and code_ptr==nullptr
-static inline constexpr TwoWordReturn GetTwoWordFailureValue() {
- return 0;
-}
-
-// Use the lower 32b for the method pointer and the upper 32b for the code pointer.
-static inline TwoWordReturn GetTwoWordSuccessValue(uintptr_t hi, uintptr_t lo) {
- static_assert(sizeof(uint32_t) == sizeof(uintptr_t), "Unexpected size difference");
- uint32_t lo32 = lo;
- uint64_t hi64 = static_cast<uint64_t>(hi);
- return ((hi64 << 32) | lo32);
-}
-
-#elif defined(__x86_64__) || defined(__aarch64__)
-struct TwoWordReturn {
- uintptr_t lo;
- uintptr_t hi;
-};
-
-// Encodes method_ptr==nullptr. Leaves random value in code pointer.
-static inline TwoWordReturn GetTwoWordFailureValue() {
- TwoWordReturn ret;
- ret.lo = 0;
- return ret;
-}
-
-// Write values into their respective members.
-static inline TwoWordReturn GetTwoWordSuccessValue(uintptr_t hi, uintptr_t lo) {
- TwoWordReturn ret;
- ret.lo = lo;
- ret.hi = hi;
- return ret;
-}
-#else
-#error "Unsupported architecture"
-#endif
-
-} // namespace art
-
-#endif // ART_RUNTIME_INSTRUCTION_SET_H_
diff --git a/runtime/instruction_set_test.cc b/runtime/instruction_set_test.cc
deleted file mode 100644
index 3f2d16b..0000000
--- a/runtime/instruction_set_test.cc
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "instruction_set.h"
-
-#include "base/stringprintf.h"
-#include "common_runtime_test.h"
-
-namespace art {
-
-class InstructionSetTest : public CommonRuntimeTest {};
-
-TEST_F(InstructionSetTest, GetInstructionSetFromString) {
- EXPECT_EQ(kArm, GetInstructionSetFromString("arm"));
- EXPECT_EQ(kArm64, GetInstructionSetFromString("arm64"));
- EXPECT_EQ(kX86, GetInstructionSetFromString("x86"));
- EXPECT_EQ(kX86_64, GetInstructionSetFromString("x86_64"));
- EXPECT_EQ(kMips, GetInstructionSetFromString("mips"));
- EXPECT_EQ(kNone, GetInstructionSetFromString("none"));
- EXPECT_EQ(kNone, GetInstructionSetFromString("random-string"));
-}
-
-TEST_F(InstructionSetTest, GetInstructionSetString) {
- EXPECT_STREQ("arm", GetInstructionSetString(kArm));
- EXPECT_STREQ("arm", GetInstructionSetString(kThumb2));
- EXPECT_STREQ("arm64", GetInstructionSetString(kArm64));
- EXPECT_STREQ("x86", GetInstructionSetString(kX86));
- EXPECT_STREQ("x86_64", GetInstructionSetString(kX86_64));
- EXPECT_STREQ("mips", GetInstructionSetString(kMips));
- EXPECT_STREQ("none", GetInstructionSetString(kNone));
-}
-
-TEST_F(InstructionSetTest, TestRoundTrip) {
- EXPECT_EQ(kRuntimeISA, GetInstructionSetFromString(GetInstructionSetString(kRuntimeISA)));
-}
-
-TEST_F(InstructionSetTest, PointerSize) {
- EXPECT_EQ(sizeof(void*), GetInstructionSetPointerSize(kRuntimeISA));
-}
-
-TEST_F(InstructionSetTest, X86Features) {
- // Build features for a 32-bit x86 atom processor.
- std::string error_msg;
- std::unique_ptr<const InstructionSetFeatures> x86_features(
- InstructionSetFeatures::FromVariant(kX86, "atom", &error_msg));
- ASSERT_TRUE(x86_features.get() != nullptr) << error_msg;
- EXPECT_EQ(x86_features->GetInstructionSet(), kX86);
- EXPECT_TRUE(x86_features->Equals(x86_features.get()));
- EXPECT_STREQ("none", x86_features->GetFeatureString().c_str());
- EXPECT_EQ(x86_features->AsBitmap(), 0U);
-
- // Build features for a 32-bit x86 default processor.
- std::unique_ptr<const InstructionSetFeatures> x86_default_features(
- InstructionSetFeatures::FromFeatureString(kX86, "default", &error_msg));
- ASSERT_TRUE(x86_default_features.get() != nullptr) << error_msg;
- EXPECT_EQ(x86_default_features->GetInstructionSet(), kX86);
- EXPECT_TRUE(x86_default_features->Equals(x86_default_features.get()));
- EXPECT_STREQ("none", x86_default_features->GetFeatureString().c_str());
- EXPECT_EQ(x86_default_features->AsBitmap(), 0U);
-
- // Build features for a 64-bit x86-64 atom processor.
- std::unique_ptr<const InstructionSetFeatures> x86_64_features(
- InstructionSetFeatures::FromVariant(kX86_64, "atom", &error_msg));
- ASSERT_TRUE(x86_64_features.get() != nullptr) << error_msg;
- EXPECT_EQ(x86_64_features->GetInstructionSet(), kX86_64);
- EXPECT_TRUE(x86_64_features->Equals(x86_64_features.get()));
- EXPECT_STREQ("none", x86_64_features->GetFeatureString().c_str());
- EXPECT_EQ(x86_64_features->AsBitmap(), 0U);
-
- EXPECT_FALSE(x86_64_features->Equals(x86_features.get()));
- EXPECT_FALSE(x86_64_features->Equals(x86_default_features.get()));
- EXPECT_TRUE(x86_features->Equals(x86_default_features.get()));
-}
-
-TEST_F(InstructionSetTest, ArmFeaturesFromVariant) {
- // Build features for a 32-bit ARM krait processor.
- std::string error_msg;
- std::unique_ptr<const InstructionSetFeatures> krait_features(
- InstructionSetFeatures::FromVariant(kArm, "krait", &error_msg));
- ASSERT_TRUE(krait_features.get() != nullptr) << error_msg;
-
- ASSERT_EQ(krait_features->GetInstructionSet(), kArm);
- EXPECT_TRUE(krait_features->Equals(krait_features.get()));
- EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
- EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasLpae());
- EXPECT_STREQ("div,lpae", krait_features->GetFeatureString().c_str());
- EXPECT_EQ(krait_features->AsBitmap(), 3U);
-
- // Build features for a 32-bit ARM denver processor.
- std::unique_ptr<const InstructionSetFeatures> denver_features(
- InstructionSetFeatures::FromVariant(kArm, "denver", &error_msg));
- ASSERT_TRUE(denver_features.get() != nullptr) << error_msg;
-
- EXPECT_TRUE(denver_features->Equals(denver_features.get()));
- EXPECT_TRUE(denver_features->Equals(krait_features.get()));
- EXPECT_TRUE(krait_features->Equals(denver_features.get()));
- EXPECT_TRUE(denver_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
- EXPECT_TRUE(denver_features->AsArmInstructionSetFeatures()->HasLpae());
- EXPECT_STREQ("div,lpae", denver_features->GetFeatureString().c_str());
- EXPECT_EQ(denver_features->AsBitmap(), 3U);
-
- // Build features for a 32-bit ARMv7 processor.
- std::unique_ptr<const InstructionSetFeatures> arm7_features(
- InstructionSetFeatures::FromVariant(kArm, "arm7", &error_msg));
- ASSERT_TRUE(arm7_features.get() != nullptr) << error_msg;
-
- EXPECT_TRUE(arm7_features->Equals(arm7_features.get()));
- EXPECT_FALSE(arm7_features->Equals(krait_features.get()));
- EXPECT_FALSE(krait_features->Equals(arm7_features.get()));
- EXPECT_FALSE(arm7_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
- EXPECT_FALSE(arm7_features->AsArmInstructionSetFeatures()->HasLpae());
- EXPECT_STREQ("none", arm7_features->GetFeatureString().c_str());
- EXPECT_EQ(arm7_features->AsBitmap(), 0U);
-
- // ARM6 is not a supported architecture variant.
- std::unique_ptr<const InstructionSetFeatures> arm6_features(
- InstructionSetFeatures::FromVariant(kArm, "arm6", &error_msg));
- EXPECT_TRUE(arm6_features.get() == nullptr);
- EXPECT_NE(error_msg.size(), 0U);
-}
-
-TEST_F(InstructionSetTest, ArmFeaturesFromString) {
- // Build features for a 32-bit ARM with LPAE and div processor.
- std::string error_msg;
- std::unique_ptr<const InstructionSetFeatures> krait_features(
- InstructionSetFeatures::FromFeatureString(kArm, "lpae,div", &error_msg));
- ASSERT_TRUE(krait_features.get() != nullptr) << error_msg;
-
- ASSERT_EQ(krait_features->GetInstructionSet(), kArm);
- EXPECT_TRUE(krait_features->Equals(krait_features.get()));
- EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
- EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasLpae());
- EXPECT_STREQ("div,lpae", krait_features->GetFeatureString().c_str());
- EXPECT_EQ(krait_features->AsBitmap(), 3U);
-
- // Build features for a 32-bit ARM processor with LPAE and div flipped.
- std::unique_ptr<const InstructionSetFeatures> denver_features(
- InstructionSetFeatures::FromFeatureString(kArm, "div,lpae", &error_msg));
- ASSERT_TRUE(denver_features.get() != nullptr) << error_msg;
-
- EXPECT_TRUE(denver_features->Equals(denver_features.get()));
- EXPECT_TRUE(denver_features->Equals(krait_features.get()));
- EXPECT_TRUE(krait_features->Equals(denver_features.get()));
- EXPECT_TRUE(denver_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
- EXPECT_TRUE(denver_features->AsArmInstructionSetFeatures()->HasLpae());
- EXPECT_STREQ("div,lpae", denver_features->GetFeatureString().c_str());
- EXPECT_EQ(denver_features->AsBitmap(), 3U);
-
- // Build features for a 32-bit default ARM processor.
- std::unique_ptr<const InstructionSetFeatures> arm7_features(
- InstructionSetFeatures::FromFeatureString(kArm, "default", &error_msg));
- ASSERT_TRUE(arm7_features.get() != nullptr) << error_msg;
-
- EXPECT_TRUE(arm7_features->Equals(arm7_features.get()));
- EXPECT_FALSE(arm7_features->Equals(krait_features.get()));
- EXPECT_FALSE(krait_features->Equals(arm7_features.get()));
- EXPECT_FALSE(arm7_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
- EXPECT_FALSE(arm7_features->AsArmInstructionSetFeatures()->HasLpae());
- EXPECT_STREQ("none", arm7_features->GetFeatureString().c_str());
- EXPECT_EQ(arm7_features->AsBitmap(), 0U);
-}
-
-#ifdef HAVE_ANDROID_OS
-#include "cutils/properties.h"
-
-TEST_F(InstructionSetTest, FeaturesFromSystemPropertyVariant) {
- // Take the default set of instruction features from the build.
- std::unique_ptr<const InstructionSetFeatures> instruction_set_features(
- InstructionSetFeatures::FromCppDefines());
-
- // Read the features property.
- std::string key = StringPrintf("dalvik.vm.isa.%s.variant", GetInstructionSetString(kRuntimeISA));
- char dex2oat_isa_variant[PROPERTY_VALUE_MAX];
- if (property_get(key.c_str(), dex2oat_isa_variant, nullptr) > 0) {
- // Use features from property to build InstructionSetFeatures and check against build's
- // features.
- std::string error_msg;
- std::unique_ptr<const InstructionSetFeatures> property_features(
- InstructionSetFeatures::FromVariant(kRuntimeISA, dex2oat_isa_variant, &error_msg));
- ASSERT_TRUE(property_features.get() != nullptr) << error_msg;
-
- EXPECT_TRUE(property_features->Equals(instruction_set_features.get()))
- << "System property features: " << *property_features.get()
- << "\nFeatures from build: " << *instruction_set_features.get();
- }
-}
-
-TEST_F(InstructionSetTest, FeaturesFromSystemPropertyString) {
- // Take the default set of instruction features from the build.
- std::unique_ptr<const InstructionSetFeatures> instruction_set_features(
- InstructionSetFeatures::FromCppDefines());
-
- // Read the features property.
- std::string key = StringPrintf("dalvik.vm.isa.%s.features", GetInstructionSetString(kRuntimeISA));
- char dex2oat_isa_features[PROPERTY_VALUE_MAX];
- if (property_get(key.c_str(), dex2oat_isa_features, nullptr) > 0) {
- // Use features from property to build InstructionSetFeatures and check against build's
- // features.
- std::string error_msg;
- std::unique_ptr<const InstructionSetFeatures> property_features(
- InstructionSetFeatures::FromFeatureString(kRuntimeISA, dex2oat_isa_features, &error_msg));
- ASSERT_TRUE(property_features.get() != nullptr) << error_msg;
-
- EXPECT_TRUE(property_features->Equals(instruction_set_features.get()))
- << "System property features: " << *property_features.get()
- << "\nFeatures from build: " << *instruction_set_features.get();
- }
-}
-#endif
-
-#if defined(__arm__)
-TEST_F(InstructionSetTest, DISABLED_FeaturesFromCpuInfo) {
- LOG(WARNING) << "Test disabled due to buggy ARM kernels";
-#else
-TEST_F(InstructionSetTest, FeaturesFromCpuInfo) {
-#endif
- // Take the default set of instruction features from the build.
- std::unique_ptr<const InstructionSetFeatures> instruction_set_features(
- InstructionSetFeatures::FromCppDefines());
-
- // Check we get the same instruction set features using /proc/cpuinfo.
- std::unique_ptr<const InstructionSetFeatures> cpuinfo_features(
- InstructionSetFeatures::FromCpuInfo());
- EXPECT_TRUE(cpuinfo_features->Equals(instruction_set_features.get()))
- << "CPU Info features: " << *cpuinfo_features.get()
- << "\nFeatures from build: " << *instruction_set_features.get();
-}
-
-#if defined(__arm__)
-TEST_F(InstructionSetTest, DISABLED_FeaturesFromHwcap) {
- LOG(WARNING) << "Test disabled due to buggy ARM kernels";
-#else
-TEST_F(InstructionSetTest, FeaturesFromHwcap) {
-#endif
- // Take the default set of instruction features from the build.
- std::unique_ptr<const InstructionSetFeatures> instruction_set_features(
- InstructionSetFeatures::FromCppDefines());
-
- // Check we get the same instruction set features using AT_HWCAP.
- std::unique_ptr<const InstructionSetFeatures> hwcap_features(
- InstructionSetFeatures::FromHwcap());
- EXPECT_TRUE(hwcap_features->Equals(instruction_set_features.get()))
- << "Hwcap features: " << *hwcap_features.get()
- << "\nFeatures from build: " << *instruction_set_features.get();
-}
-
-
-#if defined(__arm__)
-TEST_F(InstructionSetTest, DISABLED_FeaturesFromAssembly) {
- LOG(WARNING) << "Test disabled due to buggy ARM kernels";
-#else
-TEST_F(InstructionSetTest, FeaturesFromAssembly) {
-#endif
- // Take the default set of instruction features from the build.
- std::unique_ptr<const InstructionSetFeatures> instruction_set_features(
- InstructionSetFeatures::FromCppDefines());
-
- // Check we get the same instruction set features using assembly tests.
- std::unique_ptr<const InstructionSetFeatures> assembly_features(
- InstructionSetFeatures::FromAssembly());
- EXPECT_TRUE(assembly_features->Equals(instruction_set_features.get()))
- << "Assembly features: " << *assembly_features.get()
- << "\nFeatures from build: " << *instruction_set_features.get();
-}
-
-} // namespace art
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 646c7ae..369039d 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -21,8 +21,8 @@
#include <list>
#include <map>
+#include "arch/instruction_set.h"
#include "atomic.h"
-#include "instruction_set.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "gc_root.h"
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index 19e03d8..a5abce6 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -795,13 +795,13 @@
return JNI_OK;
}
-extern "C" jint JNI_GetCreatedJavaVMs(JavaVM** vms, jsize, jsize* vm_count) {
+extern "C" jint JNI_GetCreatedJavaVMs(JavaVM** vms_buf, jsize buf_len, jsize* vm_count) {
Runtime* runtime = Runtime::Current();
- if (runtime == nullptr) {
+ if (runtime == nullptr || buf_len == 0) {
*vm_count = 0;
} else {
*vm_count = 1;
- vms[0] = runtime->GetJavaVM();
+ vms_buf[0] = runtime->GetJavaVM();
}
return JNI_OK;
}
diff --git a/runtime/java_vm_ext_test.cc b/runtime/java_vm_ext_test.cc
new file mode 100644
index 0000000..60c6a5c
--- /dev/null
+++ b/runtime/java_vm_ext_test.cc
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni_internal.h"
+
+#include <pthread.h>
+
+#include "common_runtime_test.h"
+#include "java_vm_ext.h"
+#include "runtime.h"
+
+namespace art {
+
+class JavaVmExtTest : public CommonRuntimeTest {
+ protected:
+ virtual void SetUp() {
+ CommonRuntimeTest::SetUp();
+
+ vm_ = Runtime::Current()->GetJavaVM();
+ }
+
+
+ virtual void TearDown() OVERRIDE {
+ CommonRuntimeTest::TearDown();
+ }
+
+ JavaVMExt* vm_;
+};
+
+TEST_F(JavaVmExtTest, JNI_GetDefaultJavaVMInitArgs) {
+ jint err = JNI_GetDefaultJavaVMInitArgs(nullptr);
+ EXPECT_EQ(JNI_ERR, err);
+}
+
+TEST_F(JavaVmExtTest, JNI_GetCreatedJavaVMs) {
+ JavaVM* vms_buf[1];
+ jsize num_vms;
+ jint ok = JNI_GetCreatedJavaVMs(vms_buf, arraysize(vms_buf), &num_vms);
+ EXPECT_EQ(JNI_OK, ok);
+ EXPECT_EQ(1, num_vms);
+ EXPECT_EQ(vms_buf[0], vm_);
+}
+
+static bool gSmallStack = false;
+static bool gAsDaemon = false;
+
+static void* attach_current_thread_callback(void* arg ATTRIBUTE_UNUSED) {
+ JavaVM* vms_buf[1];
+ jsize num_vms;
+ JNIEnv* env;
+ jint ok = JNI_GetCreatedJavaVMs(vms_buf, arraysize(vms_buf), &num_vms);
+ EXPECT_EQ(JNI_OK, ok);
+ if (ok == JNI_OK) {
+ if (!gAsDaemon) {
+ ok = vms_buf[0]->AttachCurrentThread(&env, nullptr);
+ } else {
+ ok = vms_buf[0]->AttachCurrentThreadAsDaemon(&env, nullptr);
+ }
+ EXPECT_EQ(gSmallStack ? JNI_ERR : JNI_OK, ok);
+ if (ok == JNI_OK) {
+ ok = vms_buf[0]->DetachCurrentThread();
+ EXPECT_EQ(JNI_OK, ok);
+ }
+ }
+ return nullptr;
+}
+
+TEST_F(JavaVmExtTest, AttachCurrentThread) {
+ pthread_t pthread;
+ const char* reason = __PRETTY_FUNCTION__;
+ gSmallStack = false;
+ gAsDaemon = false;
+ CHECK_PTHREAD_CALL(pthread_create, (&pthread, nullptr, attach_current_thread_callback,
+ nullptr), reason);
+ void* ret_val;
+ CHECK_PTHREAD_CALL(pthread_join, (pthread, &ret_val), reason);
+ EXPECT_EQ(ret_val, nullptr);
+}
+
+TEST_F(JavaVmExtTest, AttachCurrentThreadAsDaemon) {
+ pthread_t pthread;
+ const char* reason = __PRETTY_FUNCTION__;
+ gSmallStack = false;
+ gAsDaemon = true;
+ CHECK_PTHREAD_CALL(pthread_create, (&pthread, nullptr, attach_current_thread_callback,
+ nullptr), reason);
+ void* ret_val;
+ CHECK_PTHREAD_CALL(pthread_join, (pthread, &ret_val), reason);
+ EXPECT_EQ(ret_val, nullptr);
+}
+
+TEST_F(JavaVmExtTest, AttachCurrentThread_SmallStack) {
+ pthread_t pthread;
+ pthread_attr_t attr;
+ const char* reason = __PRETTY_FUNCTION__;
+ gSmallStack = true;
+ gAsDaemon = false;
+ CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), reason);
+ CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, PTHREAD_STACK_MIN), reason);
+ CHECK_PTHREAD_CALL(pthread_create, (&pthread, &attr, attach_current_thread_callback,
+ nullptr), reason);
+ CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), reason);
+ void* ret_val;
+ CHECK_PTHREAD_CALL(pthread_join, (pthread, &ret_val), reason);
+ EXPECT_EQ(ret_val, nullptr);
+}
+
+TEST_F(JavaVmExtTest, DetachCurrentThread) {
+ JNIEnv* env;
+ jint ok = vm_->AttachCurrentThread(&env, nullptr);
+ ASSERT_EQ(JNI_OK, ok);
+ ok = vm_->DetachCurrentThread();
+ EXPECT_EQ(JNI_OK, ok);
+
+ jint err = vm_->DetachCurrentThread();
+ EXPECT_EQ(JNI_ERR, err);
+}
+
+} // namespace art
diff --git a/runtime/jdwp/jdwp_socket.cc b/runtime/jdwp/jdwp_socket.cc
index e8c0856..7119ce5 100644
--- a/runtime/jdwp/jdwp_socket.cc
+++ b/runtime/jdwp/jdwp_socket.cc
@@ -272,7 +272,7 @@
/*
* Start by resolving the host name.
*/
-#ifdef HAVE_GETHOSTBYNAME_R
+#if defined(__linux__)
hostent he;
char auxBuf[128];
int error;
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index b57cc17..ccad137 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -2019,14 +2019,4 @@
}
}
-TEST_F(JniInternalTest, DetachCurrentThread) {
- CleanUpJniEnv(); // cleanup now so TearDown won't have junk from wrong JNIEnv
- jint ok = vm_->DetachCurrentThread();
- EXPECT_EQ(JNI_OK, ok);
-
- jint err = vm_->DetachCurrentThread();
- EXPECT_EQ(JNI_ERR, err);
- vm_->AttachCurrentThread(&env_, nullptr); // need attached thread for CommonRuntimeTest::TearDown
-}
-
} // namespace art
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 5f72dbe..a69d37e 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -402,6 +402,36 @@
return GetFieldObject<ObjectArray<ArtField>>(OFFSET_OF_OBJECT_MEMBER(Class, ifields_));
}
+inline MemberOffset Class::GetFirstReferenceInstanceFieldOffset() {
+ Class* super_class = GetSuperClass();
+ return (super_class != nullptr)
+ ? MemberOffset(RoundUp(super_class->GetObjectSize(),
+ sizeof(mirror::HeapReference<mirror::Object>)))
+ : ClassOffset();
+}
+
+inline MemberOffset Class::GetFirstReferenceStaticFieldOffset() {
+ DCHECK(IsResolved());
+ uint32_t base = sizeof(mirror::Class); // Static fields come after the class.
+ if (ShouldHaveEmbeddedImtAndVTable()) {
+ // Static fields come after the embedded tables.
+ base = mirror::Class::ComputeClassSize(true, GetEmbeddedVTableLength(),
+ 0, 0, 0, 0, 0);
+ }
+ return MemberOffset(base);
+}
+
+inline MemberOffset Class::GetFirstReferenceStaticFieldOffsetDuringLinking() {
+ DCHECK(IsLoaded());
+ uint32_t base = sizeof(mirror::Class); // Static fields come after the class.
+ if (ShouldHaveEmbeddedImtAndVTable()) {
+ // Static fields come after the embedded tables.
+ base = mirror::Class::ComputeClassSize(true, GetVTableDuringLinking()->GetLength(),
+ 0, 0, 0, 0, 0);
+ }
+ return MemberOffset(base);
+}
+
inline void Class::SetIFields(ObjectArray<ArtField>* new_ifields)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(NULL == GetFieldObject<ObjectArray<ArtField>>(OFFSET_OF_OBJECT_MEMBER(Class, ifields_)));
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 4f1af44..f45ea85 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -837,6 +837,9 @@
void SetReferenceInstanceOffsets(uint32_t new_reference_offsets)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Get the offset of the first reference instance field. Other reference instance fields follow.
+ MemberOffset GetFirstReferenceInstanceFieldOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// Returns the number of static fields containing reference types.
uint32_t NumReferenceStaticFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(IsResolved() || IsErroneous());
@@ -853,6 +856,13 @@
SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_), new_num);
}
+ // Get the offset of the first reference static field. Other reference static fields follow.
+ MemberOffset GetFirstReferenceStaticFieldOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Get the offset of the first reference static field. Other reference static fields follow.
+ MemberOffset GetFirstReferenceStaticFieldOffsetDuringLinking()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// Gets the static fields of the class.
ObjectArray<ArtField>* GetSFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index c451764..4199eef 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -911,13 +911,19 @@
klass = kIsStatic ? nullptr : klass->GetSuperClass()) {
size_t num_reference_fields =
kIsStatic ? klass->NumReferenceStaticFields() : klass->NumReferenceInstanceFields();
+ if (num_reference_fields == 0u) {
+ continue;
+ }
+ MemberOffset field_offset = kIsStatic
+ ? klass->GetFirstReferenceStaticFieldOffset()
+ : klass->GetFirstReferenceInstanceFieldOffset();
for (size_t i = 0; i < num_reference_fields; ++i) {
- mirror::ArtField* field = kIsStatic ? klass->GetStaticField(i) : klass->GetInstanceField(i);
- MemberOffset field_offset = field->GetOffset();
// TODO: Do a simpler check?
if (kVisitClass || field_offset.Uint32Value() != ClassOffset().Uint32Value()) {
visitor(this, field_offset, kIsStatic);
}
+ field_offset = MemberOffset(field_offset.Uint32Value() +
+ sizeof(mirror::HeapReference<mirror::Object>));
}
}
}
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 4227723..fa1f226 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -200,10 +200,11 @@
for (Class* cur = c; cur != NULL; cur = cur->GetSuperClass()) {
ObjectArray<ArtField>* fields = cur->GetIFields();
if (fields != NULL) {
- size_t num_ref_ifields = cur->NumReferenceInstanceFields();
- for (size_t i = 0; i < num_ref_ifields; ++i) {
+ size_t num_ifields = fields->GetLength();
+ for (size_t i = 0; i < num_ifields; ++i) {
ArtField* field = fields->Get(i);
if (field->GetOffset().Int32Value() == field_offset.Int32Value()) {
+ CHECK_NE(field->GetTypeAsPrimitiveType(), Primitive::kPrimNot);
StackHandleScope<1> hs(Thread::Current());
FieldHelper fh(hs.NewHandle(field));
CHECK(fh.GetType()->IsAssignableFrom(new_value->GetClass()));
@@ -219,10 +220,11 @@
if (IsClass()) {
ObjectArray<ArtField>* fields = AsClass()->GetSFields();
if (fields != NULL) {
- size_t num_ref_sfields = AsClass()->NumReferenceStaticFields();
- for (size_t i = 0; i < num_ref_sfields; ++i) {
+ size_t num_sfields = fields->GetLength();
+ for (size_t i = 0; i < num_sfields; ++i) {
ArtField* field = fields->Get(i);
if (field->GetOffset().Int32Value() == field_offset.Int32Value()) {
+ CHECK_NE(field->GetTypeAsPrimitiveType(), Primitive::kPrimNot);
StackHandleScope<1> hs(Thread::Current());
FieldHelper fh(hs.NewHandle(field));
CHECK(fh.GetType()->IsAssignableFrom(new_value->GetClass()));
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 6445b88..233267b 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -655,8 +655,6 @@
Thread* owner;
{
ScopedThreadStateChange tsc(self, kBlocked);
- // Take suspend thread lock to avoid races with threads trying to suspend this one.
- MutexLock mu(self, *Locks::thread_list_suspend_thread_lock_);
owner = thread_list->SuspendThreadByThreadId(owner_thread_id, false, &timed_out);
}
if (owner != nullptr) {
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index f6e2b21..d40d64b 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -17,8 +17,14 @@
#include "dalvik_system_VMRuntime.h"
#include <limits.h>
+#include <ScopedUtfChars.h>
-#include "ScopedUtfChars.h"
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+#include "toStringArray.h"
+#pragma GCC diagnostic pop
+
+#include "arch/instruction_set.h"
#include "class_linker-inl.h"
#include "common_throws.h"
#include "debugger.h"
@@ -28,7 +34,6 @@
#include "gc/heap.h"
#include "gc/space/dlmalloc_space.h"
#include "gc/space/image_space.h"
-#include "instruction_set.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "mirror/art_method-inl.h"
@@ -41,11 +46,6 @@
#include "thread.h"
#include "thread_list.h"
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wshadow"
-#include "toStringArray.h"
-#pragma GCC diagnostic pop
-
namespace art {
static jfloat VMRuntime_getTargetHeapUtilization(JNIEnv*, jobject) {
diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc
index e396dad..2cdc68f 100644
--- a/runtime/native/dalvik_system_VMStack.cc
+++ b/runtime/native/dalvik_system_VMStack.cc
@@ -38,12 +38,7 @@
soa.Self()->TransitionFromRunnableToSuspended(kNative);
ThreadList* thread_list = Runtime::Current()->GetThreadList();
bool timed_out;
- Thread* thread;
- {
- // Take suspend thread lock to avoid races with threads trying to suspend this one.
- MutexLock mu(soa.Self(), *Locks::thread_list_suspend_thread_lock_);
- thread = thread_list->SuspendThreadByPeer(peer, true, false, &timed_out);
- }
+ Thread* thread = thread_list->SuspendThreadByPeer(peer, true, false, &timed_out);
if (thread != nullptr) {
// Must be runnable to create returned array.
CHECK_EQ(soa.Self()->TransitionFromSuspendedToRunnable(), kNative);
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index 0966954..f1a04cb 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -18,8 +18,8 @@
#include <stdlib.h>
+#include "arch/instruction_set.h"
#include "debugger.h"
-#include "instruction_set.h"
#include "java_vm_ext.h"
#include "jni_internal.h"
#include "JNIHelp.h"
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index 0722a24..420e9df 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -133,11 +133,7 @@
ThreadList* thread_list = Runtime::Current()->GetThreadList();
bool timed_out;
// Take suspend thread lock to avoid races with threads trying to suspend this one.
- Thread* thread;
- {
- MutexLock mu(self, *Locks::thread_list_suspend_thread_lock_);
- thread = thread_list->SuspendThreadByPeer(peer, true, false, &timed_out);
- }
+ Thread* thread = thread_list->SuspendThreadByPeer(peer, true, false, &timed_out);
if (thread != NULL) {
{
ScopedObjectAccess soa(env);
diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
index b74430f..987427e 100644
--- a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
+++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
@@ -63,12 +63,7 @@
}
// Suspend thread to build stack trace.
- Thread* thread;
- {
- // Take suspend thread lock to avoid races with threads trying to suspend this one.
- MutexLock mu(self, *Locks::thread_list_suspend_thread_lock_);
- thread = thread_list->SuspendThreadByThreadId(thin_lock_id, false, &timed_out);
- }
+ Thread* thread = thread_list->SuspendThreadByThreadId(thin_lock_id, false, &timed_out);
if (thread != nullptr) {
{
ScopedObjectAccess soa(env);
diff --git a/runtime/oat.cc b/runtime/oat.cc
index 0749c06..2f7357f 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -15,15 +15,17 @@
*/
#include "oat.h"
-#include "utils.h"
#include <string.h>
#include <zlib.h>
+#include "arch/instruction_set_features.h"
+#include "utils.h"
+
namespace art {
const uint8_t OatHeader::kOatMagic[] = { 'o', 'a', 't', '\n' };
-const uint8_t OatHeader::kOatVersion[] = { '0', '4', '6', '\0' };
+const uint8_t OatHeader::kOatVersion[] = { '0', '4', '7', '\0' };
static size_t ComputeOatHeaderSize(const SafeMap<std::string, std::string>* variable_data) {
size_t estimate = 0U;
diff --git a/runtime/oat.h b/runtime/oat.h
index f577b07..8fb02b8 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -19,14 +19,16 @@
#include <vector>
+#include "arch/instruction_set.h"
#include "base/macros.h"
#include "dex_file.h"
-#include "instruction_set.h"
#include "quick/quick_method_frame_info.h"
#include "safe_map.h"
namespace art {
+class InstructionSetFeatures;
+
class PACKED(4) OatHeader {
public:
static const uint8_t kOatMagic[4];
diff --git a/runtime/parsed_options.h b/runtime/parsed_options.h
index 26a2f31..9294868 100644
--- a/runtime/parsed_options.h
+++ b/runtime/parsed_options.h
@@ -25,7 +25,7 @@
#include "globals.h"
#include "gc/collector_type.h"
#include "gc/space/large_object_space.h"
-#include "instruction_set.h"
+#include "arch/instruction_set.h"
#include "profiler_options.h"
namespace art {
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index d338ad7..78c6542 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -37,6 +37,7 @@
#include "arch/arm/registers_arm.h"
#include "arch/arm64/quick_method_frame_info_arm64.h"
#include "arch/arm64/registers_arm64.h"
+#include "arch/instruction_set_features.h"
#include "arch/mips/quick_method_frame_info_mips.h"
#include "arch/mips/registers_mips.h"
#include "arch/x86/quick_method_frame_info_x86.h"
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 3cbe1e5..39fd910 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -26,11 +26,11 @@
#include <utility>
#include <vector>
+#include "arch/instruction_set.h"
#include "base/allocator.h"
#include "compiler_callbacks.h"
#include "gc_root.h"
#include "instrumentation.h"
-#include "instruction_set.h"
#include "jobject_comparator.h"
#include "object_callbacks.h"
#include "offsets.h"
diff --git a/runtime/signal_catcher.cc b/runtime/signal_catcher.cc
index d4ec803..d448460 100644
--- a/runtime/signal_catcher.cc
+++ b/runtime/signal_catcher.cc
@@ -27,10 +27,10 @@
#include <sstream>
+#include "arch/instruction_set.h"
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
#include "gc/heap.h"
-#include "instruction_set.h"
#include "os.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
diff --git a/runtime/stack.h b/runtime/stack.h
index 66c840d..1d772e6 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -20,8 +20,8 @@
#include <stdint.h>
#include <string>
+#include "arch/instruction_set.h"
#include "dex_file.h"
-#include "instruction_set.h"
#include "mirror/object_reference.h"
#include "throw_location.h"
#include "utils.h"
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index 94f7585..e30e745 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -85,7 +85,7 @@
bool bad_mutexes_held = false;
for (int i = kLockLevelCount - 1; i >= 0; --i) {
// We expect no locks except the mutator_lock_ or thread list suspend thread lock.
- if (i != kMutatorLock && i != kThreadListSuspendThreadLock) {
+ if (i != kMutatorLock) {
BaseMutex* held_mutex = GetHeldMutex(static_cast<LockLevel>(i));
if (held_mutex != NULL) {
LOG(ERROR) << "holding \"" << held_mutex->GetName()
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 2c828f1..c769faf 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -158,7 +158,7 @@
// Check that if we got here we cannot be shutting down (as shutdown should never have started
// while threads are being born).
CHECK(!runtime->IsShuttingDownLocked());
- self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
+ CHECK(self->Init(runtime->GetThreadList(), runtime->GetJavaVM()));
Runtime::Current()->EndThreadBirth();
}
{
@@ -348,40 +348,46 @@
}
}
-void Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm) {
+bool Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm) {
// This function does all the initialization that must be run by the native thread it applies to.
// (When we create a new thread from managed code, we allocate the Thread* in Thread::Create so
// we can handshake with the corresponding native thread when it's ready.) Check this native
// thread hasn't been through here already...
CHECK(Thread::Current() == nullptr);
+
+ // Set pthread_self_ ahead of pthread_setspecific, that makes Thread::Current function, this
+ // avoids pthread_self_ ever being invalid when discovered from Thread::Current().
+ tlsPtr_.pthread_self = pthread_self();
+ CHECK(is_started_);
+
SetUpAlternateSignalStack();
+ if (!InitStackHwm()) {
+ return false;
+ }
InitCpu();
InitTlsEntryPoints();
RemoveSuspendTrigger();
InitCardTable();
InitTid();
- // Set pthread_self_ ahead of pthread_setspecific, that makes Thread::Current function, this
- // avoids pthread_self_ ever being invalid when discovered from Thread::Current().
- tlsPtr_.pthread_self = pthread_self();
- CHECK(is_started_);
+
CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, this), "attach self");
DCHECK_EQ(Thread::Current(), this);
tls32_.thin_lock_thread_id = thread_list->AllocThreadId(this);
- InitStackHwm();
tlsPtr_.jni_env = new JNIEnvExt(this, java_vm);
thread_list->Register(this);
+ return true;
}
Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_group,
bool create_peer) {
- Thread* self;
Runtime* runtime = Runtime::Current();
if (runtime == nullptr) {
LOG(ERROR) << "Thread attaching to non-existent runtime: " << thread_name;
return nullptr;
}
+ Thread* self;
{
MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
if (runtime->IsShuttingDownLocked()) {
@@ -390,8 +396,12 @@
} else {
Runtime::Current()->StartThreadBirth();
self = new Thread(as_daemon);
- self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
+ bool init_success = self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
Runtime::Current()->EndThreadBirth();
+ if (!init_success) {
+ delete self;
+ return nullptr;
+ }
}
}
@@ -432,7 +442,7 @@
}
ScopedLocalRef<jobject> thread_name(env, env->NewStringUTF(name));
// Add missing null check in case of OOM b/18297817
- if (thread_name.get() == nullptr) {
+ if (name != nullptr && thread_name.get() == nullptr) {
CHECK(IsExceptionPending());
return;
}
@@ -499,7 +509,7 @@
Dbg::DdmSendThreadNotification(this, CHUNK_TYPE("THNM"));
}
-void Thread::InitStackHwm() {
+bool Thread::InitStackHwm() {
void* read_stack_base;
size_t read_stack_size;
size_t read_guard_size;
@@ -521,8 +531,10 @@
uint32_t min_stack = GetStackOverflowReservedBytes(kRuntimeISA) + kStackOverflowProtectedSize
+ 4 * KB;
if (read_stack_size <= min_stack) {
- LOG(FATAL) << "Attempt to attach a thread with a too-small stack (" << read_stack_size
- << " bytes)";
+ // Note, as we know the stack is small, avoid operations that could use a lot of stack.
+ LogMessage::LogLineLowStack(__PRETTY_FUNCTION__, __LINE__, ERROR,
+ "Attempt to attach a thread with a too-small stack");
+ return false;
}
// Set stack_end_ to the bottom of the stack saving space of stack overflows
@@ -547,6 +559,8 @@
// Sanity check.
int stack_variable;
CHECK_GT(&stack_variable, reinterpret_cast<void*>(tlsPtr_.stack_end));
+
+ return true;
}
void Thread::ShortDump(std::ostream& os) const {
@@ -1047,7 +1061,8 @@
}
// Allocate a TLS slot.
- CHECK_PTHREAD_CALL(pthread_key_create, (&Thread::pthread_key_self_, Thread::ThreadExitCallback), "self key");
+ CHECK_PTHREAD_CALL(pthread_key_create, (&Thread::pthread_key_self_, Thread::ThreadExitCallback),
+ "self key");
// Double-check the TLS slot allocation.
if (pthread_getspecific(pthread_key_self_) != nullptr) {
diff --git a/runtime/thread.h b/runtime/thread.h
index 89aee04..b69d2f4 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -25,6 +25,7 @@
#include <setjmp.h>
#include <string>
+#include "arch/instruction_set.h"
#include "atomic.h"
#include "base/macros.h"
#include "base/mutex.h"
@@ -34,7 +35,6 @@
#include "entrypoints/quick/quick_entrypoints.h"
#include "globals.h"
#include "handle_scope.h"
-#include "instruction_set.h"
#include "jvalue.h"
#include "object_callbacks.h"
#include "offsets.h"
@@ -893,14 +893,14 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void RemoveFromThreadGroup(ScopedObjectAccess& soa) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void Init(ThreadList*, JavaVMExt*) EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
+ bool Init(ThreadList*, JavaVMExt*) EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
void InitCardTable();
void InitCpu();
void CleanupCpu();
void InitTlsEntryPoints();
void InitTid();
void InitPthreadKeySelf();
- void InitStackHwm();
+ bool InitStackHwm();
void SetUpAlternateSignalStack();
void TearDownAlternateSignalStack();
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 675ce9a..5ff90d6 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -530,6 +530,12 @@
{
MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
if (request_suspension) {
+ if (self->GetSuspendCount() > 0) {
+      // We hold the suspend count lock but another thread is trying to suspend us. It's not
+ // safe to try to suspend another thread in case we get a cycle. Start the loop again
+ // which will allow this thread to be suspended.
+ continue;
+ }
thread->ModifySuspendCount(self, +1, debug_suspension);
request_suspension = false;
did_suspend_request = true;
@@ -608,6 +614,12 @@
{
MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
if (suspended_thread == nullptr) {
+ if (self->GetSuspendCount() > 0) {
+ // We hold the suspend count lock but another thread is trying to suspend us. Its not
+ // safe to try to suspend another thread in case we get a cycle. Start the loop again
+ // which will allow this thread to be suspended.
+ continue;
+ }
thread->ModifySuspendCount(self, +1, debug_suspension);
suspended_thread = thread;
} else {
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index a7f2c53..13684c7 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -68,7 +68,6 @@
// is set to true.
Thread* SuspendThreadByPeer(jobject peer, bool request_suspension, bool debug_suspension,
bool* timed_out)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_suspend_thread_lock_)
LOCKS_EXCLUDED(Locks::mutator_lock_,
Locks::thread_list_lock_,
Locks::thread_suspend_count_lock_);
@@ -78,7 +77,6 @@
// the thread terminating. Note that as thread ids are recycled this may not suspend the expected
// thread, that may be terminating. If the suspension times out then *timeout is set to true.
Thread* SuspendThreadByThreadId(uint32_t thread_id, bool debug_suspension, bool* timed_out)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_suspend_thread_lock_)
LOCKS_EXCLUDED(Locks::mutator_lock_,
Locks::thread_list_lock_,
Locks::thread_suspend_count_lock_);
diff --git a/runtime/utils.cc b/runtime/utils.cc
index f2d710d..9a4c875 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -1120,7 +1120,8 @@
void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix,
mirror::ArtMethod* current_method) {
-#ifdef __linux__
+ // TODO: enable on __linux__ b/15446488.
+#if 0
// b/18119146
if (RUNNING_ON_VALGRIND != 0) {
return;
diff --git a/runtime/utils.h b/runtime/utils.h
index 669fe6c..d83013a 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -24,10 +24,10 @@
#include <string>
#include <vector>
+#include "arch/instruction_set.h"
#include "base/logging.h"
#include "base/mutex.h"
#include "globals.h"
-#include "instruction_set.h"
#include "primitive.h"
namespace art {
@@ -115,6 +115,20 @@
return (-limit <= value) && (value < limit);
}
+static inline bool IsInt32(int N, int32_t value) {
+ CHECK_LT(0, N);
+ CHECK_LT(static_cast<size_t>(N), 8 * sizeof(int32_t));
+ int32_t limit = static_cast<int32_t>(1) << (N - 1);
+ return (-limit <= value) && (value < limit);
+}
+
+static inline bool IsInt64(int N, int64_t value) {
+ CHECK_LT(0, N);
+ CHECK_LT(static_cast<size_t>(N), 8 * sizeof(int64_t));
+ int64_t limit = static_cast<int64_t>(1) << (N - 1);
+ return (-limit <= value) && (value < limit);
+}
+
static inline bool IsUint(int N, intptr_t value) {
CHECK_LT(0, N);
CHECK_LT(N, kBitsPerIntPtrT);
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 357acf0..0c4bf3c 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -18,32 +18,26 @@
#define ART_RUNTIME_VERIFIER_METHOD_VERIFIER_H_
#include <memory>
-#include <set>
#include <vector>
-#include "base/casts.h"
#include "base/macros.h"
-#include "base/stl_util.h"
-#include "class_reference.h"
#include "dex_file.h"
-#include "dex_instruction.h"
#include "handle.h"
#include "instruction_flags.h"
#include "method_reference.h"
-#include "reg_type.h"
#include "reg_type_cache.h"
-#include "register_line.h"
-#include "safe_map.h"
namespace art {
+class Instruction;
struct ReferenceMap2Visitor;
-template<class T> class Handle;
namespace verifier {
-class MethodVerifier;
class DexPcToReferenceMap;
+class MethodVerifier;
+class RegisterLine;
+class RegType;
/*
* "Direct" and "virtual" methods are stored independently. The type of call used to invoke the
@@ -128,6 +122,8 @@
private:
std::unique_ptr<RegisterLine*[]> register_lines_;
size_t size_;
+
+ DISALLOW_COPY_AND_ASSIGN(PcToRegisterLineTable);
};
// The verifier
@@ -733,6 +729,8 @@
// even though we might detect to be a compiler. Should only be set when running
// VerifyMethodAndDump.
const bool verify_to_dump_;
+
+ DISALLOW_COPY_AND_ASSIGN(MethodVerifier);
};
std::ostream& operator<<(std::ostream& os, const MethodVerifier::FailureKind& rhs);
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index 34d6caa..05958b5 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -17,17 +17,14 @@
#ifndef ART_RUNTIME_VERIFIER_REG_TYPE_H_
#define ART_RUNTIME_VERIFIER_REG_TYPE_H_
-#include <limits>
#include <stdint.h>
+#include <limits>
#include <set>
#include <string>
-#include "jni.h"
-
#include "base/macros.h"
#include "base/mutex.h"
#include "gc_root.h"
-#include "globals.h"
#include "object_callbacks.h"
#include "primitive.h"
@@ -35,6 +32,7 @@
namespace mirror {
class Class;
} // namespace mirror
+
namespace verifier {
class RegTypeCache;
@@ -578,17 +576,17 @@
bool IsConstantChar() const OVERRIDE {
return IsConstant() && ConstantValue() >= 0 &&
- ConstantValue() <= std::numeric_limits<jchar>::max();
+ ConstantValue() <= std::numeric_limits<uint16_t>::max();
}
bool IsConstantByte() const OVERRIDE {
return IsConstant() &&
- ConstantValue() >= std::numeric_limits<jbyte>::min() &&
- ConstantValue() <= std::numeric_limits<jbyte>::max();
+ ConstantValue() >= std::numeric_limits<int8_t>::min() &&
+ ConstantValue() <= std::numeric_limits<int8_t>::max();
}
bool IsConstantShort() const OVERRIDE {
return IsConstant() &&
- ConstantValue() >= std::numeric_limits<jshort>::min() &&
- ConstantValue() <= std::numeric_limits<jshort>::max();
+ ConstantValue() >= std::numeric_limits<int16_t>::min() &&
+ ConstantValue() <= std::numeric_limits<int16_t>::max();
}
virtual bool IsConstantTypes() const OVERRIDE { return true; }
diff --git a/runtime/verifier/register_line-inl.h b/runtime/verifier/register_line-inl.h
index 219e687..244deed 100644
--- a/runtime/verifier/register_line-inl.h
+++ b/runtime/verifier/register_line-inl.h
@@ -114,6 +114,17 @@
}
}
+inline size_t RegisterLine::GetMaxNonZeroReferenceReg(MethodVerifier* verifier,
+ size_t max_ref_reg) const {
+ size_t i = static_cast<int>(max_ref_reg) < 0 ? 0 : max_ref_reg;
+ for (; i < num_regs_; i++) {
+ if (GetRegisterType(verifier, i).IsNonZeroReferenceTypes()) {
+ max_ref_reg = i;
+ }
+ }
+ return max_ref_reg;
+}
+
inline bool RegisterLine::VerifyRegisterType(MethodVerifier* verifier, uint32_t vsrc,
const RegType& check_type) {
// Verify the src register type against the check type refining the type of the register
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index 3139204..72d7938 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -310,8 +310,12 @@
verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "monitor-enter stack overflow: "
<< monitors_.size();
} else {
- SetRegToLockDepth(reg_idx, monitors_.size());
- monitors_.push_back(insn_idx);
+ if (SetRegToLockDepth(reg_idx, monitors_.size())) {
+ monitors_.push_back(insn_idx);
+ } else {
+ verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unexpected monitor-enter on register v" <<
+ reg_idx;
+ }
}
}
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index 8f7823a..52b5c13 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -20,14 +20,16 @@
#include <memory>
#include <vector>
-#include "dex_instruction.h"
-#include "reg_type.h"
#include "safe_map.h"
namespace art {
+
+class Instruction;
+
namespace verifier {
class MethodVerifier;
+class RegType;
/*
* Register type categories, for type checking.
@@ -275,15 +277,7 @@
bool MergeRegisters(MethodVerifier* verifier, const RegisterLine* incoming_line)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- size_t GetMaxNonZeroReferenceReg(MethodVerifier* verifier, size_t max_ref_reg) {
- size_t i = static_cast<int>(max_ref_reg) < 0 ? 0 : max_ref_reg;
- for (; i < num_regs_; i++) {
- if (GetRegisterType(verifier, i).IsNonZeroReferenceTypes()) {
- max_ref_reg = i;
- }
- }
- return max_ref_reg;
- }
+ size_t GetMaxNonZeroReferenceReg(MethodVerifier* verifier, size_t max_ref_reg) const;
// Write a bit at each register location that holds a reference.
void WriteReferenceBitMap(MethodVerifier* verifier, std::vector<uint8_t>* data, size_t max_bytes);
@@ -313,15 +307,18 @@
}
}
- void SetRegToLockDepth(size_t reg, size_t depth) {
+ bool SetRegToLockDepth(size_t reg, size_t depth) {
CHECK_LT(depth, 32u);
- DCHECK(!IsSetLockDepth(reg, depth));
+ if (IsSetLockDepth(reg, depth)) {
+ return false; // Register already holds lock so locking twice is erroneous.
+ }
auto it = reg_to_lock_depths_.find(reg);
if (it == reg_to_lock_depths_.end()) {
reg_to_lock_depths_.Put(reg, 1 << depth);
} else {
it->second |= (1 << depth);
}
+ return true;
}
void ClearRegToLockDepth(size_t reg, size_t depth) {
@@ -347,21 +344,23 @@
SetResultTypeToUnknown(verifier);
}
- // Storage for the result register's type, valid after an invocation
+ // Storage for the result register's type, valid after an invocation.
uint16_t result_[2];
// Length of reg_types_
const uint32_t num_regs_;
- // A stack of monitor enter locations
+ // A stack of monitor enter locations.
std::vector<uint32_t, TrackingAllocator<uint32_t, kAllocatorTagVerifier>> monitors_;
// A map from register to a bit vector of indices into the monitors_ stack. As we pop the monitor
// stack we verify that monitor-enter/exit are correctly nested. That is, if there was a
- // monitor-enter on v5 and then on v6, we expect the monitor-exit to be on v6 then on v5
+ // monitor-enter on v5 and then on v6, we expect the monitor-exit to be on v6 then on v5.
AllocationTrackingSafeMap<uint32_t, uint32_t, kAllocatorTagVerifier> reg_to_lock_depths_;
// An array of RegType Ids associated with each dex register.
uint16_t line_[0];
+
+ DISALLOW_COPY_AND_ASSIGN(RegisterLine);
};
} // namespace verifier
diff --git a/test/100-reflect2/expected.txt b/test/100-reflect2/expected.txt
index 1af4121..8fdeccc 100644
--- a/test/100-reflect2/expected.txt
+++ b/test/100-reflect2/expected.txt
@@ -32,7 +32,7 @@
62 (class java.lang.Long)
14 (class java.lang.Short)
[public java.lang.String(), java.lang.String(int,int,char[]), public java.lang.String(java.lang.String), public java.lang.String(java.lang.StringBuffer), public java.lang.String(java.lang.StringBuilder), public java.lang.String(byte[]), public java.lang.String(byte[],int), public java.lang.String(byte[],int,int), public java.lang.String(byte[],int,int,int), public java.lang.String(byte[],int,int,java.lang.String) throws java.io.UnsupportedEncodingException, public java.lang.String(byte[],int,int,java.nio.charset.Charset), public java.lang.String(byte[],java.lang.String) throws java.io.UnsupportedEncodingException, public java.lang.String(byte[],java.nio.charset.Charset), public java.lang.String(char[]), public java.lang.String(char[],int,int), public java.lang.String(int[],int,int)]
-[private final char[] java.lang.String.value, private final int java.lang.String.count, private int java.lang.String.hashCode, private final int java.lang.String.offset, private static final char[] java.lang.String.ASCII, public static final java.util.Comparator java.lang.String.CASE_INSENSITIVE_ORDER, private static final long java.lang.String.serialVersionUID, private static final char java.lang.String.REPLACEMENT_CHAR]
+[private final int java.lang.String.count, private int java.lang.String.hashCode, private final int java.lang.String.offset, private final char[] java.lang.String.value, private static final char[] java.lang.String.ASCII, public static final java.util.Comparator java.lang.String.CASE_INSENSITIVE_ORDER, private static final char java.lang.String.REPLACEMENT_CHAR, private static final long java.lang.String.serialVersionUID]
[void java.lang.String._getChars(int,int,char[],int), public char java.lang.String.charAt(int), public int java.lang.String.codePointAt(int), public int java.lang.String.codePointBefore(int), public int java.lang.String.codePointCount(int,int), public volatile int java.lang.String.compareTo(java.lang.Object), public native int java.lang.String.compareTo(java.lang.String), public int java.lang.String.compareToIgnoreCase(java.lang.String), public java.lang.String java.lang.String.concat(java.lang.String), public boolean java.lang.String.contains(java.lang.CharSequence), public boolean java.lang.String.contentEquals(java.lang.CharSequence), public boolean java.lang.String.contentEquals(java.lang.StringBuffer), public boolean java.lang.String.endsWith(java.lang.String), public boolean java.lang.String.equals(java.lang.Object), public boolean java.lang.String.equalsIgnoreCase(java.lang.String), public void java.lang.String.getBytes(int,int,byte[],int), public [B java.lang.String.getBytes(), public [B java.lang.String.getBytes(java.lang.String) throws java.io.UnsupportedEncodingException, public [B java.lang.String.getBytes(java.nio.charset.Charset), public void java.lang.String.getChars(int,int,char[],int), public int java.lang.String.hashCode(), public int java.lang.String.indexOf(int), public int java.lang.String.indexOf(int,int), public int java.lang.String.indexOf(java.lang.String), public int java.lang.String.indexOf(java.lang.String,int), public native java.lang.String java.lang.String.intern(), public boolean java.lang.String.isEmpty(), public int java.lang.String.lastIndexOf(int), public int java.lang.String.lastIndexOf(int,int), public int java.lang.String.lastIndexOf(java.lang.String), public int java.lang.String.lastIndexOf(java.lang.String,int), public int java.lang.String.length(), public boolean java.lang.String.matches(java.lang.String), public int java.lang.String.offsetByCodePoints(int,int), public boolean 
java.lang.String.regionMatches(int,java.lang.String,int,int), public boolean java.lang.String.regionMatches(boolean,int,java.lang.String,int,int), public java.lang.String java.lang.String.replace(char,char), public java.lang.String java.lang.String.replace(java.lang.CharSequence,java.lang.CharSequence), public java.lang.String java.lang.String.replaceAll(java.lang.String,java.lang.String), public java.lang.String java.lang.String.replaceFirst(java.lang.String,java.lang.String), public [Ljava.lang.String; java.lang.String.split(java.lang.String), public [Ljava.lang.String; java.lang.String.split(java.lang.String,int), public boolean java.lang.String.startsWith(java.lang.String), public boolean java.lang.String.startsWith(java.lang.String,int), public java.lang.CharSequence java.lang.String.subSequence(int,int), public java.lang.String java.lang.String.substring(int), public java.lang.String java.lang.String.substring(int,int), public [C java.lang.String.toCharArray(), public java.lang.String java.lang.String.toLowerCase(), public java.lang.String java.lang.String.toLowerCase(java.util.Locale), public java.lang.String java.lang.String.toString(), public java.lang.String java.lang.String.toUpperCase(), public java.lang.String java.lang.String.toUpperCase(java.util.Locale), public java.lang.String java.lang.String.trim(), public static java.lang.String java.lang.String.copyValueOf(char[]), public static java.lang.String java.lang.String.copyValueOf(char[],int,int), private java.lang.StringIndexOutOfBoundsException java.lang.String.failedBoundsCheck(int,int,int), private native int java.lang.String.fastIndexOf(int,int), private char java.lang.String.foldCase(char), public static transient java.lang.String java.lang.String.format(java.lang.String,java.lang.Object[]), public static transient java.lang.String java.lang.String.format(java.util.Locale,java.lang.String,java.lang.Object[]), private java.lang.StringIndexOutOfBoundsException java.lang.String.indexAndLength(int), 
private static int java.lang.String.indexOf(java.lang.String,java.lang.String,int,int,char), private int java.lang.String.indexOfSupplementary(int,int), private int java.lang.String.lastIndexOfSupplementary(int,int), private java.lang.StringIndexOutOfBoundsException java.lang.String.startEndAndLength(int,int), public static java.lang.String java.lang.String.valueOf(char), public static java.lang.String java.lang.String.valueOf(double), public static java.lang.String java.lang.String.valueOf(float), public static java.lang.String java.lang.String.valueOf(int), public static java.lang.String java.lang.String.valueOf(long), public static java.lang.String java.lang.String.valueOf(java.lang.Object), public static java.lang.String java.lang.String.valueOf(boolean), public static java.lang.String java.lang.String.valueOf(char[]), public static java.lang.String java.lang.String.valueOf(char[],int,int)]
[]
[interface java.io.Serializable, interface java.lang.Comparable, interface java.lang.CharSequence]
diff --git a/test/128-reg-spilling-on-implicit-nullcheck/expected.txt b/test/128-reg-spilling-on-implicit-nullcheck/expected.txt
new file mode 100644
index 0000000..9bdf658
--- /dev/null
+++ b/test/128-reg-spilling-on-implicit-nullcheck/expected.txt
@@ -0,0 +1 @@
+t7q = 2
diff --git a/test/128-reg-spilling-on-implicit-nullcheck/info.txt b/test/128-reg-spilling-on-implicit-nullcheck/info.txt
new file mode 100644
index 0000000..18b2112
--- /dev/null
+++ b/test/128-reg-spilling-on-implicit-nullcheck/info.txt
@@ -0,0 +1 @@
+This is a compiler regression test for missing reg spilling on implicit nullcheck.
diff --git a/test/128-reg-spilling-on-implicit-nullcheck/src/Main.java b/test/128-reg-spilling-on-implicit-nullcheck/src/Main.java
new file mode 100644
index 0000000..48276bf
--- /dev/null
+++ b/test/128-reg-spilling-on-implicit-nullcheck/src/Main.java
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2007 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ public static void main(String[] args) {
+ int t7q = 0;
+ long q = 1L;
+
+ try {
+ for (int i = 1; i < 8; i++) {
+ t7q = (--t7q);
+ TestClass f = null;
+ t7q = f.field;
+ }
+ }
+ catch (NullPointerException wpw) {
+ q++;
+ }
+ finally {
+ t7q += (int)(1 - ((q - q) - 2));
+ }
+
+ System.out.println("t7q = " + t7q);
+ }
+}
+
+class TestClass {
+ public int field;
+ public void meth() {field = 1;}
+}
diff --git a/test/411-optimizing-arith/src/Main.java b/test/411-optimizing-arith/src/Main.java
index a22c516..3a5d7c0 100644
--- a/test/411-optimizing-arith/src/Main.java
+++ b/test/411-optimizing-arith/src/Main.java
@@ -101,7 +101,7 @@
expectEquals(0L, $opt$Mul(3L, 0L));
expectEquals(-3L, $opt$Mul(1L, -3L));
expectEquals(36L, $opt$Mul(-12L, -3L));
- expectEquals(33L, $opt$Mul(1L, 3L) * 11F);
+ expectEquals(33L, $opt$Mul(1L, 3L) * 11L);
expectEquals(240518168583L, $opt$Mul(34359738369L, 7L)); // (2^35 + 1) * 7
}
diff --git a/test/415-optimizing-arith-neg/src/Main.java b/test/415-optimizing-arith-neg/src/Main.java
index e2850ca..d9f8bcf 100644
--- a/test/415-optimizing-arith-neg/src/Main.java
+++ b/test/415-optimizing-arith-neg/src/Main.java
@@ -71,8 +71,8 @@
assertEquals(0, $opt$NegInt(0));
assertEquals(51, $opt$NegInt(-51));
assertEquals(-51, $opt$NegInt(51));
- assertEquals(2147483647, $opt$NegInt(-2147483647)); // (2^31 - 1)
- assertEquals(-2147483647, $opt$NegInt(2147483647)); // -(2^31 - 1)
+ assertEquals(2147483647, $opt$NegInt(-2147483647)); // -(2^31 - 1)
+ assertEquals(-2147483647, $opt$NegInt(2147483647)); // 2^31 - 1
// From the Java 7 SE Edition specification:
// http://docs.oracle.com/javase/specs/jls/se7/html/jls-15.html#jls-15.15.4
//
@@ -98,13 +98,13 @@
assertEquals(51L, $opt$NegLong(-51L));
assertEquals(-51L, $opt$NegLong(51L));
- assertEquals(2147483647L, $opt$NegLong(-2147483647L)); // (2^31 - 1)
- assertEquals(-2147483647L, $opt$NegLong(2147483647L)); // -(2^31 - 1)
- assertEquals(2147483648L, $opt$NegLong(-2147483648L)); // 2^31
- assertEquals(-2147483648L, $opt$NegLong(2147483648L)); // -(2^31)
+ assertEquals(2147483647L, $opt$NegLong(-2147483647L)); // -(2^31 - 1)
+ assertEquals(-2147483647L, $opt$NegLong(2147483647L)); // (2^31 - 1)
+ assertEquals(2147483648L, $opt$NegLong(-2147483648L)); // -(2^31)
+ assertEquals(-2147483648L, $opt$NegLong(2147483648L)); // 2^31
- assertEquals(9223372036854775807L, $opt$NegLong(-9223372036854775807L)); // (2^63 - 1)
- assertEquals(-9223372036854775807L, $opt$NegLong(9223372036854775807L)); // -(2^63 - 1)
+ assertEquals(9223372036854775807L, $opt$NegLong(-9223372036854775807L)); // -(2^63 - 1)
+ assertEquals(-9223372036854775807L, $opt$NegLong(9223372036854775807L)); // 2^63 - 1
// See remark regarding the negation of the maximum negative
// (long) value in negInt().
assertEquals(-9223372036854775808L, $opt$NegLong(-9223372036854775808L)); // -(2^63)
diff --git a/test/416-optimizing-arith-not/src/Main.java b/test/416-optimizing-arith-not/src/Main.java
index 26e206c..44c7d3c 100644
--- a/test/416-optimizing-arith-not/src/Main.java
+++ b/test/416-optimizing-arith-not/src/Main.java
@@ -40,10 +40,10 @@
expectEquals(0, smaliNotInt(-1));
expectEquals(-1, smaliNotInt(0));
expectEquals(-2, smaliNotInt(1));
- expectEquals(2147483647, smaliNotInt(-2147483648)); // (2^31) - 1
- expectEquals(2147483646, smaliNotInt(-2147483647)); // (2^31) - 2
- expectEquals(-2147483647, smaliNotInt(2147483646)); // -(2^31) - 1
- expectEquals(-2147483648, smaliNotInt(2147483647)); // -(2^31)
+ expectEquals(2147483647, smaliNotInt(-2147483648)); // -(2^31)
+ expectEquals(2147483646, smaliNotInt(-2147483647)); // -(2^31 - 1)
+ expectEquals(-2147483647, smaliNotInt(2147483646)); // 2^31 - 2
+ expectEquals(-2147483648, smaliNotInt(2147483647)); // 2^31 - 1
}
private static void notLong() throws Exception {
@@ -51,14 +51,14 @@
expectEquals(0L, smaliNotLong(-1L));
expectEquals(-1L, smaliNotLong(0L));
expectEquals(-2L, smaliNotLong(1L));
- expectEquals(2147483647L, smaliNotLong(-2147483648L)); // (2^31) - 1
- expectEquals(2147483646L, smaliNotLong(-2147483647L)); // (2^31) - 2
- expectEquals(-2147483647L, smaliNotLong(2147483646L)); // -(2^31) - 1
- expectEquals(-2147483648L, smaliNotLong(2147483647L)); // -(2^31)
- expectEquals(9223372036854775807L, smaliNotLong(-9223372036854775808L)); // (2^63) - 1
- expectEquals(9223372036854775806L, smaliNotLong(-9223372036854775807L)); // (2^63) - 2
- expectEquals(-9223372036854775807L, smaliNotLong(9223372036854775806L)); // -(2^63) - 1
- expectEquals(-9223372036854775808L, smaliNotLong(9223372036854775807L)); // -(2^63)
+ expectEquals(2147483647L, smaliNotLong(-2147483648L)); // -(2^31)
+ expectEquals(2147483646L, smaliNotLong(-2147483647L)); // -(2^31 - 1)
+ expectEquals(-2147483647L, smaliNotLong(2147483646L)); // 2^31 - 2
+ expectEquals(-2147483648L, smaliNotLong(2147483647L)); // 2^31 - 1
+ expectEquals(9223372036854775807L, smaliNotLong(-9223372036854775808L)); // -(2^63)
+ expectEquals(9223372036854775806L, smaliNotLong(-9223372036854775807L)); // -(2^63 - 1)
+ expectEquals(-9223372036854775807L, smaliNotLong(9223372036854775806L)); // 2^63 - 2
+ expectEquals(-9223372036854775808L, smaliNotLong(9223372036854775807L)); // 2^63 - 1
}
// Wrappers around methods located in file not.smali.
diff --git a/test/417-optimizing-arith-div/src/Main.java b/test/417-optimizing-arith-div/src/Main.java
index 5825d24..a5dea15 100644
--- a/test/417-optimizing-arith-div/src/Main.java
+++ b/test/417-optimizing-arith-div/src/Main.java
@@ -78,18 +78,33 @@
} catch (java.lang.RuntimeException e) {
}
}
+
+ public static void expectDivisionByZero(long value) {
+ try {
+ $opt$Div(value, 0L);
+ throw new Error("Expected RuntimeException when dividing by 0");
+ } catch (java.lang.RuntimeException e) {
+ }
+ try {
+ $opt$DivZero(value);
+ throw new Error("Expected RuntimeException when dividing by 0");
+ } catch (java.lang.RuntimeException e) {
+ }
+ }
+
public static void main(String[] args) {
div();
}
public static void div() {
divInt();
+ divLong();
divFloat();
divDouble();
}
private static void divInt() {
- expectEquals(2, $opt$DivLit(6));
+ expectEquals(2, $opt$DivConst(6));
expectEquals(2, $opt$Div(6, 3));
expectEquals(6, $opt$Div(6, 1));
expectEquals(-2, $opt$Div(6, -3));
@@ -111,6 +126,35 @@
expectDivisionByZero(Integer.MIN_VALUE);
}
+ private static void divLong() {
+ expectEquals(2L, $opt$DivConst(6L));
+ expectEquals(2L, $opt$Div(6L, 3L));
+ expectEquals(6L, $opt$Div(6L, 1L));
+ expectEquals(-2L, $opt$Div(6L, -3L));
+ expectEquals(1L, $opt$Div(4L, 3L));
+ expectEquals(-1L, $opt$Div(4L, -3L));
+ expectEquals(5L, $opt$Div(23L, 4L));
+ expectEquals(-5L, $opt$Div(-23L, 4L));
+
+ expectEquals(-Integer.MAX_VALUE, $opt$Div(Integer.MAX_VALUE, -1L));
+ expectEquals(2147483648L, $opt$Div(Integer.MIN_VALUE, -1L));
+ expectEquals(-1073741824L, $opt$Div(Integer.MIN_VALUE, 2L));
+
+ expectEquals(-Long.MAX_VALUE, $opt$Div(Long.MAX_VALUE, -1L));
+ expectEquals(Long.MIN_VALUE, $opt$Div(Long.MIN_VALUE, -1L)); // overflow
+
+ expectEquals(11111111111111L, $opt$Div(33333333333333L, 3L));
+ expectEquals(3L, $opt$Div(33333333333333L, 11111111111111L));
+
+ expectEquals(0L, $opt$Div(0L, Long.MAX_VALUE));
+ expectEquals(0L, $opt$Div(0L, Long.MIN_VALUE));
+
+ expectDivisionByZero(0L);
+ expectDivisionByZero(1L);
+ expectDivisionByZero(Long.MAX_VALUE);
+ expectDivisionByZero(Long.MIN_VALUE);
+ }
+
private static void divFloat() {
expectApproxEquals(1.6666666F, $opt$Div(5F, 3F));
expectApproxEquals(0F, $opt$Div(0F, 3F));
@@ -178,10 +222,22 @@
}
// Division by literals != 0 should not generate checks.
- static int $opt$DivLit(int a) {
+ static int $opt$DivConst(int a) {
return a / 3;
}
+ static long $opt$DivConst(long a) {
+ return a / 3L;
+ }
+
+ static long $opt$Div(long a, long b) {
+ return a / b;
+ }
+
+ static long $opt$DivZero(long a) {
+ return a / 0L;
+ }
+
static float $opt$Div(float a, float b) {
return a / b;
}
diff --git a/test/422-type-conversion/src/Main.java b/test/422-type-conversion/src/Main.java
index d61f255..88b4528 100644
--- a/test/422-type-conversion/src/Main.java
+++ b/test/422-type-conversion/src/Main.java
@@ -18,71 +18,232 @@
// it does compile the method.
public class Main {
- public static void assertEquals(long expected, long result) {
+ public static void assertByteEquals(byte expected, byte result) {
if (expected != result) {
throw new Error("Expected: " + expected + ", found: " + result);
}
}
+ public static void assertIntEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void assertLongEquals(long expected, long result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void assertCharEquals(char expected, char result) {
+ if (expected != result) {
+ // Values are cast to int to display numeric values instead of
+ // (Unicode) characters.
+ throw new Error("Expected: " + (int)expected + ", found: " + (int)result);
+ }
+ }
+
public static void main(String[] args) {
byteToLong();
- charToLong();
shortToLong();
intToLong();
+ charToLong();
+
+ longToInt();
+
+ shortToByte();
+ intToByte();
+ charToByte();
+
+ byteToChar();
+ shortToChar();
+ intToChar();
}
private static void byteToLong() {
- assertEquals(1L, $opt$ByteToLong((byte)1));
- assertEquals(0L, $opt$ByteToLong((byte)0));
- assertEquals(-1L, $opt$ByteToLong((byte)-1));
- assertEquals(51L, $opt$ByteToLong((byte)51));
- assertEquals(-51L, $opt$ByteToLong((byte)-51));
- assertEquals(127L, $opt$ByteToLong((byte)127)); // (2^7) - 1
- assertEquals(-127L, $opt$ByteToLong((byte)-127)); // -(2^7) - 1
- assertEquals(-128L, $opt$ByteToLong((byte)-128)); // -(2^7)
+ assertLongEquals(1L, $opt$ByteToLong((byte)1));
+ assertLongEquals(0L, $opt$ByteToLong((byte)0));
+ assertLongEquals(-1L, $opt$ByteToLong((byte)-1));
+ assertLongEquals(51L, $opt$ByteToLong((byte)51));
+ assertLongEquals(-51L, $opt$ByteToLong((byte)-51));
+ assertLongEquals(127L, $opt$ByteToLong((byte)127)); // 2^7 - 1
+ assertLongEquals(-127L, $opt$ByteToLong((byte)-127)); // -(2^7 - 1)
+ assertLongEquals(-128L, $opt$ByteToLong((byte)-128)); // -(2^7)
}
private static void shortToLong() {
- assertEquals(1L, $opt$ShortToLong((short)1));
- assertEquals(0L, $opt$ShortToLong((short)0));
- assertEquals(-1L, $opt$ShortToLong((short)-1));
- assertEquals(51L, $opt$ShortToLong((short)51));
- assertEquals(-51L, $opt$ShortToLong((short)-51));
- assertEquals(32767L, $opt$ShortToLong((short)32767)); // (2^15) - 1
- assertEquals(-32767L, $opt$ShortToLong((short)-32767)); // -(2^15) - 1
- assertEquals(-32768L, $opt$ShortToLong((short)-32768)); // -(2^15)
+ assertLongEquals(1L, $opt$ShortToLong((short)1));
+ assertLongEquals(0L, $opt$ShortToLong((short)0));
+ assertLongEquals(-1L, $opt$ShortToLong((short)-1));
+ assertLongEquals(51L, $opt$ShortToLong((short)51));
+ assertLongEquals(-51L, $opt$ShortToLong((short)-51));
+ assertLongEquals(32767L, $opt$ShortToLong((short)32767)); // 2^15 - 1
+ assertLongEquals(-32767L, $opt$ShortToLong((short)-32767)); // -(2^15 - 1)
+ assertLongEquals(-32768L, $opt$ShortToLong((short)-32768)); // -(2^15)
}
private static void intToLong() {
- assertEquals(1L, $opt$IntToLong(1));
- assertEquals(0L, $opt$IntToLong(0));
- assertEquals(-1L, $opt$IntToLong(-1));
- assertEquals(51L, $opt$IntToLong(51));
- assertEquals(-51L, $opt$IntToLong(-51));
- assertEquals(2147483647L, $opt$IntToLong(2147483647)); // (2^31) - 1
- assertEquals(-2147483647L, $opt$IntToLong(-2147483647)); // -(2^31) - 1
- assertEquals(-2147483648L, $opt$IntToLong(-2147483648)); // -(2^31)
+ assertLongEquals(1L, $opt$IntToLong(1));
+ assertLongEquals(0L, $opt$IntToLong(0));
+ assertLongEquals(-1L, $opt$IntToLong(-1));
+ assertLongEquals(51L, $opt$IntToLong(51));
+ assertLongEquals(-51L, $opt$IntToLong(-51));
+ assertLongEquals(2147483647L, $opt$IntToLong(2147483647)); // 2^31 - 1
+ assertLongEquals(-2147483647L, $opt$IntToLong(-2147483647)); // -(2^31 - 1)
+ assertLongEquals(-2147483648L, $opt$IntToLong(-2147483648)); // -(2^31)
}
private static void charToLong() {
- assertEquals(1L, $opt$CharToLong((char)1));
- assertEquals(0L, $opt$CharToLong((char)0));
- assertEquals(51L, $opt$CharToLong((char)51));
- assertEquals(32767L, $opt$CharToLong((char)32767)); // (2^15) - 1
- assertEquals(65535L, $opt$CharToLong((char)65535)); // (2^16) - 1
+ assertLongEquals(1L, $opt$CharToLong((char)1));
+ assertLongEquals(0L, $opt$CharToLong((char)0));
+ assertLongEquals(51L, $opt$CharToLong((char)51));
+ assertLongEquals(32767L, $opt$CharToLong((char)32767)); // 2^15 - 1
+ assertLongEquals(65535L, $opt$CharToLong((char)65535)); // 2^16 - 1
- assertEquals(0L, $opt$CharToLong('\u0000'));
- assertEquals(65535L, $opt$CharToLong('\uFFFF')); // (2^16) - 1
+ assertLongEquals(0L, $opt$CharToLong('\u0000'));
+ assertLongEquals(65535L, $opt$CharToLong('\uFFFF')); // 2^16 - 1
- assertEquals(65535L, $opt$CharToLong((char)-1));
- assertEquals(65485L, $opt$CharToLong((char)-51));
- assertEquals(32769L, $opt$CharToLong((char)-32767)); // -(2^15) - 1
- assertEquals(32768L, $opt$CharToLong((char)-32768)); // -(2^15)
+ assertLongEquals(65535L, $opt$CharToLong((char)-1));
+ assertLongEquals(65485L, $opt$CharToLong((char)-51));
+ assertLongEquals(32769L, $opt$CharToLong((char)-32767)); // -(2^15 - 1)
+ assertLongEquals(32768L, $opt$CharToLong((char)-32768)); // -(2^15)
}
- // All these methods produce an int-to-long Dex instruction.
+ private static void longToInt() {
+ assertIntEquals(1, $opt$LongToInt(1L));
+ assertIntEquals(0, $opt$LongToInt(0L));
+ assertIntEquals(-1, $opt$LongToInt(-1L));
+ assertIntEquals(51, $opt$LongToInt(51L));
+ assertIntEquals(-51, $opt$LongToInt(-51L));
+ assertIntEquals(2147483647, $opt$LongToInt(2147483647L)); // 2^31 - 1
+ assertIntEquals(-2147483647, $opt$LongToInt(-2147483647L)); // -(2^31 - 1)
+ assertIntEquals(-2147483648, $opt$LongToInt(-2147483648L)); // -(2^31)
+ assertIntEquals(-2147483648, $opt$LongToInt(2147483648L)); // 2^31
+ assertIntEquals(2147483647, $opt$LongToInt(-2147483649L)); // -(2^31 + 1)
+ assertIntEquals(-1, $opt$LongToInt(9223372036854775807L)); // 2^63 - 1
+ assertIntEquals(1, $opt$LongToInt(-9223372036854775807L)); // -(2^63 - 1)
+ assertIntEquals(0, $opt$LongToInt(-9223372036854775808L)); // -(2^63)
+
+ assertIntEquals(42, $opt$LongLiteralToInt());
+
+ // Ensure long-to-int conversions truncate values as expected.
+ assertLongEquals(1L, $opt$IntToLong($opt$LongToInt(4294967297L))); // 2^32 + 1
+ assertLongEquals(0L, $opt$IntToLong($opt$LongToInt(4294967296L))); // 2^32
+ assertLongEquals(-1L, $opt$IntToLong($opt$LongToInt(4294967295L))); // 2^32 - 1
+ assertLongEquals(0L, $opt$IntToLong($opt$LongToInt(0L)));
+ assertLongEquals(1L, $opt$IntToLong($opt$LongToInt(-4294967295L))); // -(2^32 - 1)
+ assertLongEquals(0L, $opt$IntToLong($opt$LongToInt(-4294967296L))); // -(2^32)
+ assertLongEquals(-1, $opt$IntToLong($opt$LongToInt(-4294967297L))); // -(2^32 + 1)
+ }
+
+ private static void shortToByte() {
+ assertByteEquals((byte)1, $opt$ShortToByte((short)1));
+ assertByteEquals((byte)0, $opt$ShortToByte((short)0));
+ assertByteEquals((byte)-1, $opt$ShortToByte((short)-1));
+ assertByteEquals((byte)51, $opt$ShortToByte((short)51));
+ assertByteEquals((byte)-51, $opt$ShortToByte((short)-51));
+ assertByteEquals((byte)127, $opt$ShortToByte((short)127)); // 2^7 - 1
+ assertByteEquals((byte)-127, $opt$ShortToByte((short)-127)); // -(2^7 - 1)
+ assertByteEquals((byte)-128, $opt$ShortToByte((short)-128)); // -(2^7)
+ assertByteEquals((byte)-128, $opt$ShortToByte((short)128)); // 2^7
+ assertByteEquals((byte)127, $opt$ShortToByte((short)-129)); // -(2^7 + 1)
+ assertByteEquals((byte)-1, $opt$ShortToByte((short)32767)); // 2^15 - 1
+ assertByteEquals((byte)0, $opt$ShortToByte((short)-32768)); // -(2^15)
+ }
+
+ private static void intToByte() {
+ assertByteEquals((byte)1, $opt$IntToByte(1));
+ assertByteEquals((byte)0, $opt$IntToByte(0));
+ assertByteEquals((byte)-1, $opt$IntToByte(-1));
+ assertByteEquals((byte)51, $opt$IntToByte(51));
+ assertByteEquals((byte)-51, $opt$IntToByte(-51));
+ assertByteEquals((byte)127, $opt$IntToByte(127)); // 2^7 - 1
+ assertByteEquals((byte)-127, $opt$IntToByte(-127)); // -(2^7 - 1)
+ assertByteEquals((byte)-128, $opt$IntToByte(-128)); // -(2^7)
+ assertByteEquals((byte)-128, $opt$IntToByte(128)); // 2^7
+ assertByteEquals((byte)127, $opt$IntToByte(-129)); // -(2^7 + 1)
+ assertByteEquals((byte)-1, $opt$IntToByte(2147483647)); // 2^31 - 1
+ assertByteEquals((byte)0, $opt$IntToByte(-2147483648)); // -(2^31)
+ }
+
+ private static void charToByte() {
+ assertByteEquals((byte)1, $opt$CharToByte((char)1));
+ assertByteEquals((byte)0, $opt$CharToByte((char)0));
+ assertByteEquals((byte)51, $opt$CharToByte((char)51));
+ assertByteEquals((byte)127, $opt$CharToByte((char)127)); // 2^7 - 1
+ assertByteEquals((byte)-128, $opt$CharToByte((char)128)); // 2^7
+ assertByteEquals((byte)-1, $opt$CharToByte((char)32767)); // 2^15 - 1
+ assertByteEquals((byte)-1, $opt$CharToByte((char)65535)); // 2^16 - 1
+
+ assertByteEquals((byte)0, $opt$CharToByte('\u0000'));
+ assertByteEquals((byte)-1, $opt$CharToByte('\uFFFF')); // 2^16 - 1
+
+ assertByteEquals((byte)-1, $opt$CharToByte((char)-1));
+ assertByteEquals((byte)-51, $opt$CharToByte((char)-51));
+ assertByteEquals((byte)-127, $opt$CharToByte((char)-127)); // -(2^7 - 1)
+ assertByteEquals((byte)-128, $opt$CharToByte((char)-128)); // -(2^7)
+ assertByteEquals((byte)127, $opt$CharToByte((char)-129)); // -(2^7 + 1)
+ }
+
+ private static void byteToChar() {
+ assertCharEquals((char)1, $opt$ByteToChar((byte)1));
+ assertCharEquals((char)0, $opt$ByteToChar((byte)0));
+ assertCharEquals((char)65535, $opt$ByteToChar((byte)-1));
+ assertCharEquals((char)51, $opt$ByteToChar((byte)51));
+ assertCharEquals((char)65485, $opt$ByteToChar((byte)-51));
+ assertCharEquals((char)127, $opt$ByteToChar((byte)127)); // 2^7 - 1
+ assertCharEquals((char)65409, $opt$ByteToChar((byte)-127)); // -(2^7 - 1)
+ assertCharEquals((char)65408, $opt$ByteToChar((byte)-128)); // -(2^7)
+ }
+
+ private static void shortToChar() {
+ assertCharEquals((char)1, $opt$ShortToChar((short)1));
+ assertCharEquals((char)0, $opt$ShortToChar((short)0));
+ assertCharEquals((char)65535, $opt$ShortToChar((short)-1));
+ assertCharEquals((char)51, $opt$ShortToChar((short)51));
+ assertCharEquals((char)65485, $opt$ShortToChar((short)-51));
+ assertCharEquals((char)32767, $opt$ShortToChar((short)32767)); // 2^15 - 1
+ assertCharEquals((char)32769, $opt$ShortToChar((short)-32767)); // -(2^15 - 1)
+ assertCharEquals((char)32768, $opt$ShortToChar((short)-32768)); // -(2^15)
+ }
+
+ private static void intToChar() {
+ assertCharEquals((char)1, $opt$IntToChar(1));
+ assertCharEquals((char)0, $opt$IntToChar(0));
+ assertCharEquals((char)65535, $opt$IntToChar(-1));
+ assertCharEquals((char)51, $opt$IntToChar(51));
+ assertCharEquals((char)65485, $opt$IntToChar(-51));
+ assertCharEquals((char)32767, $opt$IntToChar(32767)); // 2^15 - 1
+ assertCharEquals((char)32769, $opt$IntToChar(-32767)); // -(2^15 - 1)
+ assertCharEquals((char)32768, $opt$IntToChar(32768)); // 2^15
+ assertCharEquals((char)32768, $opt$IntToChar(-32768)); // -(2^15)
+ assertCharEquals((char)65535, $opt$IntToChar(65535)); // 2^16 - 1
+ assertCharEquals((char)1, $opt$IntToChar(-65535)); // -(2^16 - 1)
+ assertCharEquals((char)0, $opt$IntToChar(65536)); // 2^16
+ assertCharEquals((char)0, $opt$IntToChar(-65536)); // -(2^16)
+ assertCharEquals((char)65535, $opt$IntToChar(2147483647)); // 2^31 - 1
+ assertCharEquals((char)0, $opt$IntToChar(-2147483648)); // -(2^31)
+ }
+
+
+ // These methods produce int-to-long Dex instructions.
static long $opt$ByteToLong(byte a) { return a; }
static long $opt$ShortToLong(short a) { return a; }
static long $opt$IntToLong(int a) { return a; }
static long $opt$CharToLong(int a) { return a; }
+
+ // These methods produce long-to-int Dex instructions.
+ static int $opt$LongToInt(long a){ return (int)a; }
+ static int $opt$LongLiteralToInt(){ return (int)42L; }
+
+ // These methods produce int-to-byte Dex instructions.
+ static byte $opt$ShortToByte(short a){ return (byte)a; }
+ static byte $opt$IntToByte(int a){ return (byte)a; }
+ static byte $opt$CharToByte(char a){ return (byte)a; }
+
+ // These methods produce int-to-char Dex instructions.
+ static char $opt$ByteToChar(byte a){ return (char)a; }
+ static char $opt$ShortToChar(short a){ return (char)a; }
+ static char $opt$IntToChar(int a){ return (char)a; }
}
diff --git a/test/424-checkcast/expected.txt b/test/424-checkcast/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/424-checkcast/expected.txt
diff --git a/test/424-checkcast/info.txt b/test/424-checkcast/info.txt
new file mode 100644
index 0000000..b50b082
--- /dev/null
+++ b/test/424-checkcast/info.txt
@@ -0,0 +1 @@
+Simple tests for the checkcast opcode.
diff --git a/test/424-checkcast/src/Main.java b/test/424-checkcast/src/Main.java
new file mode 100644
index 0000000..791b166
--- /dev/null
+++ b/test/424-checkcast/src/Main.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static Object a;
+
+ public static Object $opt$CheckCastMain() {
+ return (Main)a;
+ }
+
+ public static Object $opt$CheckCastFinalClass() {
+ return (FinalClass)a;
+ }
+
+ public static void main(String[] args) {
+ $opt$TestMain();
+ $opt$TestFinalClass();
+ }
+
+ public static void $opt$TestMain() {
+ a = new Main();
+ $opt$CheckCastMain();
+
+ a = null;
+ $opt$CheckCastMain();
+
+ a = new MainChild();
+ $opt$CheckCastMain();
+
+ a = new Object();
+ try {
+ $opt$CheckCastMain();
+ throw new Error("Should have gotten a ClassCastException");
+ } catch (ClassCastException ex) {}
+ }
+
+ public static void $opt$TestFinalClass() {
+ a = new FinalClass();
+ $opt$CheckCastFinalClass();
+
+ a = null;
+ $opt$CheckCastFinalClass();
+
+ a = new Main();
+ try {
+ $opt$CheckCastFinalClass();
+ throw new Error("Should have gotten a ClassCastException");
+ } catch (ClassCastException ex) {}
+
+ a = new Object();
+ try {
+ $opt$CheckCastFinalClass();
+ throw new Error("Should have gotten a ClassCastException");
+ } catch (ClassCastException ex) {}
+ }
+
+ static class MainChild extends Main {}
+
+ static final class FinalClass {}
+}
diff --git a/test/425-invoke-super/expected.txt b/test/425-invoke-super/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/425-invoke-super/expected.txt
diff --git a/test/425-invoke-super/info.txt b/test/425-invoke-super/info.txt
new file mode 100644
index 0000000..ad99030
--- /dev/null
+++ b/test/425-invoke-super/info.txt
@@ -0,0 +1 @@
+Tests the invoke-super opcode.
diff --git a/test/425-invoke-super/smali/invokesuper.smali b/test/425-invoke-super/smali/invokesuper.smali
new file mode 100644
index 0000000..ab13091
--- /dev/null
+++ b/test/425-invoke-super/smali/invokesuper.smali
@@ -0,0 +1,40 @@
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+.class public LInvokeSuper;
+.super LSuperClass;
+
+.method public constructor <init>()V
+.registers 1
+ invoke-direct {v0}, LSuperClass;-><init>()V
+ return-void
+.end method
+
+
+.method public run()I
+.registers 2
+ # Do an invoke super on a non-super class to force slow path.
+ invoke-super {v1}, LInvokeSuper;->returnInt()I
+ move-result v0
+ return v0
+.end method
+
+
+.method public returnInt()I
+.registers 2
+ const v0, 777
+ return v0
+.end method
diff --git a/test/425-invoke-super/smali/subclass.smali b/test/425-invoke-super/smali/subclass.smali
new file mode 100644
index 0000000..54e3474
--- /dev/null
+++ b/test/425-invoke-super/smali/subclass.smali
@@ -0,0 +1,29 @@
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LSubClass;
+.super LInvokeSuper;
+
+.method public constructor <init>()V
+.registers 1
+ invoke-direct {v0}, LInvokeSuper;-><init>()V
+ return-void
+.end method
+
+.method public returnInt()I
+.registers 2
+ const v0, 0
+ return v0
+.end method
diff --git a/test/425-invoke-super/smali/superclass.smali b/test/425-invoke-super/smali/superclass.smali
new file mode 100644
index 0000000..b366aa7
--- /dev/null
+++ b/test/425-invoke-super/smali/superclass.smali
@@ -0,0 +1,29 @@
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LSuperClass;
+.super Ljava/lang/Object;
+
+.method public constructor <init>()V
+.registers 1
+ invoke-direct {v0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+.method public returnInt()I
+.registers 2
+ const v0, 42
+ return v0
+.end method
diff --git a/test/425-invoke-super/src/Main.java b/test/425-invoke-super/src/Main.java
new file mode 100644
index 0000000..1fb62d0
--- /dev/null
+++ b/test/425-invoke-super/src/Main.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+
+public class Main {
+ static class A {
+ public int foo() { return 1; }
+ }
+
+ static class B extends A {
+ public int $opt$bar() { return super.foo(); }
+ }
+
+ static class C extends B {
+ public int foo() { return 42; }
+ }
+
+ static class D extends C {
+ }
+
+ static void assertEquals(int expected, int value) {
+ if (expected != value) {
+ throw new Error("Expected " + expected + ", got " + value);
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ assertEquals(1, new B().$opt$bar());
+ assertEquals(1, new C().$opt$bar());
+ assertEquals(1, new D().$opt$bar());
+
+ Class<?> c = Class.forName("InvokeSuper");
+ Method m = c.getMethod("run");
+ assertEquals(42, ((Integer)m.invoke(c.newInstance(), new Object[0])).intValue());
+
+ c = Class.forName("SubClass");
+ assertEquals(42, ((Integer)m.invoke(c.newInstance(), new Object[0])).intValue());
+ }
+}
diff --git a/test/426-monitor/expected.txt b/test/426-monitor/expected.txt
new file mode 100644
index 0000000..2ffeff4
--- /dev/null
+++ b/test/426-monitor/expected.txt
@@ -0,0 +1,5 @@
+In static method
+In instance method
+In synchronized block
+In second instance method
+In second static method
diff --git a/test/426-monitor/info.txt b/test/426-monitor/info.txt
new file mode 100644
index 0000000..1b093ea
--- /dev/null
+++ b/test/426-monitor/info.txt
@@ -0,0 +1 @@
+Simple tests for monitorenter/monitorexit.
diff --git a/test/426-monitor/src/Main.java b/test/426-monitor/src/Main.java
new file mode 100644
index 0000000..a073a95
--- /dev/null
+++ b/test/426-monitor/src/Main.java
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ $opt$StaticSynchronizedMethod();
+ new Main().$opt$InstanceSynchronizedMethod();
+ $opt$SynchronizedBlock();
+ new Main().$opt$DoubleInstanceSynchronized();
+ $opt$DoubleStaticSynchronized();
+ }
+
+ public static synchronized void $opt$StaticSynchronizedMethod() {
+ System.out.println("In static method");
+ }
+
+ public synchronized void $opt$InstanceSynchronizedMethod() {
+ System.out.println("In instance method");
+ }
+
+ public static void $opt$SynchronizedBlock() {
+ Object o = new Object();
+ synchronized(o) {
+ System.out.println("In synchronized block");
+ }
+ }
+
+ public synchronized void $opt$DoubleInstanceSynchronized() {
+ synchronized (this) {
+ System.out.println("In second instance method");
+ }
+ }
+
+ public synchronized static void $opt$DoubleStaticSynchronized() {
+ synchronized (Main.class) {
+ System.out.println("In second static method");
+ }
+ }
+}
diff --git a/test/427-bitwise/expected.txt b/test/427-bitwise/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/427-bitwise/expected.txt
diff --git a/test/427-bitwise/info.txt b/test/427-bitwise/info.txt
new file mode 100644
index 0000000..4762847
--- /dev/null
+++ b/test/427-bitwise/info.txt
@@ -0,0 +1 @@
+Tests for the and/or/xor opcodes.
diff --git a/test/427-bitwise/src/Main.java b/test/427-bitwise/src/Main.java
new file mode 100644
index 0000000..e984066
--- /dev/null
+++ b/test/427-bitwise/src/Main.java
@@ -0,0 +1,233 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Note that $opt$ is a marker for the optimizing compiler to ensure
+// it does compile the method.
+public class Main {
+
+ public static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void expectEquals(long expected, long result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void main(String[] args) {
+ andInt();
+ andLong();
+
+ orInt();
+ orLong();
+
+ xorInt();
+ xorLong();
+ }
+
+ private static void andInt() {
+ expectEquals(1, $opt$And(5, 3));
+ expectEquals(0, $opt$And(0, 0));
+ expectEquals(0, $opt$And(0, 3));
+ expectEquals(0, $opt$And(3, 0));
+ expectEquals(1, $opt$And(1, -3));
+ expectEquals(-12, $opt$And(-12, -3));
+
+ expectEquals(1, $opt$AndLit8(1));
+ expectEquals(0, $opt$AndLit8(0));
+ expectEquals(0, $opt$AndLit8(0));
+ expectEquals(3, $opt$AndLit8(3));
+ expectEquals(4, $opt$AndLit8(-12));
+
+ expectEquals(0, $opt$AndLit16(1));
+ expectEquals(0, $opt$AndLit16(0));
+ expectEquals(0, $opt$AndLit16(0));
+ expectEquals(0, $opt$AndLit16(3));
+ expectEquals(65280, $opt$AndLit16(-12));
+ }
+
+ private static void andLong() {
+ expectEquals(1L, $opt$And(5L, 3L));
+ expectEquals(0L, $opt$And(0L, 0L));
+ expectEquals(0L, $opt$And(0L, 3L));
+ expectEquals(0L, $opt$And(3L, 0L));
+ expectEquals(1L, $opt$And(1L, -3L));
+ expectEquals(-12L, $opt$And(-12L, -3L));
+
+ expectEquals(1L, $opt$AndLit8(1L));
+ expectEquals(0L, $opt$AndLit8(0L));
+ expectEquals(0L, $opt$AndLit8(0L));
+ expectEquals(3L, $opt$AndLit8(3L));
+ expectEquals(4L, $opt$AndLit8(-12L));
+
+ expectEquals(0L, $opt$AndLit16(1L));
+ expectEquals(0L, $opt$AndLit16(0L));
+ expectEquals(0L, $opt$AndLit16(0L));
+ expectEquals(0L, $opt$AndLit16(3L));
+ expectEquals(65280L, $opt$AndLit16(-12L));
+ }
+
+ static int $opt$And(int a, int b) {
+ return a & b;
+ }
+
+ static int $opt$AndLit8(int a) {
+ return a & 0xF;
+ }
+
+ static int $opt$AndLit16(int a) {
+ return a & 0xFF00;
+ }
+
+ static long $opt$And(long a, long b) {
+ return a & b;
+ }
+
+ static long $opt$AndLit8(long a) {
+ return a & 0xF;
+ }
+
+ static long $opt$AndLit16(long a) {
+ return a & 0xFF00;
+ }
+
+ private static void orInt() {
+ expectEquals(7, $opt$Or(5, 3));
+ expectEquals(0, $opt$Or(0, 0));
+ expectEquals(3, $opt$Or(0, 3));
+ expectEquals(3, $opt$Or(3, 0));
+ expectEquals(-3, $opt$Or(1, -3));
+ expectEquals(-3, $opt$Or(-12, -3));
+
+ expectEquals(15, $opt$OrLit8(1));
+ expectEquals(15, $opt$OrLit8(0));
+ expectEquals(15, $opt$OrLit8(3));
+ expectEquals(-1, $opt$OrLit8(-12));
+
+ expectEquals(0xFF01, $opt$OrLit16(1));
+ expectEquals(0xFF00, $opt$OrLit16(0));
+ expectEquals(0xFF03, $opt$OrLit16(3));
+ expectEquals(-12, $opt$OrLit16(-12));
+ }
+
+ private static void orLong() {
+ expectEquals(7L, $opt$Or(5L, 3L));
+ expectEquals(0L, $opt$Or(0L, 0L));
+ expectEquals(3L, $opt$Or(0L, 3L));
+ expectEquals(3L, $opt$Or(3L, 0L));
+ expectEquals(-3L, $opt$Or(1L, -3L));
+ expectEquals(-3L, $opt$Or(-12L, -3L));
+
+ expectEquals(15L, $opt$OrLit8(1L));
+ expectEquals(15L, $opt$OrLit8(0L));
+ expectEquals(15L, $opt$OrLit8(3L));
+ expectEquals(-1L, $opt$OrLit8(-12L));
+
+ expectEquals(0xFF01L, $opt$OrLit16(1L));
+ expectEquals(0xFF00L, $opt$OrLit16(0L));
+ expectEquals(0xFF03L, $opt$OrLit16(3L));
+ expectEquals(-12L, $opt$OrLit16(-12L));
+ }
+
+ static int $opt$Or(int a, int b) {
+ return a | b;
+ }
+
+ static int $opt$OrLit8(int a) {
+ return a | 0xF;
+ }
+
+ static int $opt$OrLit16(int a) {
+ return a | 0xFF00;
+ }
+
+ static long $opt$Or(long a, long b) {
+ return a | b;
+ }
+
+ static long $opt$OrLit8(long a) {
+ return a | 0xF;
+ }
+
+ static long $opt$OrLit16(long a) {
+ return a | 0xFF00;
+ }
+
+ private static void xorInt() {
+ expectEquals(6, $opt$Xor(5, 3));
+ expectEquals(0, $opt$Xor(0, 0));
+ expectEquals(3, $opt$Xor(0, 3));
+ expectEquals(3, $opt$Xor(3, 0));
+ expectEquals(-4, $opt$Xor(1, -3));
+ expectEquals(9, $opt$Xor(-12, -3));
+
+ expectEquals(14, $opt$XorLit8(1));
+ expectEquals(15, $opt$XorLit8(0));
+ expectEquals(12, $opt$XorLit8(3));
+ expectEquals(-5, $opt$XorLit8(-12));
+
+ expectEquals(0xFF01, $opt$XorLit16(1));
+ expectEquals(0xFF00, $opt$XorLit16(0));
+ expectEquals(0xFF03, $opt$XorLit16(3));
+ expectEquals(-0xFF0c, $opt$XorLit16(-12));
+ }
+
+ private static void xorLong() {
+ expectEquals(6L, $opt$Xor(5L, 3L));
+ expectEquals(0L, $opt$Xor(0L, 0L));
+ expectEquals(3L, $opt$Xor(0L, 3L));
+ expectEquals(3L, $opt$Xor(3L, 0L));
+ expectEquals(-4L, $opt$Xor(1L, -3L));
+ expectEquals(9L, $opt$Xor(-12L, -3L));
+
+ expectEquals(14L, $opt$XorLit8(1L));
+ expectEquals(15L, $opt$XorLit8(0L));
+ expectEquals(12L, $opt$XorLit8(3L));
+ expectEquals(-5L, $opt$XorLit8(-12L));
+
+ expectEquals(0xFF01L, $opt$XorLit16(1L));
+ expectEquals(0xFF00L, $opt$XorLit16(0L));
+ expectEquals(0xFF03L, $opt$XorLit16(3L));
+ expectEquals(-0xFF0cL, $opt$XorLit16(-12L));
+ }
+
+ static int $opt$Xor(int a, int b) {
+ return a ^ b;
+ }
+
+ static int $opt$XorLit8(int a) {
+ return a ^ 0xF;
+ }
+
+ static int $opt$XorLit16(int a) {
+ return a ^ 0xFF00;
+ }
+
+ static long $opt$Xor(long a, long b) {
+ return a ^ b;
+ }
+
+ static long $opt$XorLit8(long a) {
+ return a ^ 0xF;
+ }
+
+ static long $opt$XorLit16(long a) {
+ return a ^ 0xFF00;
+ }
+}
diff --git a/test/427-bounds/expected.txt b/test/427-bounds/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/427-bounds/expected.txt
diff --git a/test/427-bounds/info.txt b/test/427-bounds/info.txt
new file mode 100644
index 0000000..8b8b957
--- /dev/null
+++ b/test/427-bounds/info.txt
@@ -0,0 +1,2 @@
+Regression test for the optimizing compiler that used to incorrectly pass
+index and/or length to the pThrowArrayBounds entrypoint.
diff --git a/test/427-bounds/src/Main.java b/test/427-bounds/src/Main.java
new file mode 100644
index 0000000..a2d84d2
--- /dev/null
+++ b/test/427-bounds/src/Main.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ Exception exception = null;
+ try {
+ $opt$Throw(new int[1]);
+ } catch (ArrayIndexOutOfBoundsException e) {
+ exception = e;
+ }
+
+ String exceptionMessage = exception.getMessage();
+
+ // Note that it's ART specific to emit the length.
+ if (exceptionMessage.contains("length")) {
+ if (!exceptionMessage.contains("length=1")) {
+ throw new Error("Wrong length in exception message");
+ }
+ }
+
+ // Note that it's ART specific to emit the index.
+ if (exceptionMessage.contains("index")) {
+ if (!exceptionMessage.contains("index=2")) {
+ throw new Error("Wrong index in exception message");
+ }
+ }
+ }
+
+ static void $opt$Throw(int[] array) {
+ // We fetch the length first, to ensure it is in EAX (on x86).
+ // The pThrowArrayBounds entrypoint expects the index in EAX and the
+ // length in ECX, and the optimizing compiler used to write to EAX
+ // before putting the length in ECX.
+ int length = array.length;
+ array[2] = 42;
+ }
+}
diff --git a/test/800-smali/expected.txt b/test/800-smali/expected.txt
index 4002fbf..f766b0a 100644
--- a/test/800-smali/expected.txt
+++ b/test/800-smali/expected.txt
@@ -1,3 +1,5 @@
b/17790197
+b/17978759
FloatBadArgReg
+negLong
Done!
diff --git a/test/800-smali/smali/b_17978759.smali b/test/800-smali/smali/b_17978759.smali
new file mode 100644
index 0000000..07bcae5
--- /dev/null
+++ b/test/800-smali/smali/b_17978759.smali
@@ -0,0 +1,28 @@
+.class public LB17978759;
+.super Ljava/lang/Object;
+
+ .method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+ .end method
+
+ .method public test()V
+ .registers 2
+
+ move-object v0, p0
+ # v0 and p0 alias
+ monitor-enter p0
+ # monitor-enter on p0
+ monitor-exit v0
+    # monitor-exit on v0; however, the verifier doesn't track the aliasing,
+    # so this is only a warning. The verifier will still think p0 is locked.
+
+ move-object v0, p0
+ # v0 will now appear locked.
+ monitor-enter v0
+ # Attempt to lock v0 twice is a verifier failure.
+ monitor-exit v0
+
+ return-void
+ .end method
diff --git a/test/800-smali/smali/negLong.smali b/test/800-smali/smali/negLong.smali
new file mode 100755
index 0000000..29d416e
--- /dev/null
+++ b/test/800-smali/smali/negLong.smali
@@ -0,0 +1,186 @@
+.class public LnegLong;
+.super Ljava/lang/Object;
+.source "negLong.java"
+# static fields
+.field public static final N:I = 0x64
+.field public static i:I
+# direct methods
+.method static constructor <clinit>()V
+ .registers 1
+ .prologue
+ .line 5
+ const/16 v0, 0x44da
+ sput v0, LnegLong;->i:I
+ return-void
+.end method
+.method public constructor <init>()V
+ .registers 1
+ .prologue
+ .line 1
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+.method public static checkSum1([S)J
+ .registers 7
+ .prologue
+ .line 14
+ array-length v3, p0
+ .line 15
+ const-wide/16 v0, 0x0
+ .line 16
+ const/4 v2, 0x0
+ :goto_4
+ if-ge v2, v3, :cond_d
+ .line 17
+ aget-short v4, p0, v2
+ int-to-long v4, v4
+ add-long/2addr v0, v4
+ .line 16
+ add-int/lit8 v2, v2, 0x1
+ goto :goto_4
+ .line 18
+ :cond_d
+ return-wide v0
+.end method
+.method public static init1([SS)V
+ .registers 4
+ .prologue
+ .line 8
+ array-length v1, p0
+ .line 9
+ const/4 v0, 0x0
+ :goto_2
+ if-ge v0, v1, :cond_9
+ .line 10
+ aput-short p1, p0, v0
+ .line 9
+ add-int/lit8 v0, v0, 0x1
+ goto :goto_2
+ .line 11
+ :cond_9
+ return-void
+.end method
+.method public static main([Ljava/lang/String;)V
+ .registers 6
+ .prologue
+ .line 50
+ invoke-static {}, LnegLong;->negLong()J
+ move-result-wide v0
+ .line 51
+ sget-object v2, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ new-instance v3, Ljava/lang/StringBuilder;
+ invoke-direct {v3}, Ljava/lang/StringBuilder;-><init>()V
+ const-string v4, "nbp ztw p = "
+ invoke-virtual {v3, v4}, Ljava/lang/StringBuilder;->append(Ljava/lang/String;)Ljava/lang/StringBuilder;
+ move-result-object v3
+ invoke-virtual {v3, v0, v1}, Ljava/lang/StringBuilder;->append(J)Ljava/lang/StringBuilder;
+ move-result-object v0
+ invoke-virtual {v0}, Ljava/lang/StringBuilder;->toString()Ljava/lang/String;
+ move-result-object v0
+ invoke-virtual {v2, v0}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
+ .line 52
+ return-void
+.end method
+.method public static negLong()J
+ .registers 17
+ .prologue
+ .line 23
+ const-wide v1, -0x4c4a1f4aa9b1db83L
+ .line 24
+ const v7, -0x3f727efa
+ .line 25
+ const/16 v4, -0x284b
+ const v3, 0xdc01
+ .line 26
+ const/16 v0, 0x64
+ new-array v8, v0, [S
+ .line 28
+ const/16 v0, 0x1c60
+ invoke-static {v8, v0}, LnegLong;->init1([SS)V
+ .line 29
+ const/4 v0, 0x2
+ move v6, v0
+ :goto_18
+ const/16 v0, 0x56
+ if-ge v6, v0, :cond_64
+ .line 30
+ const/4 v0, 0x1
+ move v5, v0
+ move v0, v3
+ move-wide v15, v1
+ move-wide v2, v15
+ :goto_21
+ if-ge v5, v6, :cond_5d
+ .line 31
+ int-to-float v0, v4
+ neg-float v1, v7
+ add-float/2addr v0, v1
+ float-to-int v1, v0
+ .line 32
+ const/4 v0, 0x1
+ move v4, v1
+ move-wide v15, v2
+ move-wide v1, v15
+ .line 33
+ :goto_2b
+ add-int/lit8 v3, v0, 0x1
+ const/16 v0, 0x1b
+ if-ge v3, v0, :cond_3a
+ .line 35
+ int-to-long v9, v5
+ mul-long v0, v9, v1
+ neg-long v1, v0
+ .line 38
+ sget v0, LnegLong;->i:I
+ move v4, v0
+ move v0, v3
+ goto :goto_2b
+ .line 40
+ :cond_3a
+ aget-short v0, v8, v6
+ int-to-double v9, v0
+ long-to-double v11, v1
+ const-wide v13, 0x403f9851eb851eb8L
+ sub-double/2addr v11, v13
+ add-double/2addr v9, v11
+ double-to-int v0, v9
+ int-to-short v0, v0
+ aput-short v0, v8, v6
+ .line 41
+ const/4 v0, 0x2
+ :goto_4a
+ const/16 v9, 0x43
+ if-ge v0, v9, :cond_56
+ .line 42
+ neg-long v9, v1
+ const-wide/16 v11, 0x1
+ or-long/2addr v9, v11
+ add-long/2addr v1, v9
+ .line 41
+ add-int/lit8 v0, v0, 0x1
+ goto :goto_4a
+ .line 30
+ :cond_56
+ add-int/lit8 v0, v5, 0x1
+ move v5, v0
+ move v0, v3
+ move-wide v15, v1
+ move-wide v2, v15
+ goto :goto_21
+ .line 29
+ :cond_5d
+ add-int/lit8 v1, v6, 0x1
+ move v6, v1
+ move-wide v15, v2
+ move-wide v1, v15
+ move v3, v0
+ goto :goto_18
+ .line 45
+ :cond_64
+ invoke-static {v8}, LnegLong;->checkSum1([S)J
+ move-result-wide v0
+ int-to-long v2, v3
+ add-long/2addr v0, v2
+ .line 46
+ return-wide v0
+.end method
diff --git a/test/800-smali/src/Main.java b/test/800-smali/src/Main.java
index c86470c..014edc0 100644
--- a/test/800-smali/src/Main.java
+++ b/test/800-smali/src/Main.java
@@ -15,6 +15,7 @@
*/
import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
import java.util.LinkedList;
import java.util.List;
@@ -49,8 +50,10 @@
testCases = new LinkedList<TestCase>();
testCases.add(new TestCase("b/17790197", "B17790197", "getInt", null, null, 100));
+ testCases.add(new TestCase("b/17978759", "B17978759", "test", null, new VerifyError(), null));
testCases.add(new TestCase("FloatBadArgReg", "FloatBadArgReg", "getInt",
new Object[]{100}, null, 100));
+ testCases.add(new TestCase("negLong", "negLong", "negLong", null, null, 122142L));
}
public void runTests() {
@@ -65,47 +68,59 @@
}
private void runTest(TestCase tc) throws Exception {
- Class<?> c = Class.forName(tc.testClass);
-
- Method[] methods = c.getDeclaredMethods();
-
- // For simplicity we assume that test methods are not overloaded. So searching by name
- // will give us the method we need to run.
- Method method = null;
- for (Method m : methods) {
- if (m.getName().equals(tc.testMethodName)) {
- method = m;
- break;
- }
- }
-
- if (method == null) {
- throw new IllegalArgumentException("Could not find test method " + tc.testMethodName +
- " in class " + tc.testClass + " for test " + tc.testName);
- }
-
Exception errorReturn = null;
try {
- Object retValue = method.invoke(null, tc.values);
- if (tc.expectedException != null) {
- errorReturn = new IllegalStateException("Expected an exception in test " +
- tc.testName);
+ Class<?> c = Class.forName(tc.testClass);
+
+ Method[] methods = c.getDeclaredMethods();
+
+ // For simplicity we assume that test methods are not overloaded. So searching by name
+ // will give us the method we need to run.
+ Method method = null;
+ for (Method m : methods) {
+ if (m.getName().equals(tc.testMethodName)) {
+ method = m;
+ break;
+ }
}
- if (tc.expectedReturn == null && retValue != null) {
- errorReturn = new IllegalStateException("Expected a null result in test " +
- tc.testName);
- } else if (tc.expectedReturn != null &&
- (retValue == null || !tc.expectedReturn.equals(retValue))) {
- errorReturn = new IllegalStateException("Expected return " + tc.expectedReturn +
- ", but got " + retValue);
+
+ if (method == null) {
+ errorReturn = new IllegalArgumentException("Could not find test method " +
+ tc.testMethodName + " in class " +
+ tc.testClass + " for test " +
+ tc.testName);
+ } else {
+ Object retValue;
+ if (Modifier.isStatic(method.getModifiers())) {
+ retValue = method.invoke(null, tc.values);
+ } else {
+ retValue = method.invoke(method.getDeclaringClass().newInstance(), tc.values);
+ }
+ if (tc.expectedException != null) {
+ errorReturn = new IllegalStateException("Expected an exception in test " +
+ tc.testName);
+ }
+ if (tc.expectedReturn == null && retValue != null) {
+ errorReturn = new IllegalStateException("Expected a null result in test " +
+ tc.testName);
+ } else if (tc.expectedReturn != null &&
+ (retValue == null || !tc.expectedReturn.equals(retValue))) {
+ errorReturn = new IllegalStateException("Expected return " +
+ tc.expectedReturn +
+ ", but got " + retValue);
+ } else {
+ // Expected result, do nothing.
+ }
}
- } catch (Exception exc) {
+ } catch (Throwable exc) {
if (tc.expectedException == null) {
errorReturn = new IllegalStateException("Did not expect exception", exc);
} else if (!tc.expectedException.getClass().equals(exc.getClass())) {
errorReturn = new IllegalStateException("Expected " +
- tc.expectedException.getClass().getName() +
- ", but got " + exc.getClass(), exc);
+ tc.expectedException.getClass().getName() +
+ ", but got " + exc.getClass(), exc);
+ } else {
+ // Expected exception, do nothing.
}
} finally {
if (errorReturn != null) {
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index e7a0439..3b949d6 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -177,15 +177,6 @@
TEST_ART_TIMING_SENSITIVE_RUN_TESTS :=
-TEST_ART_BROKEN_RUN_TESTS := \
- 004-ThreadStress
-
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
- $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES), $(PICTEST_TYPES), $(TEST_ART_BROKEN_RUN_TESTS), $(ALL_ADDRESS_SIZES))
-
-TEST_ART_BROKEN_RUN_TESTS :=
-
# Note 116-nodex2oat is not broken per-se it just doesn't (and isn't meant to) work with --prebuild.
TEST_ART_BROKEN_PREBUILD_RUN_TESTS := \
116-nodex2oat
@@ -295,8 +286,7 @@
TEST_ART_BROKEN_NDEBUG_TESTS :=
# Known broken tests for the default compiler (Quick).
-TEST_ART_BROKEN_DEFAULT_RUN_TESTS := \
- 412-new-array
+TEST_ART_BROKEN_DEFAULT_RUN_TESTS :=
ifneq (,$(filter default,$(COMPILER_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
@@ -313,6 +303,7 @@
003-omnibus-opcodes \
004-InterfaceTest \
004-JniTest \
+ 004-NativeAllocations \
004-ReferenceMap \
004-SignalTest \
004-StackWalk \
@@ -321,8 +312,11 @@
006-args \
007-count10 \
008-exceptions \
+ 009-instanceof \
+ 010-instance \
011-array-copy \
013-math2 \
+ 014-math3 \
016-intern \
017-float \
018-stack-overflow \
@@ -332,6 +326,7 @@
022-interface \
023-many-interfaces \
024-illegal-access \
+ 025-access-controller \
026-access \
028-array-write \
029-assert \
@@ -339,6 +334,7 @@
031-class-attributes \
032-concrete-sub \
033-class-init-deadlock \
+ 034-call-null \
035-enum \
036-finalizer \
037-inherit \
@@ -358,7 +354,9 @@
054-uncaught \
055-enum-performance \
056-const-string-jumbo \
+ 058-enum-order \
061-out-of-memory \
+ 062-character-encodings \
063-process-manager \
064-field-access \
065-mismatched-implements \
@@ -399,14 +397,17 @@
105-invoke \
106-exceptions2 \
107-int-math2 \
+ 108-check-cast \
109-suspend-check \
110-field-access \
111-unresolvable-exception \
112-double-math \
113-multidex \
+ 114-ParallelGC \
117-nopatchoat \
118-noimage-dex2oat \
119-noimage-patchoat \
+ 120-hashcode \
121-modifiers \
121-simple-suspend-check \
122-npe \
@@ -419,6 +420,7 @@
300-package-override \
301-abstract-protected \
303-verification-stress \
+ 304-method-tracing \
401-optimizing-compiler \
402-optimizing-control-flow \
403-optimizing-long \
@@ -444,6 +446,10 @@
422-instanceof \
422-type-conversion \
423-invoke-interface \
+ 424-checkcast \
+ 426-monitor \
+ 427-bitwise \
+ 427-bounds \
700-LoadArgRegs \
701-easy-div-rem \
702-LargeBranchOffset \