Merge "X86 QBE: Mark kX86StartOfMethod as defining reg 0"
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index d90a31e..7e58f5c 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -156,15 +156,36 @@
# Enable float equality warnings.
art_clang_cflags += -Wfloat-equal
+# Enable warning of converting ints to void*.
+art_clang_cflags += -Wint-to-void-pointer-cast
+
+# GCC-only warnings.
+art_gcc_cflags := -Wunused-but-set-parameter
+# Suggest const: too many false positives, but good for a trial run.
+# -Wsuggest-attribute=const
+# Useless casts: too many, as we need to be 32/64 agnostic, but the compiler knows.
+# -Wuseless-cast
+# Zero-as-null: Have to convert all NULL and "diagnostic ignore" all includes like libnativehelper
+# that are still stuck pre-C++11.
+# -Wzero-as-null-pointer-constant \
+# Suggest final: Have to move to a more recent GCC.
+# -Wsuggest-final-types
+
+
ifeq ($(ART_HOST_CLANG),true)
ART_HOST_CFLAGS += $(art_clang_cflags)
+else
+ ART_HOST_CFLAGS += $(art_gcc_cflags)
endif
ifeq ($(ART_TARGET_CLANG),true)
ART_TARGET_CFLAGS += $(art_clang_cflags)
+else
+ ART_TARGET_CFLAGS += $(art_gcc_cflags)
endif
-# Clear local variable now its use has ended.
+# Clear local variables now their use has ended.
art_clang_cflags :=
+art_gcc_cflags :=
ART_CPP_EXTENSION := .cc
@@ -184,14 +205,19 @@
-Wall \
-Werror \
-Wextra \
- -Wno-sign-promo \
- -Wno-unused-parameter \
-Wstrict-aliasing \
-fstrict-aliasing \
-Wunreachable-code \
+ -Wno-conversion-null \
+ -Wredundant-decls \
+ -Wshadow \
-fvisibility=protected \
$(art_default_gc_type_cflags)
+# Missing declarations: too many at the moment, as we use "extern" quite a bit.
+# -Wmissing-declarations \
+
+
ifeq ($(ART_SMALL_MODE),true)
art_cflags += -DART_SMALL_MODE=1
endif
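
The hunks above drop -Wno-unused-parameter and -Wno-sign-promo and add -Wshadow and -Wredundant-decls, which is why most of the source hunks further down annotate now-warned parameters. A minimal sketch (hypothetical function names; simplified versions of ART's macros) of the kind of code the stricter flags reject and of the ATTRIBUTE_UNUSED / UNUSED() idioms this change uses to satisfy them:

```cpp
// Sketch only: what -Wunused-parameter and -Wshadow flag once the
// -Wno-unused-parameter escape hatch is gone, and how this change silences them.
#include <cstddef>

// Simplified stand-ins for ART's macros.
#define ATTRIBUTE_UNUSED __attribute__((__unused__))
template <typename... T> void UNUSED(const T&...) {}

// Would trigger -Wunused-parameter without the annotation.
int ScaledSize(size_t size, int scale ATTRIBUTE_UNUSED) {
  return static_cast<int>(size);  // 'scale' deliberately ignored here.
}

int Sum(const int* data, size_t count) {
  int total = 0;
  for (size_t i = 0; i < count; ++i) {
    // Re-declaring another 'size_t i' in this scope would now trip -Wshadow,
    // which is why the compiled_method.h hunk reuses the outer loop variable.
    total += data[i];
  }
  return total;
}

int main() {
  const int v[] = {1, 2, 3};
  UNUSED(ScaledSize(3, 7));  // UNUSED() swallows deliberately ignored values.
  return Sum(v, 3) == 6 ? 0 : 1;
}
```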
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 9e640c6..75665b6 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -48,7 +48,7 @@
# Dex file dependencies for each gtest.
ART_GTEST_class_linker_test_DEX_DEPS := Interfaces MyClass Nested Statics StaticsFromCode
ART_GTEST_compiler_driver_test_DEX_DEPS := AbstractMethod
-ART_GTEST_dex_file_test_DEX_DEPS := GetMethodSignature Nested
+ART_GTEST_dex_file_test_DEX_DEPS := GetMethodSignature Main Nested
ART_GTEST_exception_test_DEX_DEPS := ExceptionHandle
ART_GTEST_jni_compiler_test_DEX_DEPS := MyClassNatives
ART_GTEST_jni_internal_test_DEX_DEPS := AllFields StaticLeafMethods
@@ -64,6 +64,7 @@
ART_GTEST_jni_internal_test_TARGET_DEPS := $(TARGET_CORE_DEX_FILES)
ART_GTEST_proxy_test_TARGET_DEPS := $(TARGET_CORE_DEX_FILES)
ART_GTEST_proxy_test_HOST_DEPS := $(HOST_CORE_IMAGE_default_no-pic_64) $(HOST_CORE_IMAGE_default_no-pic_32)
+ART_GTEST_dex_method_iterator_test_TARGET_DEPS := $(TARGET_CORE_DEX_FILES)
# The path for which all the source files are relative, not actually the current directory.
LOCAL_PATH := art
@@ -82,7 +83,6 @@
runtime/base/stringprintf_test.cc \
runtime/base/timing_logger_test.cc \
runtime/base/unix_file/fd_file_test.cc \
- runtime/base/unix_file/mapped_file_test.cc \
runtime/base/unix_file/null_file_test.cc \
runtime/base/unix_file/random_access_file_utils_test.cc \
runtime/base/unix_file/string_file_test.cc \
@@ -111,6 +111,7 @@
runtime/indirect_reference_table_test.cc \
runtime/instruction_set_test.cc \
runtime/intern_table_test.cc \
+ runtime/interpreter/safe_math_test.cc \
runtime/leb128_test.cc \
runtime/mem_map_test.cc \
runtime/mirror/dex_cache_test.cc \
@@ -200,7 +201,7 @@
LOCAL_CFLAGS := $(ART_TARGET_CFLAGS)
LOCAL_SRC_FILES := runtime/common_runtime_test.cc compiler/common_compiler_test.cc
LOCAL_C_INCLUDES := $(ART_C_INCLUDES) art/runtime art/compiler
-LOCAL_SHARED_LIBRARIES := libcutils libartd libartd-compiler libdl
+LOCAL_SHARED_LIBRARIES := libartd libartd-compiler libdl
LOCAL_STATIC_LIBRARIES += libgtest_libc++
LOCAL_CLANG := $(ART_TARGET_CLANG)
LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk
@@ -216,8 +217,7 @@
LOCAL_SRC_FILES := runtime/common_runtime_test.cc compiler/common_compiler_test.cc
LOCAL_C_INCLUDES := $(ART_C_INCLUDES) art/runtime art/compiler
LOCAL_SHARED_LIBRARIES := libartd libartd-compiler
-LOCAL_STATIC_LIBRARIES := libcutils
-LOCAL_STATIC_LIBRARIES += libgtest_libc++_host
+LOCAL_STATIC_LIBRARIES := libgtest_libc++_host
LOCAL_LDLIBS += -ldl -lpthread
LOCAL_MULTILIB := both
LOCAL_CLANG := $(ART_HOST_CLANG)
@@ -237,9 +237,15 @@
ART_TEST_TARGET_GTEST$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
ART_TEST_TARGET_GTEST_RULES :=
+ART_GTEST_TARGET_ANDROID_ROOT := '/system'
+ifneq ($(ART_TEST_ANDROID_ROOT),)
+ ART_GTEST_TARGET_ANDROID_ROOT := $(ART_TEST_ANDROID_ROOT)
+endif
+
# Define a make rule for a target device gtest.
# $(1): gtest name - the name of the test we're building such as leb128_test.
# $(2): 2ND_ or undefined - used to differentiate between the primary and secondary architecture.
+# $(3): LD_LIBRARY_PATH or undefined - used in case libartd.so is not in /system/lib/
define define-art-gtest-rule-target
gtest_rule := test-art-target-gtest-$(1)$$($(2)ART_PHONY_TEST_TARGET_SUFFIX)
@@ -249,7 +255,8 @@
$$(ART_GTEST_$(1)_TARGET_DEPS) \
$(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_TARGET_GTEST_$(file)_DEX)) \
$$(ART_TARGET_NATIVETEST_OUT)/$$(TARGET_$(2)ARCH)/$(1) \
- $$($(2)TARGET_OUT_SHARED_LIBRARIES)/libjavacore.so
+ $$($(2)TARGET_OUT_SHARED_LIBRARIES)/libjavacore.so \
+ $$(TARGET_OUT_JAVA_LIBRARIES)/core-libart.jar
.PHONY: $$(gtest_rule)
$$(gtest_rule): test-art-target-sync
@@ -257,7 +264,8 @@
$(hide) adb shell rm $(ART_TARGET_TEST_DIR)/$(TARGET_$(2)ARCH)/$$@-$$$$PPID
$(hide) adb shell chmod 755 $(ART_TARGET_NATIVETEST_DIR)/$(TARGET_$(2)ARCH)/$(1)
$(hide) $$(call ART_TEST_SKIP,$$@) && \
- (adb shell "$(ART_TARGET_NATIVETEST_DIR)/$(TARGET_$(2)ARCH)/$(1) && touch $(ART_TARGET_TEST_DIR)/$(TARGET_$(2)ARCH)/$$@-$$$$PPID" \
+ (adb shell "LD_LIBRARY_PATH=$(3) ANDROID_ROOT=$(ART_GTEST_TARGET_ANDROID_ROOT) \
+ $(ART_TARGET_NATIVETEST_DIR)/$(TARGET_$(2)ARCH)/$(1) && touch $(ART_TARGET_TEST_DIR)/$(TARGET_$(2)ARCH)/$$@-$$$$PPID" \
&& (adb pull $(ART_TARGET_TEST_DIR)/$(TARGET_$(2)ARCH)/$$@-$$$$PPID /tmp/ \
&& $$(call ART_TEST_PASSED,$$@)) \
|| $$(call ART_TEST_FAILED,$$@))
@@ -356,12 +364,22 @@
LOCAL_MODULE_PATH_64 := $$(ART_TARGET_NATIVETEST_OUT)/$$(ART_TARGET_ARCH_64)
LOCAL_MULTILIB := both
include $$(BUILD_EXECUTABLE)
+ library_path :=
+ 2nd_library_path :=
+ ifneq ($$(ART_TEST_ANDROID_ROOT),)
+ ifdef TARGET_2ND_ARCH
+ 2nd_library_path := $$(ART_TEST_ANDROID_ROOT)/lib
+ library_path := $$(ART_TEST_ANDROID_ROOT)/lib64
+ else
+ library_path := $$(ART_TEST_ANDROID_ROOT)/lib
+ endif
+ endif
ART_TEST_TARGET_GTEST_$$(art_gtest_name)_RULES :=
ifdef TARGET_2ND_ARCH
- $$(eval $$(call define-art-gtest-rule-target,$$(art_gtest_name),2ND_))
+ $$(eval $$(call define-art-gtest-rule-target,$$(art_gtest_name),2ND_,$$(2nd_library_path)))
endif
- $$(eval $$(call define-art-gtest-rule-target,$$(art_gtest_name),))
+ $$(eval $$(call define-art-gtest-rule-target,$$(art_gtest_name),,$$(library_path)))
# A rule to run the different architecture versions of the gtest.
.PHONY: test-art-target-gtest-$$(art_gtest_name)
@@ -373,8 +391,7 @@
else # host
LOCAL_CLANG := $$(ART_HOST_CLANG)
LOCAL_CFLAGS += $$(ART_HOST_CFLAGS) $$(ART_HOST_DEBUG_CFLAGS)
- LOCAL_SHARED_LIBRARIES += libicuuc-host libicui18n-host libnativehelper libz-host
- LOCAL_STATIC_LIBRARIES += libcutils libvixl
+ LOCAL_SHARED_LIBRARIES += libicuuc-host libicui18n-host libnativehelper libziparchive-host libz-host libvixl
LOCAL_LDLIBS := $(ART_HOST_LDLIBS) -lpthread -ldl
LOCAL_IS_HOST_MODULE := true
LOCAL_MULTILIB := both
@@ -409,6 +426,8 @@
art_gtest_extra_c_includes :=
art_gtest_extra_shared_libraries :=
art_gtest_name :=
+ library_path :=
+ 2nd_library_path :=
endef # define-art-gtest
ifeq ($(ART_BUILD_TARGET),true)
@@ -500,6 +519,7 @@
ART_TEST_TARGET_GTEST$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
ART_TEST_TARGET_GTEST$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
ART_TEST_TARGET_GTEST_RULES :=
+ART_GTEST_TARGET_ANDROID_ROOT :=
ART_GTEST_class_linker_test_DEX_DEPS :=
ART_GTEST_compiler_driver_test_DEX_DEPS :=
ART_GTEST_dex_file_test_DEX_DEPS :=
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index bded51b..9fe3807 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -35,9 +35,13 @@
core_oat_name :=
core_infix :=
core_pic_infix :=
+ core_dex2oat_dependency := $(DEX2OAT_DEPENDENCY)
ifeq ($(1),optimizing)
core_compile_options += --compiler-backend=Optimizing
+ # With the optimizing compiler, we want to rerun dex2oat whenever there is
+ # a dex2oat change to catch regressions early.
+ core_dex2oat_dependency := $(DEX2OAT)
core_infix := -optimizing
endif
ifeq ($(1),interpreter)
@@ -79,7 +83,7 @@
$$(core_image_name): PRIVATE_CORE_COMPILE_OPTIONS := $$(core_compile_options)
$$(core_image_name): PRIVATE_CORE_IMG_NAME := $$(core_image_name)
$$(core_image_name): PRIVATE_CORE_OAT_NAME := $$(core_oat_name)
-$$(core_image_name): $$(HOST_CORE_DEX_LOCATIONS) $$(DEX2OAT_DEPENDENCY)
+$$(core_image_name): $$(HOST_CORE_DEX_LOCATIONS) $$(core_dex2oat_dependency)
@echo "host dex2oat: $$@ ($$?)"
@mkdir -p $$(dir $$@)
$$(hide) $$(DEX2OAT) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) --runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \
@@ -94,6 +98,7 @@
$$(core_oat_name): $$(core_image_name)
# Clean up locally used variables.
+ core_dex2oat_dependency :=
core_compile_options :=
core_image_name :=
core_oat_name :=
@@ -123,9 +128,19 @@
core_oat_name :=
core_infix :=
core_pic_infix :=
+ core_dex2oat_dependency := $(DEX2OAT_DEPENDENCY)
ifeq ($(1),optimizing)
- core_compile_options += --compiler-backend=Optimizing
+ ifeq ($($(3)TARGET_ARCH),arm64)
+ # TODO: Enable image generation on arm64 once the backend
+ # is on par with other architectures.
+ core_compile_options += --compiler-backend=Quick
+ else
+ core_compile_options += --compiler-backend=Optimizing
+ # With the optimizing compiler, we want to rerun dex2oat whenever there is
+ # a dex2oat change to catch regressions early.
+ core_dex2oat_dependency := $(DEX2OAT)
+ endif
core_infix := -optimizing
endif
ifeq ($(1),interpreter)
@@ -171,7 +186,7 @@
$$(core_image_name): PRIVATE_CORE_COMPILE_OPTIONS := $$(core_compile_options)
$$(core_image_name): PRIVATE_CORE_IMG_NAME := $$(core_image_name)
$$(core_image_name): PRIVATE_CORE_OAT_NAME := $$(core_oat_name)
-$$(core_image_name): $$(TARGET_CORE_DEX_FILES) $$(DEX2OAT_DEPENDENCY)
+$$(core_image_name): $$(TARGET_CORE_DEX_FILES) $$(core_dex2oat_dependency)
@echo "target dex2oat: $$@ ($$?)"
@mkdir -p $$(dir $$@)
$$(hide) $$(DEX2OAT) --runtime-arg -Xms$(DEX2OAT_XMS) --runtime-arg -Xmx$(DEX2OAT_XMX) \
@@ -181,11 +196,12 @@
--base=$$(LIBART_IMG_TARGET_BASE_ADDRESS) --instruction-set=$$($(3)TARGET_ARCH) \
--instruction-set-features=$$($(3)TARGET_INSTRUCTION_SET_FEATURES) \
--android-root=$$(PRODUCT_OUT)/system --include-patch-information \
- $$(PRIVATE_CORE_COMPILE_OPTIONS)
+ $$(PRIVATE_CORE_COMPILE_OPTIONS) || (rm $$(PRIVATE_CORE_OAT_NAME); exit 1)
$$(core_oat_name): $$(core_image_name)
# Clean up locally used variables.
+ core_dex2oat_dependency :=
core_compile_options :=
core_image_name :=
core_oat_name :=
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 172c96c..610f453 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -58,22 +58,22 @@
dex/quick/x86/target_x86.cc \
dex/quick/x86/utility_x86.cc \
dex/dex_to_dex_compiler.cc \
- dex/mir_dataflow.cc \
- dex/mir_field_info.cc \
- dex/mir_method_info.cc \
- dex/mir_optimization.cc \
dex/bb_optimizations.cc \
dex/compiler_ir.cc \
+ dex/frontend.cc \
+ dex/mir_analysis.cc \
+ dex/mir_dataflow.cc \
+ dex/mir_field_info.cc \
+ dex/mir_graph.cc \
+ dex/mir_method_info.cc \
+ dex/mir_optimization.cc \
dex/post_opt_passes.cc \
dex/pass_driver_me_opts.cc \
dex/pass_driver_me_post_opt.cc \
- dex/frontend.cc \
- dex/mir_graph.cc \
- dex/mir_analysis.cc \
+ dex/ssa_transformation.cc \
dex/verified_method.cc \
dex/verification_results.cc \
dex/vreg_analysis.cc \
- dex/ssa_transformation.cc \
dex/quick_compiler_callbacks.cc \
driver/compiler_driver.cc \
driver/dex_compilation_unit.cc \
@@ -133,6 +133,7 @@
file_output_stream.cc \
image_writer.cc \
oat_writer.cc \
+ output_stream.cc \
vector_output_stream.cc
ifeq ($(ART_SEA_IR_MODE),true)
@@ -168,7 +169,17 @@
endif
LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES := \
- dex/compiler_enums.h
+ dex/quick/arm/arm_lir.h \
+ dex/quick/arm64/arm64_lir.h \
+ dex/quick/mips/mips_lir.h \
+ dex/quick/resource_mask.h \
+ dex/compiler_enums.h \
+ dex/global_value_numbering.h \
+ dex/pass_me.h \
+ driver/compiler_driver.h \
+ driver/compiler_options.h \
+ optimizing/locations.h \
+ utils/arm/constants_arm.h
# $(1): target or host
# $(2): ndebug or debug
@@ -233,9 +244,6 @@
endif
endif
- # TODO: clean up the compilers and remove this.
- LOCAL_CFLAGS += -Wno-unused-parameter
-
ifeq ($(ART_USE_PORTABLE_COMPILER),true)
LOCAL_SHARED_LIBRARIES += libLLVM
LOCAL_CFLAGS += -DART_USE_PORTABLE_COMPILER=1
@@ -261,15 +269,18 @@
LOCAL_C_INCLUDES += $(ART_C_INCLUDES) art/runtime
ifeq ($$(art_target_or_host),host)
- LOCAL_LDLIBS += -ldl -lpthread
+ # For compiler driver TLS.
+ LOCAL_LDLIBS += -lpthread
endif
LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk
LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
+ # Vixl assembly support for ARM64 targets.
+ LOCAL_SHARED_LIBRARIES += libvixl
ifeq ($$(art_target_or_host),target)
- LOCAL_SHARED_LIBRARIES += libcutils libvixl
+ # For atrace.
+ LOCAL_SHARED_LIBRARIES += libcutils
include $(BUILD_SHARED_LIBRARY)
else # host
- LOCAL_STATIC_LIBRARIES += libcutils libvixl
LOCAL_MULTILIB := both
include $(BUILD_HOST_SHARED_LIBRARY)
endif
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 7e19e15..bfdb537 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -144,8 +144,7 @@
for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
if (!runtime_->HasCalleeSaveMethod(type)) {
- runtime_->SetCalleeSaveMethod(
- runtime_->CreateCalleeSaveMethod(type), type);
+ runtime_->SetCalleeSaveMethod(runtime_->CreateCalleeSaveMethod(), type);
}
}
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index cdae8d2..0361cd1 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -162,7 +162,7 @@
}
this->resize(i + 1);
- for (size_t i = size(); --i >= 1; ) {
+ for (i = size(); --i >= 1; ) {
(*this)[i].from_ -= (*this)[i-1].from_;
(*this)[i].to_ -= (*this)[i-1].to_;
}
diff --git a/compiler/compiler.cc b/compiler/compiler.cc
index 36213ca..b9fcf5b 100644
--- a/compiler/compiler.cc
+++ b/compiler/compiler.cc
@@ -25,13 +25,24 @@
namespace art {
#ifdef ART_SEA_IR_MODE
-extern "C" art::CompiledMethod* SeaIrCompileMethod(const art::DexFile::CodeItem* code_item,
- uint32_t access_flags,
- art::InvokeType invoke_type,
- uint16_t class_def_idx,
- uint32_t method_idx,
- jobject class_loader,
- const art::DexFile& dex_file);
+constexpr bool kCanUseSeaIR = true;
+#else
+constexpr bool kCanUseSeaIR = false;
+#endif
+
+extern "C" art::CompiledMethod* SeaIrCompileMethod(const art::DexFile::CodeItem* code_item ATTRIBUTE_UNUSED,
+ uint32_t access_flags ATTRIBUTE_UNUSED,
+ art::InvokeType invoke_type ATTRIBUTE_UNUSED,
+ uint16_t class_def_idx ATTRIBUTE_UNUSED,
+ uint32_t method_idx ATTRIBUTE_UNUSED,
+ jobject class_loader ATTRIBUTE_UNUSED,
+ const art::DexFile& dex_file ATTRIBUTE_UNUSED)
+#ifdef ART_SEA_IR_MODE
+; // NOLINT(whitespace/semicolon)
+#else
+{
+ UNREACHABLE();
+}
#endif
@@ -42,19 +53,18 @@
uint32_t method_idx,
jobject class_loader,
const art::DexFile& dex_file) {
-#ifdef ART_SEA_IR_MODE
- bool use_sea = (std::string::npos != PrettyMethod(method_idx, dex_file).find("fibonacci"));
- if (use_sea) {
- LOG(INFO) << "Using SEA IR to compile..." << std::endl;
- return SeaIrCompileMethod(code_item,
- access_flags,
- invoke_type,
- class_def_idx,
- method_idx,
- class_loader,
- dex_file);
+ bool use_sea = kCanUseSeaIR &&
+ (std::string::npos != PrettyMethod(method_idx, dex_file).find("fibonacci"));
+ if (use_sea) {
+ LOG(INFO) << "Using SEA IR to compile..." << std::endl;
+ return SeaIrCompileMethod(code_item,
+ access_flags,
+ invoke_type,
+ class_def_idx,
+ method_idx,
+ class_loader,
+ dex_file);
}
-#endif
return nullptr;
}
@@ -79,4 +89,26 @@
}
}
+bool Compiler::IsPathologicalCase(const DexFile::CodeItem& code_item,
+ uint32_t method_idx,
+ const DexFile& dex_file) {
+ /*
+ * Skip compilation for pathologically large methods - either by instruction count or num vregs.
+ * Dalvik uses 16-bit uints for instruction and register counts. We'll limit to a quarter
+ * of that, which also guarantees we cannot overflow our 16-bit internal Quick SSA name space.
+ */
+ if (code_item.insns_size_in_code_units_ >= UINT16_MAX / 4) {
+ LOG(INFO) << "Method exceeds compiler instruction limit: "
+ << code_item.insns_size_in_code_units_
+ << " in " << PrettyMethod(method_idx, dex_file);
+ return true;
+ }
+ if (code_item.registers_size_ >= UINT16_MAX / 4) {
+ LOG(INFO) << "Method exceeds compiler virtual register limit: "
+ << code_item.registers_size_ << " in " << PrettyMethod(method_idx, dex_file);
+ return true;
+ }
+ return false;
+}
+
} // namespace art
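
The compiler.cc hunks above replace an #ifdef ART_SEA_IR_MODE guard around the call site with a constexpr kCanUseSeaIR flag, so the guarded branch is always parsed and type-checked and simply folds away when the feature is disabled. A standalone sketch of that pattern under assumed names (MY_FEATURE_MODE and FeatureCompile are illustrative only):

```cpp
// Sketch: a constexpr flag instead of #ifdef'ing out the call site.
#include <cstdio>

#ifdef MY_FEATURE_MODE
constexpr bool kCanUseFeature = true;
#else
constexpr bool kCanUseFeature = false;
#endif

// Feature entry point; only the feature build provides the real body elsewhere.
int FeatureCompile(int input)
#ifdef MY_FEATURE_MODE
;   // Declaration only, mirroring the SeaIrCompileMethod declaration above.
#else
{ (void)input; __builtin_unreachable(); }  // Never reached when the flag is false.
#endif

int Compile(int input) {
  if (kCanUseFeature && input > 0) {   // Folds to false in the disabled build...
    return FeatureCompile(input);      // ...so this call is compiled but never taken.
  }
  return 0;
}

int main() {
  std::printf("%d\n", Compile(42));
  return 0;
}
```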
diff --git a/compiler/compiler.h b/compiler/compiler.h
index 05fa858..c2c15ff 100644
--- a/compiler/compiler.h
+++ b/compiler/compiler.h
@@ -115,6 +115,7 @@
*/
virtual std::vector<uint8_t>* GetCallFrameInformationInitialization(const CompilerDriver& driver)
const {
+ UNUSED(driver);
return nullptr;
}
@@ -122,6 +123,12 @@
return nullptr;
}
+ // Returns whether the method to compile is such a pathological case that
+ // it's not worth compiling.
+ static bool IsPathologicalCase(const DexFile::CodeItem& code_item,
+ uint32_t method_idx,
+ const DexFile& dex_file);
+
protected:
explicit Compiler(CompilerDriver* driver, uint64_t warning) :
driver_(driver), maximum_compilation_time_before_warning_(warning) {
diff --git a/compiler/dex/backend.h b/compiler/dex/backend.h
index cab3427..9cad933 100644
--- a/compiler/dex/backend.h
+++ b/compiler/dex/backend.h
@@ -38,7 +38,7 @@
/*
* Return the number of reservable vector registers supported
- * @param long_or_fp ‘true’ if floating point computations will be
+ * @param long_or_fp, true if floating point computations will be
* executed or the operations will be long type while vector
* registers are reserved.
* @return the number of vector registers that are available
@@ -46,7 +46,10 @@
* are held back to generate scalar code without exhausting vector
* registers, if scalar code also uses the vector registers.
*/
- virtual int NumReservableVectorRegisters(bool long_or_fp) { return 0; }
+ virtual int NumReservableVectorRegisters(bool long_or_fp) {
+ UNUSED(long_or_fp);
+ return 0;
+ }
protected:
explicit Backend(ArenaAllocator* arena) : arena_(arena) {}
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index 0b76999..1297ba9 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -28,6 +28,7 @@
kRefReg,
kAnyReg,
};
+std::ostream& operator<<(std::ostream& os, const RegisterClass& rhs);
enum BitsUsed {
kSize32Bits,
@@ -82,6 +83,7 @@
kLocCompilerTemp,
kLocInvalid
};
+std::ostream& operator<<(std::ostream& os, const RegLocationType& rhs);
enum BBType {
kNullBlock,
@@ -91,6 +93,7 @@
kExceptionHandling,
kDead,
};
+std::ostream& operator<<(std::ostream& os, const BBType& code);
// Shared pseudo opcodes - must be < 0.
enum LIRPseudoOpcode {
@@ -111,6 +114,7 @@
kPseudoEHBlockLabel = -2,
kPseudoNormalBlockLabel = -1,
};
+std::ostream& operator<<(std::ostream& os, const LIRPseudoOpcode& rhs);
enum ExtendedMIROpcode {
kMirOpFirst = kNumPackedOpcodes,
@@ -334,6 +338,7 @@
kPackedSwitch,
kSparseSwitch,
};
+std::ostream& operator<<(std::ostream& os, const BlockListType& rhs);
enum AssemblerStatus {
kSuccess,
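
The operator<< declarations added here pair with the headers listed under LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES in compiler/Android.mk above; the definitions are produced by generate-operator-out.py. A hand-written sketch of what such an operator provides for one of these enums (the exact generated shape is an assumption; the point is that LOG/DCHECK streams print enumerator names instead of raw integers):

```cpp
// Sketch of a streaming operator equivalent to the generated one.
#include <iostream>

enum BBType {
  kNullBlock,
  kEntryBlock,
  kDalvikByteCode,
  kExitBlock,
  kExceptionHandling,
  kDead,
};

std::ostream& operator<<(std::ostream& os, const BBType& rhs) {
  switch (rhs) {
    case kNullBlock:         os << "kNullBlock"; break;
    case kEntryBlock:        os << "kEntryBlock"; break;
    case kDalvikByteCode:    os << "kDalvikByteCode"; break;
    case kExitBlock:         os << "kExitBlock"; break;
    case kExceptionHandling: os << "kExceptionHandling"; break;
    case kDead:              os << "kDead"; break;
    default: os << "BBType[" << static_cast<int>(rhs) << "]"; break;
  }
  return os;
}

int main() {
  std::cout << kDalvikByteCode << std::endl;  // Prints "kDalvikByteCode".
  return 0;
}
```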
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index f9a05c2..205a521 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -282,10 +282,11 @@
} // namespace art
extern "C" void ArtCompileDEX(art::CompilerDriver& driver, const art::DexFile::CodeItem* code_item,
- uint32_t access_flags, art::InvokeType invoke_type,
- uint16_t class_def_idx, uint32_t method_idx, jobject class_loader,
- const art::DexFile& dex_file,
- art::DexToDexCompilationLevel dex_to_dex_compilation_level) {
+ uint32_t access_flags, art::InvokeType invoke_type,
+ uint16_t class_def_idx, uint32_t method_idx, jobject class_loader,
+ const art::DexFile& dex_file,
+ art::DexToDexCompilationLevel dex_to_dex_compilation_level) {
+ UNUSED(invoke_type);
if (dex_to_dex_compilation_level != art::kDontDexToDexCompile) {
art::DexCompilationUnit unit(NULL, class_loader, art::Runtime::Current()->GetClassLinker(),
dex_file, code_item, class_def_idx, method_idx, access_flags,
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index 3dc5655..a1e2caa 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -84,21 +84,8 @@
jobject class_loader, const DexFile& dex_file,
void* llvm_compilation_unit) {
VLOG(compiler) << "Compiling " << PrettyMethod(method_idx, dex_file) << "...";
- /*
- * Skip compilation for pathologically large methods - either by instruction count or num vregs.
- * Dalvik uses 16-bit uints for instruction and register counts. We'll limit to a quarter
- * of that, which also guarantees we cannot overflow our 16-bit internal SSA name space.
- */
- if (code_item->insns_size_in_code_units_ >= UINT16_MAX / 4) {
- LOG(INFO) << "Method exceeds compiler instruction limit: "
- << code_item->insns_size_in_code_units_
- << " in " << PrettyMethod(method_idx, dex_file);
- return NULL;
- }
- if (code_item->registers_size_ >= UINT16_MAX / 4) {
- LOG(INFO) << "Method exceeds compiler virtual register limit: "
- << code_item->registers_size_ << " in " << PrettyMethod(method_idx, dex_file);
- return NULL;
+ if (Compiler::IsPathologicalCase(*code_item, method_idx, dex_file)) {
+ return nullptr;
}
if (!driver.GetCompilerOptions().IsCompilationEnabled()) {
diff --git a/compiler/dex/global_value_numbering.h b/compiler/dex/global_value_numbering.h
index a4a7602..72d1112 100644
--- a/compiler/dex/global_value_numbering.h
+++ b/compiler/dex/global_value_numbering.h
@@ -19,14 +19,14 @@
#include "base/macros.h"
#include "compiler_internals.h"
-#include "utils/scoped_arena_containers.h"
+#include "utils/arena_object.h"
namespace art {
class LocalValueNumbering;
class MirFieldInfo;
-class GlobalValueNumbering {
+class GlobalValueNumbering : public DeletableArenaObject<kArenaAllocMisc> {
public:
enum Mode {
kModeGvn,
@@ -55,33 +55,17 @@
}
// Allow modifications.
- void StartPostProcessing() {
- DCHECK(Good());
- DCHECK_EQ(mode_, kModeGvn);
- mode_ = kModeGvnPostProcessing;
- }
+ void StartPostProcessing();
bool CanModify() const {
return modifications_allowed_ && Good();
}
- // GlobalValueNumbering should be allocated on the ArenaStack (or the native stack).
- static void* operator new(size_t size, ScopedArenaAllocator* allocator) {
- return allocator->Alloc(sizeof(GlobalValueNumbering), kArenaAllocMisc);
- }
-
- // Allow delete-expression to destroy a GlobalValueNumbering object without deallocation.
- static void operator delete(void* ptr) { UNUSED(ptr); }
-
private:
static constexpr uint16_t kNoValue = 0xffffu;
// Allocate a new value name.
- uint16_t NewValueName() {
- DCHECK_NE(mode_, kModeGvnPostProcessing);
- ++last_value_;
- return last_value_;
- }
+ uint16_t NewValueName();
// Key is concatenation of opcode, operand1, operand2 and modifier, value is value name.
typedef ScopedArenaSafeMap<uint64_t, uint16_t> ValueMap;
@@ -228,7 +212,7 @@
}
CompilationUnit* const cu_;
- MIRGraph* mir_graph_;
+ MIRGraph* const mir_graph_;
ScopedArenaAllocator* const allocator_;
// The maximum number of nested loops that we accept for GVN.
@@ -270,6 +254,19 @@
DISALLOW_COPY_AND_ASSIGN(GlobalValueNumbering);
};
+std::ostream& operator<<(std::ostream& os, const GlobalValueNumbering::Mode& rhs);
+
+inline void GlobalValueNumbering::StartPostProcessing() {
+ DCHECK(Good());
+ DCHECK_EQ(mode_, kModeGvn);
+ mode_ = kModeGvnPostProcessing;
+}
+
+inline uint16_t GlobalValueNumbering::NewValueName() {
+ DCHECK_NE(mode_, kModeGvnPostProcessing);
+ ++last_value_;
+ return last_value_;
+}
} // namespace art
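
Both GlobalValueNumbering and LocalValueNumbering lose their hand-rolled placement operator new and no-op operator delete in favor of deriving from DeletableArenaObject. A simplified, self-contained sketch of that base-class pattern (the real utils/arena_object.h is templated on the allocation kind and uses ART's arena allocators):

```cpp
// Sketch: arena-owned objects get operator new/delete from a shared base class.
#include <cstddef>
#include <cstdlib>
#include <vector>

// Stand-in allocator: hands out memory owned by the arena, freed in bulk.
class ScopedArenaAllocator {
 public:
  void* Alloc(size_t size) {
    void* p = std::malloc(size);
    blocks_.push_back(p);
    return p;
  }
  ~ScopedArenaAllocator() { for (void* p : blocks_) std::free(p); }
 private:
  std::vector<void*> blocks_;
};

// Derive from this instead of re-declaring operator new/delete in every class.
class DeletableArenaObject {
 public:
  static void* operator new(size_t size, ScopedArenaAllocator* allocator) {
    return allocator->Alloc(size);
  }
  // A delete-expression runs the destructor but must not free arena memory.
  static void operator delete(void* ptr) { (void)ptr; }
};

class GlobalValueNumbering : public DeletableArenaObject {
 public:
  explicit GlobalValueNumbering(int mode) : mode_(mode) {}
  int mode() const { return mode_; }
 private:
  int mode_;
};

int main() {
  ScopedArenaAllocator allocator;
  GlobalValueNumbering* gvn = new (&allocator) GlobalValueNumbering(1);
  int mode = gvn->mode();
  delete gvn;  // Destructor only; the arena reclaims the memory at scope exit.
  return mode == 1 ? 0 : 1;
}
```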
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index a171d7c..c1ce2ac 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -107,7 +107,8 @@
class LocalValueNumbering::NonAliasingArrayVersions {
public:
- static uint16_t StartMemoryVersion(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
+ static uint16_t StartMemoryVersion(GlobalValueNumbering* gvn,
+ const LocalValueNumbering* lvn ATTRIBUTE_UNUSED,
uint16_t array) {
return gvn->LookupValue(kNonAliasingArrayStartVersionOp, array, kNoValue, kNoValue);
}
@@ -129,8 +130,9 @@
gvn, lvn, &lvn->non_aliasing_array_value_map_, array, index);
}
- static bool HasNewBaseVersion(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
- uint16_t array) {
+ static bool HasNewBaseVersion(GlobalValueNumbering* gvn ATTRIBUTE_UNUSED,
+ const LocalValueNumbering* lvn ATTRIBUTE_UNUSED,
+ uint16_t array ATTRIBUTE_UNUSED) {
return false; // Not affected by global_memory_version_.
}
@@ -164,8 +166,9 @@
return gvn->LookupValue(kAliasingArrayOp, type, location, memory_version);
}
- static uint16_t LookupMergeValue(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
- uint16_t type, uint16_t location) {
+ static uint16_t LookupMergeValue(GlobalValueNumbering* gvn ATTRIBUTE_UNUSED,
+ const LocalValueNumbering* lvn,
+ uint16_t type ATTRIBUTE_UNUSED, uint16_t location) {
// If the location is non-aliasing in lvn, use the non-aliasing value.
uint16_t array = gvn->GetArrayLocationBase(location);
if (lvn->IsNonAliasingArray(array, type)) {
@@ -176,8 +179,11 @@
gvn, lvn, &lvn->aliasing_array_value_map_, type, location);
}
- static bool HasNewBaseVersion(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
- uint16_t type) {
+ static bool HasNewBaseVersion(GlobalValueNumbering* gvn ATTRIBUTE_UNUSED,
+ const LocalValueNumbering* lvn,
+ uint16_t type ATTRIBUTE_UNUSED) {
+ UNUSED(gvn);
+ UNUSED(type);
return lvn->global_memory_version_ == lvn->merge_new_memory_version_;
}
@@ -781,9 +787,9 @@
if (same_version) {
// Find the first non-null values.
for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
- auto it = (lvn->*map_ptr).find(key);
- if (it != (lvn->*map_ptr).end()) {
- cmp_values = &it->second;
+ auto value = (lvn->*map_ptr).find(key);
+ if (value != (lvn->*map_ptr).end()) {
+ cmp_values = &value->second;
break;
}
}
@@ -793,21 +799,21 @@
// field version and the values' memory_version_before_stores, last_stored_value
// and store_loc_set are identical.
for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
- auto it = (lvn->*map_ptr).find(key);
- if (it == (lvn->*map_ptr).end()) {
+ auto value = (lvn->*map_ptr).find(key);
+ if (value == (lvn->*map_ptr).end()) {
if (cmp_values->memory_version_before_stores != kNoValue) {
same_version = false;
break;
}
- } else if (cmp_values->last_stored_value != it->second.last_stored_value ||
- cmp_values->memory_version_before_stores != it->second.memory_version_before_stores ||
- cmp_values->store_loc_set != it->second.store_loc_set) {
+ } else if (cmp_values->last_stored_value != value->second.last_stored_value ||
+ cmp_values->memory_version_before_stores != value->second.memory_version_before_stores ||
+ cmp_values->store_loc_set != value->second.store_loc_set) {
same_version = false;
break;
- } else if (it->second.last_load_memory_version != kNoValue) {
+ } else if (value->second.last_load_memory_version != kNoValue) {
DCHECK(load_memory_version_for_same_version == kNoValue ||
- load_memory_version_for_same_version == it->second.last_load_memory_version);
- load_memory_version_for_same_version = it->second.last_load_memory_version;
+ load_memory_version_for_same_version == value->second.last_load_memory_version);
+ load_memory_version_for_same_version = value->second.last_load_memory_version;
}
}
}
@@ -822,12 +828,12 @@
if (!cmp_values->load_value_map.empty()) {
my_values->load_value_map = cmp_values->load_value_map;
for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
- auto it = (lvn->*map_ptr).find(key);
- if (it == (lvn->*map_ptr).end() || it->second.load_value_map.empty()) {
+ auto value = (lvn->*map_ptr).find(key);
+ if (value == (lvn->*map_ptr).end() || value->second.load_value_map.empty()) {
my_values->load_value_map.clear();
break;
}
- InPlaceIntersectMaps(&my_values->load_value_map, it->second.load_value_map);
+ InPlaceIntersectMaps(&my_values->load_value_map, value->second.load_value_map);
if (my_values->load_value_map.empty()) {
break;
}
@@ -841,20 +847,20 @@
// Calculate the locations that have been either read from or written to in each incoming LVN.
bool first_lvn = true;
for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
- auto it = (lvn->*map_ptr).find(key);
- if (it == (lvn->*map_ptr).end()) {
+ auto value = (lvn->*map_ptr).find(key);
+ if (value == (lvn->*map_ptr).end()) {
my_values->load_value_map.clear();
break;
}
if (first_lvn) {
first_lvn = false;
// Copy the first LVN's locations. Values will be overwritten later.
- my_values->load_value_map = it->second.load_value_map;
- for (uint16_t location : it->second.store_loc_set) {
+ my_values->load_value_map = value->second.load_value_map;
+ for (uint16_t location : value->second.store_loc_set) {
my_values->load_value_map.Put(location, 0u);
}
} else {
- IntersectAliasingValueLocations(my_values, &it->second);
+ IntersectAliasingValueLocations(my_values, &value->second);
}
}
// Calculate merged values for the intersection.
diff --git a/compiler/dex/local_value_numbering.h b/compiler/dex/local_value_numbering.h
index dd8d2db..979fd5a 100644
--- a/compiler/dex/local_value_numbering.h
+++ b/compiler/dex/local_value_numbering.h
@@ -21,8 +21,7 @@
#include "compiler_internals.h"
#include "global_value_numbering.h"
-#include "utils/scoped_arena_allocator.h"
-#include "utils/scoped_arena_containers.h"
+#include "utils/arena_object.h"
namespace art {
@@ -31,7 +30,7 @@
// Enable/disable tracking values stored in the FILLED_NEW_ARRAY result.
static constexpr bool kLocalValueNumberingEnableFilledNewArrayTracking = true;
-class LocalValueNumbering {
+class LocalValueNumbering : public DeletableArenaObject<kArenaAllocMisc> {
private:
static constexpr uint16_t kNoValue = GlobalValueNumbering::kNoValue;
@@ -69,14 +68,6 @@
uint16_t GetValueNumber(MIR* mir);
- // LocalValueNumbering should be allocated on the ArenaStack (or the native stack).
- static void* operator new(size_t size, ScopedArenaAllocator* allocator) {
- return allocator->Alloc(sizeof(LocalValueNumbering), kArenaAllocMisc);
- }
-
- // Allow delete-expression to destroy a LocalValueNumbering object without deallocation.
- static void operator delete(void* ptr) { UNUSED(ptr); }
-
private:
// A set of value names.
typedef GlobalValueNumbering::ValueNameSet ValueNameSet;
@@ -353,7 +344,7 @@
GlobalValueNumbering* gvn_;
// We're using the block id as a 16-bit operand value for some lookups.
- COMPILE_ASSERT(sizeof(BasicBlockId) == sizeof(uint16_t), BasicBlockId_must_be_16_bit);
+ static_assert(sizeof(BasicBlockId) == sizeof(uint16_t), "BasicBlockId must be 16 bit");
BasicBlockId id_;
SregValueMap sreg_value_map_;
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index e6a8cea..5b7ac3c 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -1343,7 +1343,7 @@
* counts explicitly used s_regs. A later phase will add implicit
* counts for things such as Method*, null-checked references, etc.
*/
-void MIRGraph::CountUses(struct BasicBlock* bb) {
+void MIRGraph::CountUses(class BasicBlock* bb) {
if (bb->block_type != kDalvikByteCode) {
return;
}
diff --git a/compiler/dex/mir_field_info.h b/compiler/dex/mir_field_info.h
index c628182..e97f7a0 100644
--- a/compiler/dex/mir_field_info.h
+++ b/compiler/dex/mir_field_info.h
@@ -130,7 +130,7 @@
kBitFastPut,
kIFieldLoweringInfoBitEnd
};
- COMPILE_ASSERT(kIFieldLoweringInfoBitEnd <= 16, too_many_flags);
+ static_assert(kIFieldLoweringInfoBitEnd <= 16, "Too many flags");
static constexpr uint16_t kFlagFastGet = 1u << kBitFastGet;
static constexpr uint16_t kFlagFastPut = 1u << kBitFastPut;
@@ -198,7 +198,7 @@
kBitClassIsInDexCache,
kSFieldLoweringInfoBitEnd
};
- COMPILE_ASSERT(kSFieldLoweringInfoBitEnd <= 16, too_many_flags);
+ static_assert(kSFieldLoweringInfoBitEnd <= 16, "Too many flags");
static constexpr uint16_t kFlagFastGet = 1u << kBitFastGet;
static constexpr uint16_t kFlagFastPut = 1u << kBitFastPut;
static constexpr uint16_t kFlagIsReferrersClass = 1u << kBitIsReferrersClass;
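
This change migrates the remaining COMPILE_ASSERTs to C++11 static_assert, trading the identifier-as-message trick for a real string. A minimal sketch of both forms side by side (the macro definition shown is the usual typedef-of-negative-size-array variant, not necessarily ART's exact one):

```cpp
// Sketch: pre-C++11 compile-time assert macro versus static_assert.
template <bool> struct CompileAssert {};
#define COMPILE_ASSERT(expr, msg) \
  typedef CompileAssert<(bool(expr))> msg[bool(expr) ? 1 : -1]

constexpr int kIFieldLoweringInfoBitEnd = 5;

// Old form: a failure surfaces only as an illegal negative-size array
// named 'too_many_flags'.
COMPILE_ASSERT(kIFieldLoweringInfoBitEnd <= 16, too_many_flags);

// New form: the compiler prints the message string directly.
static_assert(kIFieldLoweringInfoBitEnd <= 16, "Too many flags");

int main() { return 0; }
```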
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index e0f471e..b87ab66 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -302,28 +302,28 @@
* (by the caller)
* Utilizes a map for fast lookup of the typical cases.
*/
-BasicBlock* MIRGraph::FindBlock(DexOffset code_offset, bool split, bool create,
+BasicBlock* MIRGraph::FindBlock(DexOffset code_offset, bool create,
BasicBlock** immed_pred_block_p) {
if (code_offset >= current_code_item_->insns_size_in_code_units_) {
- return NULL;
+ return nullptr;
}
int block_id = dex_pc_to_block_map_[code_offset];
BasicBlock* bb = GetBasicBlock(block_id);
- if ((bb != NULL) && (bb->start_offset == code_offset)) {
+ if ((bb != nullptr) && (bb->start_offset == code_offset)) {
// Does this containing block start with the desired instruction?
return bb;
}
// No direct hit.
if (!create) {
- return NULL;
+ return nullptr;
}
- if (bb != NULL) {
+ if (bb != nullptr) {
// The target exists somewhere in an existing block.
- return SplitBlock(code_offset, bb, bb == *immed_pred_block_p ? immed_pred_block_p : NULL);
+ return SplitBlock(code_offset, bb, bb == *immed_pred_block_p ? immed_pred_block_p : nullptr);
}
// Create a new block.
@@ -360,8 +360,7 @@
CatchHandlerIterator iterator(handlers_ptr);
for (; iterator.HasNext(); iterator.Next()) {
uint32_t address = iterator.GetHandlerAddress();
- FindBlock(address, false /* split */, true /*create*/,
- /* immed_pred_block_p */ NULL);
+ FindBlock(address, true /*create*/, /* immed_pred_block_p */ nullptr);
}
handlers_ptr = iterator.EndDataPointer();
}
@@ -466,7 +465,7 @@
LOG(FATAL) << "Unexpected opcode(" << insn->dalvikInsn.opcode << ") with kBranch set";
}
CountBranch(target);
- BasicBlock* taken_block = FindBlock(target, /* split */ true, /* create */ true,
+ BasicBlock* taken_block = FindBlock(target, /* create */ true,
/* immed_pred_block_p */ &cur_block);
cur_block->taken = taken_block->id;
taken_block->predecessors.push_back(cur_block->id);
@@ -474,19 +473,6 @@
/* Always terminate the current block for conditional branches */
if (flags & Instruction::kContinue) {
BasicBlock* fallthrough_block = FindBlock(cur_offset + width,
- /*
- * If the method is processed
- * in sequential order from the
- * beginning, we don't need to
- * specify split for continue
- * blocks. However, this
- * routine can be called by
- * compileLoop, which starts
- * parsing the method from an
- * arbitrary address in the
- * method body.
- */
- true,
/* create */
true,
/* immed_pred_block_p */
@@ -494,8 +480,7 @@
cur_block->fall_through = fallthrough_block->id;
fallthrough_block->predecessors.push_back(cur_block->id);
} else if (code_ptr < code_end) {
- FindBlock(cur_offset + width, /* split */ false, /* create */ true,
- /* immed_pred_block_p */ NULL);
+ FindBlock(cur_offset + width, /* create */ true, /* immed_pred_block_p */ nullptr);
}
return cur_block;
}
@@ -503,6 +488,7 @@
/* Process instructions with the kSwitch flag */
BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
int width, int flags) {
+ UNUSED(flags);
const uint16_t* switch_data =
reinterpret_cast<const uint16_t*>(GetCurrentInsns() + cur_offset + insn->dalvikInsn.vB);
int size;
@@ -554,8 +540,8 @@
cur_block->successor_blocks.reserve(size);
for (i = 0; i < size; i++) {
- BasicBlock* case_block = FindBlock(cur_offset + target_table[i], /* split */ true,
- /* create */ true, /* immed_pred_block_p */ &cur_block);
+ BasicBlock* case_block = FindBlock(cur_offset + target_table[i], /* create */ true,
+ /* immed_pred_block_p */ &cur_block);
SuccessorBlockInfo* successor_block_info =
static_cast<SuccessorBlockInfo*>(arena_->Alloc(sizeof(SuccessorBlockInfo),
kArenaAllocSuccessor));
@@ -568,8 +554,8 @@
}
/* Fall-through case */
- BasicBlock* fallthrough_block = FindBlock(cur_offset + width, /* split */ false,
- /* create */ true, /* immed_pred_block_p */ NULL);
+ BasicBlock* fallthrough_block = FindBlock(cur_offset + width, /* create */ true,
+ /* immed_pred_block_p */ nullptr);
cur_block->fall_through = fallthrough_block->id;
fallthrough_block->predecessors.push_back(cur_block->id);
return cur_block;
@@ -579,6 +565,7 @@
BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
int width, int flags, ArenaBitVector* try_block_addr,
const uint16_t* code_ptr, const uint16_t* code_end) {
+ UNUSED(flags);
bool in_try_block = try_block_addr->IsBitSet(cur_offset);
bool is_throw = (insn->dalvikInsn.opcode == Instruction::THROW);
@@ -593,8 +580,8 @@
}
for (; iterator.HasNext(); iterator.Next()) {
- BasicBlock* catch_block = FindBlock(iterator.GetHandlerAddress(), false /* split*/,
- false /* creat */, NULL /* immed_pred_block_p */);
+ BasicBlock* catch_block = FindBlock(iterator.GetHandlerAddress(), false /* create */,
+ nullptr /* immed_pred_block_p */);
if (insn->dalvikInsn.opcode == Instruction::MONITOR_EXIT &&
IsBadMonitorExitCatch(insn->offset, catch_block->start_offset)) {
// Don't allow monitor-exit to catch its own exception, http://b/15745363 .
@@ -629,8 +616,7 @@
cur_block->explicit_throw = true;
if (code_ptr < code_end) {
// Force creation of new block following THROW via side-effect.
- FindBlock(cur_offset + width, /* split */ false, /* create */ true,
- /* immed_pred_block_p */ NULL);
+ FindBlock(cur_offset + width, /* create */ true, /* immed_pred_block_p */ nullptr);
}
if (!in_try_block) {
// Don't split a THROW that can't rethrow - we're done.
@@ -813,8 +799,7 @@
* Create a fallthrough block for real instructions
* (incl. NOP).
*/
- FindBlock(current_offset_ + width, /* split */ false, /* create */ true,
- /* immed_pred_block_p */ NULL);
+ FindBlock(current_offset_ + width, /* create */ true, /* immed_pred_block_p */ nullptr);
}
} else if (flags & Instruction::kThrow) {
cur_block = ProcessCanThrow(cur_block, insn, current_offset_, width, flags, try_block_addr_,
@@ -837,8 +822,8 @@
}
}
current_offset_ += width;
- BasicBlock* next_block = FindBlock(current_offset_, /* split */ false, /* create */
- false, /* immed_pred_block_p */ NULL);
+ BasicBlock* next_block = FindBlock(current_offset_, /* create */ false,
+ /* immed_pred_block_p */ nullptr);
if (next_block) {
/*
* The next instruction could be the target of a previously parsed
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index fd4c473..a1d24e2 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -228,7 +228,8 @@
* The Midlevel Intermediate Representation node, which may be largely considered a
* wrapper around a Dalvik byte code.
*/
-struct MIR {
+class MIR : public ArenaObject<kArenaAllocMIR> {
+ public:
/*
* TODO: remove embedded DecodedInstruction to save space, keeping only opcode. Recover
* additional fields on as-needed basis. Question: how to support MIR Pseudo-ops; probably
@@ -344,16 +345,12 @@
MIR* Copy(CompilationUnit *c_unit);
MIR* Copy(MIRGraph* mir_Graph);
-
- static void* operator new(size_t size, ArenaAllocator* arena) {
- return arena->Alloc(sizeof(MIR), kArenaAllocMIR);
- }
- static void operator delete(void* p) {} // Nop.
};
struct SuccessorBlockInfo;
-struct BasicBlock {
+class BasicBlock : public DeletableArenaObject<kArenaAllocBB> {
+ public:
BasicBlock(BasicBlockId block_id, BBType type, ArenaAllocator* allocator)
: id(block_id),
dfs_id(), start_offset(), fall_through(), taken(), i_dom(), nesting_depth(),
@@ -457,10 +454,8 @@
MIR* GetNextUnconditionalMir(MIRGraph* mir_graph, MIR* current);
bool IsExceptionBlock() const;
- static void* operator new(size_t size, ArenaAllocator* arena) {
- return arena->Alloc(sizeof(BasicBlock), kArenaAllocBB);
- }
- static void operator delete(void* p) {} // Nop.
+ private:
+ DISALLOW_COPY_AND_ASSIGN(BasicBlock);
};
/*
@@ -548,7 +543,7 @@
/* Find existing block */
BasicBlock* FindBlock(DexOffset code_offset) {
- return FindBlock(code_offset, false, false, NULL);
+ return FindBlock(code_offset, false, NULL);
}
const uint16_t* GetCurrentInsns() const {
@@ -627,7 +622,7 @@
return def_count_;
}
- ArenaAllocator* GetArena() {
+ ArenaAllocator* GetArena() const {
return arena_;
}
@@ -1135,7 +1130,7 @@
* @brief Count the uses in the BasicBlock
* @param bb the BasicBlock
*/
- void CountUses(struct BasicBlock* bb);
+ void CountUses(class BasicBlock* bb);
static uint64_t GetDataFlowAttributes(Instruction::Code opcode);
static uint64_t GetDataFlowAttributes(MIR* mir);
@@ -1208,8 +1203,7 @@
bool ContentIsInsn(const uint16_t* code_ptr);
BasicBlock* SplitBlock(DexOffset code_offset, BasicBlock* orig_block,
BasicBlock** immed_pred_block_p);
- BasicBlock* FindBlock(DexOffset code_offset, bool split, bool create,
- BasicBlock** immed_pred_block_p);
+ BasicBlock* FindBlock(DexOffset code_offset, bool create, BasicBlock** immed_pred_block_p);
void ProcessTryCatchBlocks();
bool IsBadMonitorExitCatch(NarrowDexOffset monitor_exit_offset, NarrowDexOffset catch_offset);
BasicBlock* ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
@@ -1233,7 +1227,7 @@
void ComputeDomPostOrderTraversal(BasicBlock* bb);
int GetSSAUseCount(int s_reg);
bool BasicBlockOpt(BasicBlock* bb);
- bool BuildExtendedBBList(struct BasicBlock* bb);
+ bool BuildExtendedBBList(class BasicBlock* bb);
bool FillDefBlockMatrix(BasicBlock* bb);
void InitializeDominationInfo(BasicBlock* bb);
bool ComputeblockIDom(BasicBlock* bb);
@@ -1266,7 +1260,7 @@
ArenaVector<BasicBlockId> dom_post_order_traversal_;
ArenaVector<BasicBlockId> topological_order_;
// Indexes in topological_order_ need to be only as big as the BasicBlockId.
- COMPILE_ASSERT(sizeof(BasicBlockId) == sizeof(uint16_t), assuming_16_bit_BasicBlockId);
+ static_assert(sizeof(BasicBlockId) == sizeof(uint16_t), "Assuming 16 bit BasicBlockId");
// For each loop head, remember the past-the-end index of the end of the loop. 0 if not loop head.
ArenaVector<uint16_t> topological_order_loop_ends_;
// Map BB ids to topological_order_ indexes. 0xffff if not included (hidden or null block).
@@ -1305,7 +1299,7 @@
int method_sreg_;
unsigned int attributes_;
Checkstats* checkstats_;
- ArenaAllocator* arena_;
+ ArenaAllocator* const arena_;
int backward_branches_;
int forward_branches_;
size_t num_non_special_compiler_temps_; // Keeps track of allocated non-special compiler temps. These are VRs that are in compiler temp region on stack.
diff --git a/compiler/dex/mir_method_info.h b/compiler/dex/mir_method_info.h
index e64b028..08fb103 100644
--- a/compiler/dex/mir_method_info.h
+++ b/compiler/dex/mir_method_info.h
@@ -60,7 +60,7 @@
kBitIsStatic = 0,
kMethodInfoBitEnd
};
- COMPILE_ASSERT(kMethodInfoBitEnd <= 16, too_many_flags);
+ static_assert(kMethodInfoBitEnd <= 16, "Too many flags");
static constexpr uint16_t kFlagIsStatic = 1u << kBitIsStatic;
MirMethodInfo(uint16_t method_idx, uint16_t flags)
@@ -170,15 +170,15 @@
kBitClassIsInitialized,
kMethodLoweringInfoBitEnd
};
- COMPILE_ASSERT(kMethodLoweringInfoBitEnd <= 16, too_many_flags);
+ static_assert(kMethodLoweringInfoBitEnd <= 16, "Too many flags");
static constexpr uint16_t kFlagFastPath = 1u << kBitFastPath;
static constexpr uint16_t kFlagIsReferrersClass = 1u << kBitIsReferrersClass;
static constexpr uint16_t kFlagClassIsInitialized = 1u << kBitClassIsInitialized;
static constexpr uint16_t kInvokeTypeMask = 7u;
- COMPILE_ASSERT((1u << (kBitInvokeTypeEnd - kBitInvokeTypeBegin)) - 1u == kInvokeTypeMask,
- assert_invoke_type_bits_ok);
- COMPILE_ASSERT((1u << (kBitSharpTypeEnd - kBitSharpTypeBegin)) - 1u == kInvokeTypeMask,
- assert_sharp_type_bits_ok);
+ static_assert((1u << (kBitInvokeTypeEnd - kBitInvokeTypeBegin)) - 1u == kInvokeTypeMask,
+ "assert invoke type bits failed");
+ static_assert((1u << (kBitSharpTypeEnd - kBitSharpTypeBegin)) - 1u == kInvokeTypeMask,
+ "assert sharp type bits failed");
uintptr_t direct_code_;
uintptr_t direct_method_;
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 8e583cc..b35d51c 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -16,12 +16,11 @@
#include "base/bit_vector-inl.h"
#include "compiler_internals.h"
+#include "dataflow_iterator-inl.h"
#include "global_value_numbering.h"
#include "local_value_numbering.h"
-#include "dataflow_iterator-inl.h"
-#include "dex/global_value_numbering.h"
-#include "dex/quick/dex_file_method_inliner.h"
-#include "dex/quick/dex_file_to_method_inliner_map.h"
+#include "quick/dex_file_method_inliner.h"
+#include "quick/dex_file_to_method_inliner_map.h"
#include "stack.h"
#include "utils/scoped_arena_containers.h"
@@ -215,8 +214,8 @@
kCondEq, kCondNe, kCondLt, kCondGe, kCondGt, kCondLe
};
-COMPILE_ASSERT(arraysize(kIfCcZConditionCodes) == Instruction::IF_LEZ - Instruction::IF_EQZ + 1,
- if_ccz_ccodes_size1);
+static_assert(arraysize(kIfCcZConditionCodes) == Instruction::IF_LEZ - Instruction::IF_EQZ + 1,
+ "if_ccz_ccodes_size1");
static constexpr bool IsInstructionIfCcZ(Instruction::Code opcode) {
return Instruction::IF_EQZ <= opcode && opcode <= Instruction::IF_LEZ;
@@ -226,12 +225,12 @@
return kIfCcZConditionCodes[opcode - Instruction::IF_EQZ];
}
-COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_EQZ) == kCondEq, check_if_eqz_ccode);
-COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_NEZ) == kCondNe, check_if_nez_ccode);
-COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_LTZ) == kCondLt, check_if_ltz_ccode);
-COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_GEZ) == kCondGe, check_if_gez_ccode);
-COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_GTZ) == kCondGt, check_if_gtz_ccode);
-COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_LEZ) == kCondLe, check_if_lez_ccode);
+static_assert(ConditionCodeForIfCcZ(Instruction::IF_EQZ) == kCondEq, "if_eqz ccode");
+static_assert(ConditionCodeForIfCcZ(Instruction::IF_NEZ) == kCondNe, "if_nez ccode");
+static_assert(ConditionCodeForIfCcZ(Instruction::IF_LTZ) == kCondLt, "if_ltz ccode");
+static_assert(ConditionCodeForIfCcZ(Instruction::IF_GEZ) == kCondGe, "if_gez ccode");
+static_assert(ConditionCodeForIfCcZ(Instruction::IF_GTZ) == kCondGt, "if_gtz ccode");
+static_assert(ConditionCodeForIfCcZ(Instruction::IF_LEZ) == kCondLe, "if_lez ccode");
int MIRGraph::GetSSAUseCount(int s_reg) {
DCHECK_LT(static_cast<size_t>(s_reg), ssa_subscripts_.size());
@@ -660,7 +659,7 @@
}
/* Collect stats on number of checks removed */
-void MIRGraph::CountChecks(struct BasicBlock* bb) {
+void MIRGraph::CountChecks(class BasicBlock* bb) {
if (bb->data_flow_info != NULL) {
for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
if (mir->ssa_rep == NULL) {
@@ -750,7 +749,7 @@
}
/* Combine any basic blocks terminated by instructions that we now know can't throw */
-void MIRGraph::CombineBlocks(struct BasicBlock* bb) {
+void MIRGraph::CombineBlocks(class BasicBlock* bb) {
// Loop here to allow combining a sequence of blocks
while ((bb->block_type == kDalvikByteCode) &&
(bb->last_mir_insn != nullptr) &&
@@ -1087,7 +1086,7 @@
for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
constexpr int kMarkToIgnoreNullCheckShift = kMIRMark - kMIRIgnoreNullCheck;
- COMPILE_ASSERT(kMarkToIgnoreNullCheckShift > 0, check_valid_shift_right);
+ static_assert(kMarkToIgnoreNullCheckShift > 0, "Not a valid right-shift");
uint16_t mirMarkAdjustedToIgnoreNullCheck =
(mir->optimization_flags & MIR_MARK) >> kMarkToIgnoreNullCheckShift;
mir->optimization_flags |= mirMarkAdjustedToIgnoreNullCheck;
@@ -1510,7 +1509,7 @@
}
}
-bool MIRGraph::BuildExtendedBBList(struct BasicBlock* bb) {
+bool MIRGraph::BuildExtendedBBList(class BasicBlock* bb) {
if (bb->visited) return false;
if (!((bb->block_type == kEntryBlock) || (bb->block_type == kDalvikByteCode)
|| (bb->block_type == kExitBlock))) {
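
One of the hunks above keeps the shift trick that copies the temporary MIR_MARK bit down onto MIR_IGNORE_NULL_CHECK, now guarded by a static_assert that the shift distance is positive. A small sketch with hypothetical bit positions (the shift distance is simply the difference between the two enumerators):

```cpp
// Sketch: deriving one flag bit from another with a single right-shift.
#include <cassert>
#include <cstdint>

enum {
  kMIRIgnoreNullCheck = 0,   // bit position 0 (illustrative)
  kMIRMark = 5,              // bit position 5 (illustrative, per-pass mark)
};
constexpr uint16_t MIR_IGNORE_NULL_CHECK = 1u << kMIRIgnoreNullCheck;
constexpr uint16_t MIR_MARK = 1u << kMIRMark;

constexpr int kMarkToIgnoreNullCheckShift = kMIRMark - kMIRIgnoreNullCheck;
static_assert(kMarkToIgnoreNullCheckShift > 0, "Not a valid right-shift");

// Copy the MARK bit down onto the IGNORE_NULL_CHECK bit, no branch needed.
uint16_t PropagateMark(uint16_t optimization_flags) {
  uint16_t adjusted = (optimization_flags & MIR_MARK) >> kMarkToIgnoreNullCheckShift;
  return optimization_flags | adjusted;
}

int main() {
  assert(PropagateMark(MIR_MARK) == (MIR_MARK | MIR_IGNORE_NULL_CHECK));
  assert(PropagateMark(0) == 0);
  return 0;
}
```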
diff --git a/compiler/dex/pass.h b/compiler/dex/pass.h
index e349eed..d3e54a0 100644
--- a/compiler/dex/pass.h
+++ b/compiler/dex/pass.h
@@ -19,11 +19,13 @@
#include <string>
-#include "base/macros.h"
+#include "compiler_ir.h"
+#include "base/logging.h"
+
namespace art {
// Forward declarations.
-struct BasicBlock;
+class BasicBlock;
struct CompilationUnit;
class Pass;
@@ -81,15 +83,10 @@
* @param data the object containing data necessary for the pass.
* @return whether or not there is a change when walking the BasicBlock
*/
- virtual bool Worker(PassDataHolder* data) const {
- // Unused parameter.
- UNUSED(data);
-
+ virtual bool Worker(PassDataHolder* data ATTRIBUTE_UNUSED) const {
// Passes that do all their work in Start() or End() should not allow useless node iteration.
- DCHECK(false) << "Unsupported default Worker() used for " << GetName();
-
- // BasicBlock did not change.
- return false;
+ LOG(FATAL) << "Unsupported default Worker() used for " << GetName();
+ UNREACHABLE();
}
static void BasePrintMessage(CompilationUnit* c_unit, const char* pass_name, const char* message, ...) {
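
The default Worker() above now aborts via LOG(FATAL) plus UNREACHABLE() instead of DCHECK-and-return-false: it stays a plain virtual rather than a pure one, so passes that do all their work in Start()/End() need no per-node body, while passes that iterate nodes without overriding it fail loudly. A simplified sketch of that design choice (names reduced to the bare minimum):

```cpp
// Sketch: aborting default implementation instead of a pure virtual.
#include <cstdio>
#include <cstdlib>

#define ATTRIBUTE_UNUSED __attribute__((__unused__))

class PassDataHolder {};

class Pass {
 public:
  virtual ~Pass() {}
  virtual const char* GetName() const = 0;
  // Only reached if a pass registers for node iteration without overriding.
  virtual bool Worker(PassDataHolder* data ATTRIBUTE_UNUSED) const {
    std::fprintf(stderr, "Unsupported default Worker() used for %s\n", GetName());
    std::abort();
  }
};

class StartEndOnlyPass : public Pass {
 public:
  const char* GetName() const override { return "StartEndOnlyPass"; }
  // No Worker() override: this pass never iterates BasicBlocks.
};

int main() {
  StartEndOnlyPass pass;
  std::printf("%s\n", pass.GetName());
  return 0;
}
```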
diff --git a/compiler/dex/pass_me.h b/compiler/dex/pass_me.h
index 2f3c8b2..d0b450a 100644
--- a/compiler/dex/pass_me.h
+++ b/compiler/dex/pass_me.h
@@ -23,7 +23,7 @@
namespace art {
// Forward declarations.
-struct BasicBlock;
+class BasicBlock;
struct CompilationUnit;
class Pass;
@@ -32,10 +32,11 @@
* @details Each enum should be a power of 2 to be correctly used.
*/
enum OptimizationFlag {
- kOptimizationBasicBlockChange = 1, /**< @brief Has there been a change to a BasicBlock? */
- kOptimizationDefUsesChange = 2, /**< @brief Has there been a change to a def-use? */
- kLoopStructureChange = 4, /**< @brief Has there been a loop structural change? */
+ kOptimizationBasicBlockChange = 1, /// @brief Has there been a change to a BasicBlock?
+ kOptimizationDefUsesChange = 2, /// @brief Has there been a change to a def-use?
+ kLoopStructureChange = 4, /// @brief Has there been a loop structural change?
};
+std::ostream& operator<<(std::ostream& os, const OptimizationFlag& rhs);
// Data holder class.
class PassMEDataHolder: public PassDataHolder {
@@ -47,24 +48,25 @@
};
enum DataFlowAnalysisMode {
- kAllNodes = 0, /**< @brief All nodes. */
- kPreOrderDFSTraversal, /**< @brief Depth-First-Search / Pre-Order. */
- kRepeatingPreOrderDFSTraversal, /**< @brief Depth-First-Search / Repeating Pre-Order. */
- kReversePostOrderDFSTraversal, /**< @brief Depth-First-Search / Reverse Post-Order. */
- kRepeatingPostOrderDFSTraversal, /**< @brief Depth-First-Search / Repeating Post-Order. */
- kRepeatingReversePostOrderDFSTraversal, /**< @brief Depth-First-Search / Repeating Reverse Post-Order. */
- kPostOrderDOMTraversal, /**< @brief Dominator tree / Post-Order. */
- kTopologicalSortTraversal, /**< @brief Topological Order traversal. */
- kLoopRepeatingTopologicalSortTraversal, /**< @brief Loop-repeating Topological Order traversal. */
- kNoNodes, /**< @brief Skip BasicBlock traversal. */
+ kAllNodes = 0, /// @brief All nodes.
+ kPreOrderDFSTraversal, /// @brief Depth-First-Search / Pre-Order.
+ kRepeatingPreOrderDFSTraversal, /// @brief Depth-First-Search / Repeating Pre-Order.
+ kReversePostOrderDFSTraversal, /// @brief Depth-First-Search / Reverse Post-Order.
+ kRepeatingPostOrderDFSTraversal, /// @brief Depth-First-Search / Repeating Post-Order.
+ kRepeatingReversePostOrderDFSTraversal, /// @brief Depth-First-Search / Repeating Reverse Post-Order.
+ kPostOrderDOMTraversal, /// @brief Dominator tree / Post-Order.
+ kTopologicalSortTraversal, /// @brief Topological Order traversal.
+ kLoopRepeatingTopologicalSortTraversal, /// @brief Loop-repeating Topological Order traversal.
+ kNoNodes, /// @brief Skip BasicBlock traversal.
};
+std::ostream& operator<<(std::ostream& os, const DataFlowAnalysisMode& rhs);
/**
* @class Pass
* @brief Pass is the Pass structure for the optimizations.
* @details The following structure has the different optimization passes that we are going to do.
*/
-class PassME: public Pass {
+class PassME : public Pass {
public:
explicit PassME(const char* name, DataFlowAnalysisMode type = kAllNodes,
unsigned int flags = 0u, const char* dump = "")
diff --git a/compiler/dex/portable/mir_to_gbc.h b/compiler/dex/portable/mir_to_gbc.h
index 94ae3f7..bc4f5c4 100644
--- a/compiler/dex/portable/mir_to_gbc.h
+++ b/compiler/dex/portable/mir_to_gbc.h
@@ -73,7 +73,7 @@
std::unique_ptr<art::llvm::IRBuilder> ir_builder_;
};
-struct BasicBlock;
+class BasicBlock;
struct CallInfo;
struct CompilationUnit;
struct MIR;
diff --git a/compiler/dex/quick/arm/arm_lir.h b/compiler/dex/quick/arm/arm_lir.h
index 36cb7a4..b2db36d 100644
--- a/compiler/dex/quick/arm/arm_lir.h
+++ b/compiler/dex/quick/arm/arm_lir.h
@@ -109,7 +109,7 @@
kArmRegEnd = 48,
};
-enum ArmNativeRegisterPool {
+enum ArmNativeRegisterPool { // private marker to keep generate-operator-out.py from processing.
r0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 0,
r1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 1,
r2 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 2,
@@ -546,6 +546,7 @@
kThumb2StrdI8, // strd rt, rt2, [rn +-/1024].
kArmLast,
};
+std::ostream& operator<<(std::ostream& os, const ArmOpcode& rhs);
enum ArmOpDmbOptions {
kSY = 0xf,
@@ -577,6 +578,7 @@
kFmtOff24, // 24-bit Thumb2 unconditional branch encoding.
kFmtSkip, // Unused field, but continue to next.
};
+std::ostream& operator<<(std::ostream& os, const ArmEncodingKind& rhs);
// Struct used to define the snippet positions for each Thumb opcode.
struct ArmEncodingMap {
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 13b9bf0..b4eebb3 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -475,9 +475,9 @@
* Bit of a hack here - in the absence of a real scheduling pass,
* emit the next instruction in static & direct invoke sequences.
*/
-static int ArmNextSDCallInsn(CompilationUnit* cu, CallInfo* info,
+static int ArmNextSDCallInsn(CompilationUnit* cu, CallInfo* info ATTRIBUTE_UNUSED,
int state, const MethodReference& target_method,
- uint32_t unused,
+ uint32_t unused_idx ATTRIBUTE_UNUSED,
uintptr_t direct_code, uintptr_t direct_method,
InvokeType type) {
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 442c4fc..179ba02 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -196,7 +196,7 @@
void GenSelect(BasicBlock* bb, MIR* mir);
void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- int dest_reg_class) OVERRIDE;
+ RegisterClass dest_reg_class) OVERRIDE;
bool GenMemBarrier(MemBarrierKind barrier_kind);
void GenMonitorEnter(int opt_flags, RegLocation rl_src);
void GenMonitorExit(int opt_flags, RegLocation rl_src);
@@ -251,10 +251,10 @@
RegStorage AllocPreservedDouble(int s_reg);
RegStorage AllocPreservedSingle(int s_reg);
- bool WideGPRsAreAliases() OVERRIDE {
+ bool WideGPRsAreAliases() const OVERRIDE {
return false; // Wide GPRs are formed by pairing.
}
- bool WideFPRsAreAliases() OVERRIDE {
+ bool WideFPRsAreAliases() const OVERRIDE {
return false; // Wide FPRs are formed by pairing.
}
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index ce31b27..ebf1905 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -209,7 +209,8 @@
void ArmMir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- int dest_reg_class) {
+ RegisterClass dest_reg_class) {
+ UNUSED(dest_reg_class);
// TODO: Generalize the IT below to accept more than one-instruction loads.
DCHECK(InexpensiveConstantInt(true_val));
DCHECK(InexpensiveConstantInt(false_val));
@@ -232,6 +233,7 @@
}
void ArmMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb);
RegLocation rl_result;
RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
RegLocation rl_dest = mir_graph_->GetDest(mir);
@@ -504,6 +506,7 @@
// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
bool ArmMir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
RegLocation rl_src, RegLocation rl_dest, int lit) {
+ UNUSED(dalvik_opcode);
if ((lit < 0) || (lit >= static_cast<int>(sizeof(magic_table)/sizeof(magic_table[0])))) {
return false;
}
@@ -687,14 +690,17 @@
}
RegLocation ArmMir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div, int flags) {
+ RegLocation rl_src2, bool is_div, int flags) {
+ UNUSED(rl_dest, rl_src1, rl_src2, is_div, flags);
LOG(FATAL) << "Unexpected use of GenDivRem for Arm";
- return rl_dest;
+ UNREACHABLE();
}
-RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div) {
+RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
+ bool is_div) {
+ UNUSED(rl_dest, rl_src1, lit, is_div);
LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm";
- return rl_dest;
+ UNREACHABLE();
}
RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit, bool is_div) {
@@ -1072,6 +1078,7 @@
void ArmMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
RegLocation rl_result, int lit,
int first_bit, int second_bit) {
+ UNUSED(lit);
OpRegRegRegShift(kOpAdd, rl_result.reg, rl_src.reg, rl_src.reg,
EncodeShift(kArmLsl, second_bit - first_bit));
if (first_bit != 0) {
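Throughout these files, dead return statements after LOG(FATAL) are replaced by UNUSED(...) plus UNREACHABLE(): the arguments are marked as deliberately ignored, and the compiler is told control cannot fall out of the function, so no dummy return value is needed. A minimal sketch of how such helpers can be defined and used (ART's real definitions live in its own macros header and may differ):

    #include <cstdlib>

    // Swallow any number of deliberately unused values.
    template <typename... T>
    void UNUSED(const T&...) {}

    // Promise the compiler that this point is never reached.
    #define UNREACHABLE() __builtin_unreachable()

    int Unexpected(int a, int b) {
      UNUSED(a, b);
      std::abort();    // stand-in for LOG(FATAL), which never returns
      UNREACHABLE();   // silences "no return statement" warnings
    }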
@@ -1168,108 +1175,109 @@
void ArmMir2Lir::GenMulLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2) {
- /*
- * tmp1 = src1.hi * src2.lo; // src1.hi is no longer needed
- * dest = src1.lo * src2.lo;
- * tmp1 += src1.lo * src2.hi;
- * dest.hi += tmp1;
- *
- * To pull off inline multiply, we have a worst-case requirement of 7 temporary
- * registers. Normally for Arm, we get 5. We can get to 6 by including
- * lr in the temp set. The only problematic case is all operands and result are
- * distinct, and none have been promoted. In that case, we can succeed by aggressively
- * freeing operand temp registers after they are no longer needed. All other cases
- * can proceed normally. We'll just punt on the case of the result having a misaligned
- * overlap with either operand and send that case to a runtime handler.
- */
- RegLocation rl_result;
- if (PartiallyIntersects(rl_src1, rl_dest) || (PartiallyIntersects(rl_src2, rl_dest))) {
- FlushAllRegs();
- CallRuntimeHelperRegLocationRegLocation(kQuickLmul, rl_src1, rl_src2, false);
- rl_result = GetReturnWide(kCoreReg);
- StoreValueWide(rl_dest, rl_result);
- return;
- }
-
- rl_src1 = LoadValueWide(rl_src1, kCoreReg);
- rl_src2 = LoadValueWide(rl_src2, kCoreReg);
-
- int reg_status = 0;
- RegStorage res_lo;
- RegStorage res_hi;
- bool dest_promoted = rl_dest.location == kLocPhysReg && rl_dest.reg.Valid() &&
- !IsTemp(rl_dest.reg.GetLow()) && !IsTemp(rl_dest.reg.GetHigh());
- bool src1_promoted = !IsTemp(rl_src1.reg.GetLow()) && !IsTemp(rl_src1.reg.GetHigh());
- bool src2_promoted = !IsTemp(rl_src2.reg.GetLow()) && !IsTemp(rl_src2.reg.GetHigh());
- // Check if rl_dest is *not* either operand and we have enough temp registers.
- if ((rl_dest.s_reg_low != rl_src1.s_reg_low && rl_dest.s_reg_low != rl_src2.s_reg_low) &&
- (dest_promoted || src1_promoted || src2_promoted)) {
- // In this case, we do not need to manually allocate temp registers for result.
- rl_result = EvalLoc(rl_dest, kCoreReg, true);
- res_lo = rl_result.reg.GetLow();
- res_hi = rl_result.reg.GetHigh();
- } else {
- res_lo = AllocTemp();
- if ((rl_src1.s_reg_low == rl_src2.s_reg_low) || src1_promoted || src2_promoted) {
- // In this case, we have enough temp registers to be allocated for result.
- res_hi = AllocTemp();
- reg_status = 1;
- } else {
- // In this case, all temps are now allocated.
- // res_hi will be allocated after we can free src1_hi.
- reg_status = 2;
- }
- }
-
- // Temporarily add LR to the temp pool, and assign it to tmp1
- MarkTemp(rs_rARM_LR);
- FreeTemp(rs_rARM_LR);
- RegStorage tmp1 = rs_rARM_LR;
- LockTemp(rs_rARM_LR);
-
- if (rl_src1.reg == rl_src2.reg) {
- DCHECK(res_hi.Valid());
- DCHECK(res_lo.Valid());
- NewLIR3(kThumb2MulRRR, tmp1.GetReg(), rl_src1.reg.GetLowReg(), rl_src1.reg.GetHighReg());
- NewLIR4(kThumb2Umull, res_lo.GetReg(), res_hi.GetReg(), rl_src1.reg.GetLowReg(),
- rl_src1.reg.GetLowReg());
- OpRegRegRegShift(kOpAdd, res_hi, res_hi, tmp1, EncodeShift(kArmLsl, 1));
- } else {
- NewLIR3(kThumb2MulRRR, tmp1.GetReg(), rl_src2.reg.GetLowReg(), rl_src1.reg.GetHighReg());
- if (reg_status == 2) {
- DCHECK(!res_hi.Valid());
- DCHECK_NE(rl_src1.reg.GetLowReg(), rl_src2.reg.GetLowReg());
- DCHECK_NE(rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
- // Will force free src1_hi, so must clobber.
- Clobber(rl_src1.reg);
- FreeTemp(rl_src1.reg.GetHigh());
- res_hi = AllocTemp();
- }
- DCHECK(res_hi.Valid());
- DCHECK(res_lo.Valid());
- NewLIR4(kThumb2Umull, res_lo.GetReg(), res_hi.GetReg(), rl_src2.reg.GetLowReg(),
- rl_src1.reg.GetLowReg());
- NewLIR4(kThumb2Mla, tmp1.GetReg(), rl_src1.reg.GetLowReg(), rl_src2.reg.GetHighReg(),
- tmp1.GetReg());
- NewLIR4(kThumb2AddRRR, res_hi.GetReg(), tmp1.GetReg(), res_hi.GetReg(), 0);
- if (reg_status == 2) {
- FreeTemp(rl_src1.reg.GetLow());
- }
- }
-
- // Now, restore lr to its non-temp status.
- FreeTemp(tmp1);
- Clobber(rs_rARM_LR);
- UnmarkTemp(rs_rARM_LR);
-
- if (reg_status != 0) {
- // We had manually allocated registers for rl_result.
- // Now construct a RegLocation.
- rl_result = GetReturnWide(kCoreReg); // Just using as a template.
- rl_result.reg = RegStorage::MakeRegPair(res_lo, res_hi);
- }
-
+ UNUSED(opcode);
+ /*
+ * tmp1 = src1.hi * src2.lo; // src1.hi is no longer needed
+ * dest = src1.lo * src2.lo;
+ * tmp1 += src1.lo * src2.hi;
+ * dest.hi += tmp1;
+ *
+ * To pull off inline multiply, we have a worst-case requirement of 7 temporary
+ * registers. Normally for Arm, we get 5. We can get to 6 by including
+ * lr in the temp set. The only problematic case is all operands and result are
+ * distinct, and none have been promoted. In that case, we can succeed by aggressively
+ * freeing operand temp registers after they are no longer needed. All other cases
+ * can proceed normally. We'll just punt on the case of the result having a misaligned
+ * overlap with either operand and send that case to a runtime handler.
+ */
+ RegLocation rl_result;
+ if (PartiallyIntersects(rl_src1, rl_dest) || (PartiallyIntersects(rl_src2, rl_dest))) {
+ FlushAllRegs();
+ CallRuntimeHelperRegLocationRegLocation(kQuickLmul, rl_src1, rl_src2, false);
+ rl_result = GetReturnWide(kCoreReg);
StoreValueWide(rl_dest, rl_result);
+ return;
+ }
+
+ rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+ rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+
+ int reg_status = 0;
+ RegStorage res_lo;
+ RegStorage res_hi;
+ bool dest_promoted = rl_dest.location == kLocPhysReg && rl_dest.reg.Valid() &&
+ !IsTemp(rl_dest.reg.GetLow()) && !IsTemp(rl_dest.reg.GetHigh());
+ bool src1_promoted = !IsTemp(rl_src1.reg.GetLow()) && !IsTemp(rl_src1.reg.GetHigh());
+ bool src2_promoted = !IsTemp(rl_src2.reg.GetLow()) && !IsTemp(rl_src2.reg.GetHigh());
+ // Check if rl_dest is *not* either operand and we have enough temp registers.
+ if ((rl_dest.s_reg_low != rl_src1.s_reg_low && rl_dest.s_reg_low != rl_src2.s_reg_low) &&
+ (dest_promoted || src1_promoted || src2_promoted)) {
+ // In this case, we do not need to manually allocate temp registers for result.
+ rl_result = EvalLoc(rl_dest, kCoreReg, true);
+ res_lo = rl_result.reg.GetLow();
+ res_hi = rl_result.reg.GetHigh();
+ } else {
+ res_lo = AllocTemp();
+ if ((rl_src1.s_reg_low == rl_src2.s_reg_low) || src1_promoted || src2_promoted) {
+ // In this case, we have enough temp registers to be allocated for result.
+ res_hi = AllocTemp();
+ reg_status = 1;
+ } else {
+ // In this case, all temps are now allocated.
+ // res_hi will be allocated after we can free src1_hi.
+ reg_status = 2;
+ }
+ }
+
+ // Temporarily add LR to the temp pool, and assign it to tmp1
+ MarkTemp(rs_rARM_LR);
+ FreeTemp(rs_rARM_LR);
+ RegStorage tmp1 = rs_rARM_LR;
+ LockTemp(rs_rARM_LR);
+
+ if (rl_src1.reg == rl_src2.reg) {
+ DCHECK(res_hi.Valid());
+ DCHECK(res_lo.Valid());
+ NewLIR3(kThumb2MulRRR, tmp1.GetReg(), rl_src1.reg.GetLowReg(), rl_src1.reg.GetHighReg());
+ NewLIR4(kThumb2Umull, res_lo.GetReg(), res_hi.GetReg(), rl_src1.reg.GetLowReg(),
+ rl_src1.reg.GetLowReg());
+ OpRegRegRegShift(kOpAdd, res_hi, res_hi, tmp1, EncodeShift(kArmLsl, 1));
+ } else {
+ NewLIR3(kThumb2MulRRR, tmp1.GetReg(), rl_src2.reg.GetLowReg(), rl_src1.reg.GetHighReg());
+ if (reg_status == 2) {
+ DCHECK(!res_hi.Valid());
+ DCHECK_NE(rl_src1.reg.GetLowReg(), rl_src2.reg.GetLowReg());
+ DCHECK_NE(rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
+ // Will force free src1_hi, so must clobber.
+ Clobber(rl_src1.reg);
+ FreeTemp(rl_src1.reg.GetHigh());
+ res_hi = AllocTemp();
+ }
+ DCHECK(res_hi.Valid());
+ DCHECK(res_lo.Valid());
+ NewLIR4(kThumb2Umull, res_lo.GetReg(), res_hi.GetReg(), rl_src2.reg.GetLowReg(),
+ rl_src1.reg.GetLowReg());
+ NewLIR4(kThumb2Mla, tmp1.GetReg(), rl_src1.reg.GetLowReg(), rl_src2.reg.GetHighReg(),
+ tmp1.GetReg());
+ NewLIR4(kThumb2AddRRR, res_hi.GetReg(), tmp1.GetReg(), res_hi.GetReg(), 0);
+ if (reg_status == 2) {
+ FreeTemp(rl_src1.reg.GetLow());
+ }
+ }
+
+ // Now, restore lr to its non-temp status.
+ FreeTemp(tmp1);
+ Clobber(rs_rARM_LR);
+ UnmarkTemp(rs_rARM_LR);
+
+ if (reg_status != 0) {
+ // We had manually allocated registers for rl_result.
+ // Now construct a RegLocation.
+ rl_result = GetReturnWide(kCoreReg); // Just using as a template.
+ rl_result.reg = RegStorage::MakeRegPair(res_lo, res_hi);
+ }
+
+ StoreValueWide(rl_dest, rl_result);
}
void ArmMir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
@@ -1471,6 +1479,7 @@
void ArmMir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift,
int flags) {
+ UNUSED(flags);
rl_src = LoadValueWide(rl_src, kCoreReg);
// Per spec, we only care about low 6 bits of shift amount.
int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
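The re-indented comment block in GenMulLong describes the standard decomposition of a 64-bit multiply into 32-bit halves: one full 32x32->64 multiply for the low words, plus two truncating 32x32 multiplies folded into the high word. A plain C++ sketch of that arithmetic (an illustration of the math, not of the emitted Thumb2 code):

    #include <cstdint>

    // Low 64 bits of (src1_hi:src1_lo) * (src2_hi:src2_lo).
    uint64_t MulLongLow(uint32_t src1_lo, uint32_t src1_hi,
                        uint32_t src2_lo, uint32_t src2_hi) {
      uint32_t tmp1 = src1_hi * src2_lo;                         // high * low (truncated)
      uint64_t dest = static_cast<uint64_t>(src1_lo) * src2_lo;  // full low * low product
      tmp1 += src1_lo * src2_hi;                                 // plus low * high (truncated)
      dest += static_cast<uint64_t>(tmp1) << 32;                 // dest.hi += tmp1
      return dest;
    }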
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index 7100a28..0e8f645 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -559,11 +559,10 @@
call_method_insns_.reserve(100);
// Sanity check - make sure encoding map lines up.
for (int i = 0; i < kArmLast; i++) {
- if (ArmMir2Lir::EncodingMap[i].opcode != i) {
- LOG(FATAL) << "Encoding order for " << ArmMir2Lir::EncodingMap[i].name
- << " is wrong: expecting " << i << ", seeing "
- << static_cast<int>(ArmMir2Lir::EncodingMap[i].opcode);
- }
+ DCHECK_EQ(ArmMir2Lir::EncodingMap[i].opcode, i)
+ << "Encoding order for " << ArmMir2Lir::EncodingMap[i].name
+ << " is wrong: expecting " << i << ", seeing "
+ << static_cast<int>(ArmMir2Lir::EncodingMap[i].opcode);
}
}
@@ -904,8 +903,8 @@
const RegStorage fpArgMappingToPhysicalReg[] =
{rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15};
- const uint32_t fpArgMappingToPhysicalRegSize = arraysize(fpArgMappingToPhysicalReg);
- COMPILE_ASSERT(fpArgMappingToPhysicalRegSize % 2 == 0, knum_of_fp_arg_regs_not_even);
+ constexpr uint32_t fpArgMappingToPhysicalRegSize = arraysize(fpArgMappingToPhysicalReg);
+ static_assert(fpArgMappingToPhysicalRegSize % 2 == 0, "Number of FP Arg regs is not even");
if (kArm32QuickCodeUseSoftFloat) {
is_double_or_float = false; // Regard double as long, float as int.
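COMPILE_ASSERT is the pre-C++11 idiom in which the failure message had to be spelled as an identifier; with C++11 available, static_assert takes a real string and gives a readable diagnostic. A rough sketch of the difference (the macro shown is a generic emulation, not ART's exact definition):

    #include <cstdint>

    // Pre-C++11 emulation: a negative-size array type on failure.
    #define COMPILE_ASSERT_SKETCH(expr, msg) \
      typedef char msg[(expr) ? 1 : -1]

    COMPILE_ASSERT_SKETCH(sizeof(int32_t) == 4, int32_must_be_4_bytes);

    // C++11 replacement used throughout this change.
    static_assert(sizeof(int32_t) == 4, "int32_t must be 4 bytes");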
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index ce2de65..a1a5ad1 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -373,18 +373,21 @@
}
LIR* ArmMir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) {
+ UNUSED(r_dest, r_base, offset, move_type);
UNIMPLEMENTED(FATAL);
- return nullptr;
+ UNREACHABLE();
}
LIR* ArmMir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
+ UNUSED(r_base, offset, r_src, move_type);
UNIMPLEMENTED(FATAL);
- return nullptr;
+ UNREACHABLE();
}
LIR* ArmMir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
+ UNUSED(op, cc, r_dest, r_src);
LOG(FATAL) << "Unexpected use of OpCondRegReg for Arm";
- return NULL;
+ UNREACHABLE();
}
LIR* ArmMir2Lir::OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1,
@@ -461,7 +464,6 @@
}
LIR* ArmMir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) {
- LIR* res;
bool neg = (value < 0);
int32_t abs_value = (neg) ? -value : value;
ArmOpcode opcode = kThumbBkpt;
@@ -587,6 +589,7 @@
} else {
RegStorage r_scratch = AllocTemp();
LoadConstant(r_scratch, value);
+ LIR* res;
if (EncodingMap[alt_opcode].flags & IS_QUAD_OP)
res = NewLIR4(alt_opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg(), 0);
else
@@ -1167,11 +1170,13 @@
}
LIR* ArmMir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
+ UNUSED(op, r_base, disp);
LOG(FATAL) << "Unexpected use of OpMem for Arm";
- return NULL;
+ UNREACHABLE();
}
LIR* ArmMir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
+ UNUSED(trampoline); // The address of the trampoline is already loaded into r_tgt.
return OpReg(op, r_tgt);
}
diff --git a/compiler/dex/quick/arm64/arm64_lir.h b/compiler/dex/quick/arm64/arm64_lir.h
index a87b06a..973279e 100644
--- a/compiler/dex/quick/arm64/arm64_lir.h
+++ b/compiler/dex/quick/arm64/arm64_lir.h
@@ -127,7 +127,7 @@
R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
// Registers (integer) values.
-enum A64NativeRegisterPool {
+enum A64NativeRegisterPool { // private marker to keep generate-operator-out.py from processing this enum.
# define A64_DEFINE_REGISTERS(nr) \
rw##nr = RegStorage::k32BitSolo | RegStorage::kCoreRegister | nr, \
rx##nr = RegStorage::k64BitSolo | RegStorage::kCoreRegister | nr, \
@@ -362,9 +362,10 @@
kA64Tbz3rht, // tbz imm_6_b5[31] [0110110] imm_6_b40[23-19] imm_14[18-5] rt[4-0].
kA64Ubfm4rrdd, // ubfm[s10100110] N[22] imm_r[21-16] imm_s[15-10] rn[9-5] rd[4-0].
kA64Last,
- kA64NotWide = 0, // Flag used to select the first instruction variant.
- kA64Wide = 0x1000 // Flag used to select the second instruction variant.
+ kA64NotWide = kA64First, // 0 - Flag used to select the first instruction variant.
+ kA64Wide = 0x1000 // Flag used to select the second instruction variant.
};
+std::ostream& operator<<(std::ostream& os, const A64Opcode& rhs);
/*
* The A64 instruction set provides two variants for many instructions. For example, "mov wN, wM"
@@ -414,6 +415,7 @@
kFmtExtend, // Register extend, 9-bit at [23..21, 15..10].
kFmtSkip, // Unused field, but continue to next.
};
+std::ostream& operator<<(std::ostream& os, const A64EncodingKind& rhs);
// Struct used to define the snippet positions for each A64 opcode.
struct A64EncodingMap {
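kA64Wide (0x1000) is a flag OR'd into an A64 opcode to select the 64-bit ("x" register) form of an instruction, and kA64NotWide now aliases kA64First rather than a bare 0 so the enum stays self-describing. The surrounding code manipulates that flag with WIDE/IS_WIDE/UNWIDE-style helpers; a simplified sketch of the idea (names and values here are illustrative, the real ones live in arm64_lir.h):

    enum : int { kWideFlagSketch = 0x1000 };

    constexpr int Widen(int op)    { return op | kWideFlagSketch; }    // pick the x-register variant
    constexpr bool IsWide(int op)  { return (op & kWideFlagSketch) != 0; }
    constexpr int Unwiden(int op)  { return op & ~kWideFlagSketch; }   // recover the base opcode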
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index e2ff090..da7ac87 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -779,8 +779,8 @@
// and zr. This means that these two registers do not need any special treatment, as
// their bottom 5 bits are correctly set to 31 == 0b11111, which is the right
// value for encoding both sp and zr.
- COMPILE_ASSERT((rxzr & 0x1f) == 0x1f, rzr_register_number_must_be_31);
- COMPILE_ASSERT((rsp & 0x1f) == 0x1f, rsp_register_number_must_be_31);
+ static_assert((rxzr & 0x1f) == 0x1f, "rzr register number must be 31");
+ static_assert((rsp & 0x1f) == 0x1f, "rsp register number must be 31");
}
value = (operand << encoder->field_loc[i].start) &
@@ -928,14 +928,13 @@
// Check if branch offset can be encoded in tbz/tbnz.
if (!IS_SIGNED_IMM14(delta >> 2)) {
DexOffset dalvik_offset = lir->dalvik_offset;
- int16_t opcode = lir->opcode;
- LIR* target = lir->target;
+ LIR* target_lir = lir->target;
// "tbz/tbnz Rt, #imm, label" -> "tst Rt, #(1<<imm)".
offset_adjustment -= lir->flags.size;
- int32_t imm = EncodeLogicalImmediate(IS_WIDE(opcode), 1 << lir->operands[1]);
- DCHECK_NE(imm, -1);
+ int32_t encoded_imm = EncodeLogicalImmediate(IS_WIDE(opcode), 1 << lir->operands[1]);
+ DCHECK_NE(encoded_imm, -1);
lir->opcode = IS_WIDE(opcode) ? WIDE(kA64Tst2rl) : kA64Tst2rl;
- lir->operands[1] = imm;
+ lir->operands[1] = encoded_imm;
lir->target = nullptr;
lir->flags.fixup = EncodingMap[kA64Tst2rl].fixup;
lir->flags.size = EncodingMap[kA64Tst2rl].size;
@@ -944,7 +943,7 @@
opcode = UNWIDE(opcode);
DCHECK(opcode == kA64Tbz3rht || opcode == kA64Tbnz3rht);
LIR* new_lir = RawLIR(dalvik_offset, kA64B2ct,
- opcode == kA64Tbz3rht ? kArmCondEq : kArmCondNe, 0, 0, 0, 0, target);
+ opcode == kA64Tbz3rht ? kArmCondEq : kArmCondNe, 0, 0, 0, 0, target_lir);
InsertLIRAfter(lir, new_lir);
new_lir->offset = lir->offset + lir->flags.size;
new_lir->flags.generation = generation;
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index c898e2d..a9a58a3 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -401,6 +401,7 @@
}
static bool Arm64UseRelativeCall(CompilationUnit* cu, const MethodReference& target_method) {
+ UNUSED(cu, target_method);
// Always emit relative calls.
return true;
}
@@ -411,9 +412,10 @@
*/
static int Arm64NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
int state, const MethodReference& target_method,
- uint32_t unused,
+ uint32_t unused_idx,
uintptr_t direct_code, uintptr_t direct_method,
InvokeType type) {
+ UNUSED(info, unused_idx);
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
if (direct_code != 0 && direct_method != 0) {
switch (state) {
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index 9f02606..bd363c4 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -188,7 +188,7 @@
void GenSelect(BasicBlock* bb, MIR* mir) OVERRIDE;
void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- int dest_reg_class) OVERRIDE;
+ RegisterClass dest_reg_class) OVERRIDE;
bool GenMemBarrier(MemBarrierKind barrier_kind) OVERRIDE;
void GenMonitorEnter(int opt_flags, RegLocation rl_src) OVERRIDE;
@@ -249,10 +249,10 @@
uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
bool skip_this) OVERRIDE;
- bool WideGPRsAreAliases() OVERRIDE {
+ bool WideGPRsAreAliases() const OVERRIDE {
return true; // 64b architecture.
}
- bool WideFPRsAreAliases() OVERRIDE {
+ bool WideFPRsAreAliases() const OVERRIDE {
return true; // 64b architecture.
}
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index 418d81e..965759b 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -32,11 +32,13 @@
}
LIR* Arm64Mir2Lir::OpIT(ConditionCode ccode, const char* guide) {
+ UNUSED(ccode, guide);
LOG(FATAL) << "Unexpected use of OpIT for Arm64";
- return NULL;
+ UNREACHABLE();
}
void Arm64Mir2Lir::OpEndIT(LIR* it) {
+ UNUSED(it);
LOG(FATAL) << "Unexpected use of OpEndIT for Arm64";
}
@@ -174,13 +176,14 @@
void Arm64Mir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- int dest_reg_class) {
+ RegisterClass dest_reg_class) {
DCHECK(rs_dest.Valid());
OpRegReg(kOpCmp, left_op, right_op);
GenSelect(true_val, false_val, code, rs_dest, dest_reg_class);
}
void Arm64Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb);
RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
rl_src = LoadValue(rl_src, rl_src.ref ? kRefReg : kCoreReg);
// rl_src may be aliased with rl_result/rl_dest, so do compare early.
@@ -406,6 +409,7 @@
// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
bool Arm64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
RegLocation rl_src, RegLocation rl_dest, int lit) {
+ UNUSED(dalvik_opcode);
if ((lit < 0) || (lit >= static_cast<int>(arraysize(magic_table)))) {
return false;
}
@@ -450,6 +454,7 @@
bool Arm64Mir2Lir::SmallLiteralDivRem64(Instruction::Code dalvik_opcode, bool is_div,
RegLocation rl_src, RegLocation rl_dest, int64_t lit) {
+ UNUSED(dalvik_opcode);
if ((lit < 0) || (lit >= static_cast<int>(arraysize(magic_table)))) {
return false;
}
@@ -590,13 +595,16 @@
}
bool Arm64Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
+ UNUSED(rl_src, rl_dest, lit);
LOG(FATAL) << "Unexpected use of EasyMultiply for Arm64";
- return false;
+ UNREACHABLE();
}
-RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div) {
+RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
+ bool is_div) {
+ UNUSED(rl_dest, rl_src1, lit, is_div);
LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm64";
- return rl_dest;
+ UNREACHABLE();
}
RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit, bool is_div) {
@@ -615,8 +623,9 @@
RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2, bool is_div, int flags) {
+ UNUSED(rl_dest, rl_src1, rl_src2, is_div, flags);
LOG(FATAL) << "Unexpected use of GenDivRem for Arm64";
- return rl_dest;
+ UNREACHABLE();
}
RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage r_src1, RegStorage r_src2,
@@ -929,25 +938,27 @@
}
LIR* Arm64Mir2Lir::OpVldm(RegStorage r_base, int count) {
+ UNUSED(r_base, count);
LOG(FATAL) << "Unexpected use of OpVldm for Arm64";
- return NULL;
+ UNREACHABLE();
}
LIR* Arm64Mir2Lir::OpVstm(RegStorage r_base, int count) {
+ UNUSED(r_base, count);
LOG(FATAL) << "Unexpected use of OpVstm for Arm64";
- return NULL;
+ UNREACHABLE();
}
void Arm64Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
- RegLocation rl_result, int lit,
- int first_bit, int second_bit) {
+ RegLocation rl_result, int lit ATTRIBUTE_UNUSED,
+ int first_bit, int second_bit) {
OpRegRegRegShift(kOpAdd, rl_result.reg, rl_src.reg, rl_src.reg, EncodeShift(kA64Lsl, second_bit - first_bit));
if (first_bit != 0) {
OpRegRegImm(kOpLsl, rl_result.reg, rl_result.reg, first_bit);
}
}
-void Arm64Mir2Lir::GenDivZeroCheckWide(RegStorage reg) {
+void Arm64Mir2Lir::GenDivZeroCheckWide(RegStorage reg ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of GenDivZero for Arm64";
}
@@ -1311,7 +1322,7 @@
void Arm64Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift,
- int flags) {
+ int flags ATTRIBUTE_UNUSED) {
OpKind op = kOpBkpt;
// Per spec, we only care about low 6 bits of shift amount.
int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
@@ -1467,8 +1478,8 @@
}
}
-static int SpillRegsPreSub(Arm64Mir2Lir* m2l, RegStorage base, uint32_t core_reg_mask,
- uint32_t fp_reg_mask, int frame_size) {
+static int SpillRegsPreSub(Arm64Mir2Lir* m2l, uint32_t core_reg_mask, uint32_t fp_reg_mask,
+ int frame_size) {
m2l->OpRegRegImm(kOpSub, rs_sp, rs_sp, frame_size);
int core_count = POPCOUNT(core_reg_mask);
@@ -1490,7 +1501,7 @@
}
static int SpillRegsPreIndexed(Arm64Mir2Lir* m2l, RegStorage base, uint32_t core_reg_mask,
- uint32_t fp_reg_mask, int frame_size) {
+ uint32_t fp_reg_mask) {
// Otherwise, spill both core and fp regs at the same time.
// The very first instruction will be an stp with pre-indexed address, moving the stack pointer
// down. From then on, we fill upwards. This will generate overall the same number of instructions
@@ -1613,9 +1624,9 @@
// This case is also optimal when we have an odd number of core spills, and an even (non-zero)
// number of fp spills.
if ((RoundUp(frame_size, 8) / 8 <= 63)) {
- return SpillRegsPreSub(this, base, core_reg_mask, fp_reg_mask, frame_size);
+ return SpillRegsPreSub(this, core_reg_mask, fp_reg_mask, frame_size);
} else {
- return SpillRegsPreIndexed(this, base, core_reg_mask, fp_reg_mask, frame_size);
+ return SpillRegsPreIndexed(this, base, core_reg_mask, fp_reg_mask);
}
}
@@ -1653,6 +1664,7 @@
void Arm64Mir2Lir::UnspillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask,
int frame_size) {
+ DCHECK(base == rs_sp);
// Restore saves and drop stack frame.
// 2 versions:
//
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
index ba47883..094ff51 100644
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ b/compiler/dex/quick/arm64/target_arm64.cc
@@ -589,11 +589,10 @@
call_method_insns_(arena->Adapter()) {
// Sanity check - make sure encoding map lines up.
for (int i = 0; i < kA64Last; i++) {
- if (UNWIDE(Arm64Mir2Lir::EncodingMap[i].opcode) != i) {
- LOG(FATAL) << "Encoding order for " << Arm64Mir2Lir::EncodingMap[i].name
- << " is wrong: expecting " << i << ", seeing "
- << static_cast<int>(Arm64Mir2Lir::EncodingMap[i].opcode);
- }
+ DCHECK_EQ(UNWIDE(Arm64Mir2Lir::EncodingMap[i].opcode), i)
+ << "Encoding order for " << Arm64Mir2Lir::EncodingMap[i].name
+ << " is wrong: expecting " << i << ", seeing "
+ << static_cast<int>(Arm64Mir2Lir::EncodingMap[i].opcode);
}
}
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index 6985b73..47ccc46 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -306,7 +306,7 @@
// algorithm will give it a low priority for promotion, even when it is referenced many times in
// the code.
-bool Arm64Mir2Lir::InexpensiveConstantInt(int32_t value) {
+bool Arm64Mir2Lir::InexpensiveConstantInt(int32_t value ATTRIBUTE_UNUSED) {
// A 32-bit int can always be loaded with 2 instructions (and without using the literal pool).
// We therefore return true and give it a low priority for promotion.
return true;
@@ -673,19 +673,24 @@
}
}
-LIR* Arm64Mir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) {
+LIR* Arm64Mir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset,
+ MoveType move_type) {
+ UNUSED(r_dest, r_base, offset, move_type);
UNIMPLEMENTED(FATAL);
- return nullptr;
+ UNREACHABLE();
}
-LIR* Arm64Mir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
+LIR* Arm64Mir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src,
+ MoveType move_type) {
+ UNUSED(r_base, offset, r_src, move_type);
UNIMPLEMENTED(FATAL);
return nullptr;
}
LIR* Arm64Mir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
+ UNUSED(op, cc, r_dest, r_src);
LOG(FATAL) << "Unexpected use of OpCondRegReg for Arm64";
- return NULL;
+ UNREACHABLE();
}
LIR* Arm64Mir2Lir::OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1,
@@ -1386,16 +1391,20 @@
}
LIR* Arm64Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
+ UNUSED(r_dest, r_src);
LOG(FATAL) << "Unexpected use of OpFpRegCopy for Arm64";
- return NULL;
+ UNREACHABLE();
}
LIR* Arm64Mir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
+ UNUSED(op, r_base, disp);
LOG(FATAL) << "Unexpected use of OpMem for Arm64";
- return NULL;
+ UNREACHABLE();
}
-LIR* Arm64Mir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
+LIR* Arm64Mir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt,
+ QuickEntrypointEnum trampoline ATTRIBUTE_UNUSED) {
+ // The address of the trampoline is already loaded into r_tgt.
return OpReg(op, r_tgt);
}
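ATTRIBUTE_UNUSED marks an intentionally ignored parameter directly in the signature, as an alternative to calling UNUSED(...) in the body; both keep -Wunused-parameter quiet for parameters that are deliberately ignored. A minimal sketch of a typical definition and use (ART's actual macro lives in its own header and may differ):

    #if defined(__GNUC__) || defined(__clang__)
    #define ATTRIBUTE_UNUSED_SKETCH __attribute__((__unused__))
    #else
    #define ATTRIBUTE_UNUSED_SKETCH
    #endif

    // The trampoline address is assumed to be loaded into r_tgt already,
    // so the enum value itself is never consulted.
    int InvokeTrampolineSketch(int op, int r_tgt, int trampoline ATTRIBUTE_UNUSED_SKETCH) {
      return op + r_tgt;
    }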
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 80a1ac4..9403516 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -105,17 +105,17 @@
void Mir2Lir::UnlinkLIR(LIR* lir) {
if (UNLIKELY(lir == first_lir_insn_)) {
first_lir_insn_ = lir->next;
- if (lir->next != NULL) {
- lir->next->prev = NULL;
+ if (lir->next != nullptr) {
+ lir->next->prev = nullptr;
} else {
- DCHECK(lir->next == NULL);
+ DCHECK(lir->next == nullptr);
DCHECK(lir == last_lir_insn_);
- last_lir_insn_ = NULL;
+ last_lir_insn_ = nullptr;
}
} else if (lir == last_lir_insn_) {
last_lir_insn_ = lir->prev;
- lir->prev->next = NULL;
- } else if ((lir->prev != NULL) && (lir->next != NULL)) {
+ lir->prev->next = nullptr;
+ } else if ((lir->prev != nullptr) && (lir->next != nullptr)) {
lir->prev->next = lir->next;
lir->next->prev = lir->prev;
}
@@ -334,10 +334,10 @@
<< static_cast<float>(total_size_) / static_cast<float>(insns_size * 2);
DumpPromotionMap();
UpdateLIROffsets();
- for (lir_insn = first_lir_insn_; lir_insn != NULL; lir_insn = lir_insn->next) {
+ for (lir_insn = first_lir_insn_; lir_insn != nullptr; lir_insn = lir_insn->next) {
DumpLIRInsn(lir_insn, 0);
}
- for (lir_insn = literal_list_; lir_insn != NULL; lir_insn = lir_insn->next) {
+ for (lir_insn = literal_list_; lir_insn != nullptr; lir_insn = lir_insn->next) {
LOG(INFO) << StringPrintf("%x (%04x): .word (%#x)", lir_insn->offset, lir_insn->offset,
lir_insn->operands[0]);
}
@@ -368,13 +368,13 @@
return data_target;
data_target = data_target->next;
}
- return NULL;
+ return nullptr;
}
/* Search the existing constants in the literal pool for an exact wide match */
LIR* Mir2Lir::ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi) {
bool lo_match = false;
- LIR* lo_target = NULL;
+ LIR* lo_target = nullptr;
while (data_target) {
if (lo_match && (data_target->operands[0] == val_hi)) {
// Record high word in case we need to expand this later.
@@ -388,7 +388,7 @@
}
data_target = data_target->next;
}
- return NULL;
+ return nullptr;
}
/* Search the existing constants in the literal pool for an exact method match */
@@ -431,7 +431,7 @@
estimated_native_code_size_ += sizeof(value);
return new_value;
}
- return NULL;
+ return nullptr;
}
/* Add a 64-bit constant to the constant pool or mixed with code */
@@ -469,14 +469,14 @@
void Mir2Lir::InstallLiteralPools() {
AlignBuffer(code_buffer_, data_offset_);
LIR* data_lir = literal_list_;
- while (data_lir != NULL) {
+ while (data_lir != nullptr) {
Push32(code_buffer_, data_lir->operands[0]);
data_lir = NEXT_LIR(data_lir);
}
// TODO: patches_.reserve() as needed.
// Push code and method literals, record offsets for the compiler to patch.
data_lir = code_literal_list_;
- while (data_lir != NULL) {
+ while (data_lir != nullptr) {
uint32_t target_method_idx = data_lir->operands[0];
const DexFile* target_dex_file =
reinterpret_cast<const DexFile*>(UnwrapPointer(data_lir->operands[1]));
@@ -486,7 +486,7 @@
data_lir = NEXT_LIR(data_lir);
}
data_lir = method_literal_list_;
- while (data_lir != NULL) {
+ while (data_lir != nullptr) {
uint32_t target_method_idx = data_lir->operands[0];
const DexFile* target_dex_file =
reinterpret_cast<const DexFile*>(UnwrapPointer(data_lir->operands[1]));
@@ -497,7 +497,7 @@
}
// Push class literals.
data_lir = class_literal_list_;
- while (data_lir != NULL) {
+ while (data_lir != nullptr) {
uint32_t target_type_idx = data_lir->operands[0];
const DexFile* class_dex_file =
reinterpret_cast<const DexFile*>(UnwrapPointer(data_lir->operands[1]));
@@ -577,7 +577,7 @@
}
static int AssignLiteralOffsetCommon(LIR* lir, CodeOffset offset) {
- for (; lir != NULL; lir = lir->next) {
+ for (; lir != nullptr; lir = lir->next) {
lir->offset = offset;
offset += 4;
}
@@ -588,7 +588,7 @@
unsigned int element_size) {
// Align to natural pointer size.
offset = RoundUp(offset, element_size);
- for (; lir != NULL; lir = lir->next) {
+ for (; lir != nullptr; lir = lir->next) {
lir->offset = offset;
offset += element_size;
}
@@ -642,7 +642,7 @@
uint32_t dex2pc_entries = 0u;
uint32_t dex2pc_offset = 0u;
uint32_t dex2pc_dalvik_offset = 0u;
- for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
+ for (LIR* tgt_lir = first_lir_insn_; tgt_lir != nullptr; tgt_lir = NEXT_LIR(tgt_lir)) {
pc2dex_src_entries++;
if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
pc2dex_entries += 1;
@@ -682,7 +682,7 @@
pc2dex_dalvik_offset = 0u;
dex2pc_offset = 0u;
dex2pc_dalvik_offset = 0u;
- for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
+ for (LIR* tgt_lir = first_lir_insn_; tgt_lir != nullptr; tgt_lir = NEXT_LIR(tgt_lir)) {
if (generate_src_map && !tgt_lir->flags.is_nop) {
src_mapping_table_.push_back(SrcMapElem({tgt_lir->offset,
static_cast<int32_t>(tgt_lir->dalvik_offset)}));
@@ -717,7 +717,7 @@
CHECK_EQ(table.PcToDexSize(), pc2dex_entries);
auto it = table.PcToDexBegin();
auto it2 = table.DexToPcBegin();
- for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
+ for (LIR* tgt_lir = first_lir_insn_; tgt_lir != nullptr; tgt_lir = NEXT_LIR(tgt_lir)) {
if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
CHECK_EQ(tgt_lir->offset, it.NativePcOffset());
CHECK_EQ(tgt_lir->dalvik_offset, it.DexPc());
@@ -758,7 +758,7 @@
uint32_t native_offset = it.NativePcOffset();
uint32_t dex_pc = it.DexPc();
const uint8_t* references = dex_gc_map.FindBitMap(dex_pc, false);
- CHECK(references != NULL) << "Missing ref for dex pc 0x" << std::hex << dex_pc <<
+ CHECK(references != nullptr) << "Missing ref for dex pc 0x" << std::hex << dex_pc <<
": " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
native_gc_map_builder.AddEntry(native_offset, references);
}
@@ -768,8 +768,8 @@
int Mir2Lir::AssignLiteralOffset(CodeOffset offset) {
offset = AssignLiteralOffsetCommon(literal_list_, offset);
constexpr unsigned int ptr_size = sizeof(uint32_t);
- COMPILE_ASSERT(ptr_size >= sizeof(mirror::HeapReference<mirror::Object>),
- ptr_size_cannot_hold_a_heap_reference);
+ static_assert(ptr_size >= sizeof(mirror::HeapReference<mirror::Object>),
+ "Pointer size cannot hold a heap reference");
offset = AssignLiteralPointerOffsetCommon(code_literal_list_, offset, ptr_size);
offset = AssignLiteralPointerOffsetCommon(method_literal_list_, offset, ptr_size);
offset = AssignLiteralPointerOffsetCommon(class_literal_list_, offset, ptr_size);
@@ -904,6 +904,7 @@
/* Set up special LIR to mark a Dalvik byte-code instruction start for pretty printing */
void Mir2Lir::MarkBoundary(DexOffset offset, const char* inst_str) {
+ UNUSED(offset);
// NOTE: only used for debug listings.
NewLIR1(kPseudoDalvikByteCodeBoundary, WrapPointer(ArenaStrdup(inst_str)));
}
@@ -925,7 +926,7 @@
case Instruction::IF_LEZ: is_taken = (src1 <= 0); break;
default:
LOG(FATAL) << "Unexpected opcode " << opcode;
- is_taken = false;
+ UNREACHABLE();
}
return is_taken;
}
@@ -941,8 +942,8 @@
case kCondLe: res = kCondGe; break;
case kCondGe: res = kCondLe; break;
default:
- res = static_cast<ConditionCode>(0);
LOG(FATAL) << "Unexpected ccode " << before;
+ UNREACHABLE();
}
return res;
}
@@ -957,8 +958,8 @@
case kCondLe: res = kCondGt; break;
case kCondGe: res = kCondLt; break;
default:
- res = static_cast<ConditionCode>(0);
LOG(FATAL) << "Unexpected ccode " << before;
+ UNREACHABLE();
}
return res;
}
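Dropping the dummy "res = ..." assignments from the default arms works because LOG(FATAL) never returns and UNREACHABLE() tells the compiler so; res therefore needs no bogus value to avoid maybe-uninitialized warnings. A small self-contained sketch of the pattern (LOG(FATAL) is stubbed with abort):

    #include <cstdlib>

    #define UNREACHABLE_SKETCH() __builtin_unreachable()

    enum Cond { kCondEqS, kCondNeS };

    Cond NegateSketch(Cond before) {
      Cond res;
      switch (before) {
        case kCondEqS: res = kCondNeS; break;
        case kCondNeS: res = kCondEqS; break;
        default:
          std::abort();          // stand-in for LOG(FATAL) << "Unexpected ccode"
          UNREACHABLE_SKETCH();  // no fall-through, so res needs no dummy value
      }
      return res;
    }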
@@ -966,11 +967,11 @@
// TODO: move to mir_to_lir.cc
Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
: Backend(arena),
- literal_list_(NULL),
- method_literal_list_(NULL),
- class_literal_list_(NULL),
- code_literal_list_(NULL),
- first_fixup_(NULL),
+ literal_list_(nullptr),
+ method_literal_list_(nullptr),
+ class_literal_list_(nullptr),
+ code_literal_list_(nullptr),
+ first_fixup_(nullptr),
cu_(cu),
mir_graph_(mir_graph),
switch_tables_(arena->Adapter(kArenaAllocSwitchTable)),
@@ -980,8 +981,8 @@
pointer_storage_(arena->Adapter()),
data_offset_(0),
total_size_(0),
- block_label_list_(NULL),
- promotion_map_(NULL),
+ block_label_list_(nullptr),
+ promotion_map_(nullptr),
current_dalvik_offset_(0),
estimated_native_code_size_(0),
reg_pool_(nullptr),
@@ -994,8 +995,8 @@
frame_size_(0),
core_spill_mask_(0),
fp_spill_mask_(0),
- first_lir_insn_(NULL),
- last_lir_insn_(NULL),
+ first_lir_insn_(nullptr),
+ last_lir_insn_(nullptr),
slow_paths_(arena->Adapter(kArenaAllocSlowPaths)),
mem_ref_type_(ResourceMask::kHeapRef),
mask_cache_(arena) {
@@ -1005,8 +1006,8 @@
reginfo_map_.reserve(RegStorage::kMaxRegs);
pointer_storage_.reserve(128);
slow_paths_.reserve(32);
- // Reserve pointer id 0 for NULL.
- size_t null_idx = WrapPointer(NULL);
+ // Reserve pointer id 0 for nullptr.
+ size_t null_idx = WrapPointer(nullptr);
DCHECK_EQ(null_idx, 0U);
}
@@ -1126,14 +1127,14 @@
* unit
*/
void Mir2Lir::AppendLIR(LIR* lir) {
- if (first_lir_insn_ == NULL) {
- DCHECK(last_lir_insn_ == NULL);
+ if (first_lir_insn_ == nullptr) {
+ DCHECK(last_lir_insn_ == nullptr);
last_lir_insn_ = first_lir_insn_ = lir;
- lir->prev = lir->next = NULL;
+ lir->prev = lir->next = nullptr;
} else {
last_lir_insn_->next = lir;
lir->prev = last_lir_insn_;
- lir->next = NULL;
+ lir->next = nullptr;
last_lir_insn_ = lir;
}
}
@@ -1145,7 +1146,7 @@
* prev_lir <-> new_lir <-> current_lir
*/
void Mir2Lir::InsertLIRBefore(LIR* current_lir, LIR* new_lir) {
- DCHECK(current_lir->prev != NULL);
+ DCHECK(current_lir->prev != nullptr);
LIR *prev_lir = current_lir->prev;
prev_lir->next = new_lir;
@@ -1216,7 +1217,7 @@
void Mir2Lir::LoadCodeAddress(const MethodReference& target_method, InvokeType type,
SpecialTargetRegister symbolic_reg) {
LIR* data_target = ScanLiteralPoolMethod(code_literal_list_, target_method);
- if (data_target == NULL) {
+ if (data_target == nullptr) {
data_target = AddWordData(&code_literal_list_, target_method.dex_method_index);
data_target->operands[1] = WrapPointer(const_cast<DexFile*>(target_method.dex_file));
// NOTE: The invoke type doesn't contribute to the literal identity. In fact, we can have
@@ -1233,7 +1234,7 @@
void Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type,
SpecialTargetRegister symbolic_reg) {
LIR* data_target = ScanLiteralPoolMethod(method_literal_list_, target_method);
- if (data_target == NULL) {
+ if (data_target == nullptr) {
data_target = AddWordData(&method_literal_list_, target_method.dex_method_index);
data_target->operands[1] = WrapPointer(const_cast<DexFile*>(target_method.dex_file));
// NOTE: The invoke type doesn't contribute to the literal identity. In fact, we can have
@@ -1291,7 +1292,9 @@
}
void Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb, mir);
LOG(FATAL) << "Unknown MIR opcode not supported on this architecture";
+ UNREACHABLE();
}
} // namespace art
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 0f1d765..e12d305 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -68,40 +68,41 @@
false, // kIntrinsicUnsafePut
true, // kIntrinsicSystemArrayCopyCharArray
};
-COMPILE_ASSERT(arraysize(kIntrinsicIsStatic) == kInlineOpNop, check_arraysize_kIntrinsicIsStatic);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicDoubleCvt], DoubleCvt_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicFloatCvt], FloatCvt_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicReverseBits], ReverseBits_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicReverseBytes], ReverseBytes_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicAbsInt], AbsInt_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicAbsLong], AbsLong_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicAbsFloat], AbsFloat_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicAbsDouble], AbsDouble_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicMinMaxInt], MinMaxInt_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicMinMaxLong], MinMaxLong_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicMinMaxFloat], MinMaxFloat_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicMinMaxDouble], MinMaxDouble_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicSqrt], Sqrt_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicCeil], Ceil_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicFloor], Floor_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicRint], Rint_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicRoundFloat], RoundFloat_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicRoundDouble], RoundDouble_must_be_static);
-COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicReferenceGetReferent], Get_must_not_be_static);
-COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicCharAt], CharAt_must_not_be_static);
-COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicCompareTo], CompareTo_must_not_be_static);
-COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicIsEmptyOrLength], IsEmptyOrLength_must_not_be_static);
-COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicIndexOf], IndexOf_must_not_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicCurrentThread], CurrentThread_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicPeek], Peek_must_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicPoke], Poke_must_be_static);
-COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicCas], Cas_must_not_be_static);
-COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicUnsafeGet], UnsafeGet_must_not_be_static);
-COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicUnsafePut], UnsafePut_must_not_be_static);
-COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicSystemArrayCopyCharArray],
- SystemArrayCopyCharArray_must_be_static);
+static_assert(arraysize(kIntrinsicIsStatic) == kInlineOpNop,
+ "arraysize of kIntrinsicIsStatic unexpected");
+static_assert(kIntrinsicIsStatic[kIntrinsicDoubleCvt], "DoubleCvt must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicFloatCvt], "FloatCvt must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicReverseBits], "ReverseBits must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicReverseBytes], "ReverseBytes must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicAbsInt], "AbsInt must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicAbsLong], "AbsLong must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicAbsFloat], "AbsFloat must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicAbsDouble], "AbsDouble must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicMinMaxInt], "MinMaxInt must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicMinMaxLong], "MinMaxLong must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicMinMaxFloat], "MinMaxFloat must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicMinMaxDouble], "MinMaxDouble must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicSqrt], "Sqrt must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicCeil], "Ceil must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicFloor], "Floor must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicRint], "Rint must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicRoundFloat], "RoundFloat must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicRoundDouble], "RoundDouble must be static");
+static_assert(!kIntrinsicIsStatic[kIntrinsicReferenceGetReferent], "Get must not be static");
+static_assert(!kIntrinsicIsStatic[kIntrinsicCharAt], "CharAt must not be static");
+static_assert(!kIntrinsicIsStatic[kIntrinsicCompareTo], "CompareTo must not be static");
+static_assert(!kIntrinsicIsStatic[kIntrinsicIsEmptyOrLength], "IsEmptyOrLength must not be static");
+static_assert(!kIntrinsicIsStatic[kIntrinsicIndexOf], "IndexOf must not be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicCurrentThread], "CurrentThread must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicPeek], "Peek must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicPoke], "Poke must be static");
+static_assert(!kIntrinsicIsStatic[kIntrinsicCas], "Cas must not be static");
+static_assert(!kIntrinsicIsStatic[kIntrinsicUnsafeGet], "UnsafeGet must not be static");
+static_assert(!kIntrinsicIsStatic[kIntrinsicUnsafePut], "UnsafePut must not be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicSystemArrayCopyCharArray],
+ "SystemArrayCopyCharArray must be static");
-MIR* AllocReplacementMIR(MIRGraph* mir_graph, MIR* invoke, MIR* move_return) {
+MIR* AllocReplacementMIR(MIRGraph* mir_graph, MIR* invoke) {
MIR* insn = mir_graph->NewMIR();
insn->offset = invoke->offset;
insn->optimization_flags = MIR_CALLEE;
@@ -395,12 +396,15 @@
DexFileMethodInliner::DexFileMethodInliner()
: lock_("DexFileMethodInliner lock", kDexFileMethodInlinerLock),
dex_file_(NULL) {
- COMPILE_ASSERT(kClassCacheFirst == 0, kClassCacheFirst_not_0);
- COMPILE_ASSERT(arraysize(kClassCacheNames) == kClassCacheLast, bad_arraysize_kClassCacheNames);
- COMPILE_ASSERT(kNameCacheFirst == 0, kNameCacheFirst_not_0);
- COMPILE_ASSERT(arraysize(kNameCacheNames) == kNameCacheLast, bad_arraysize_kNameCacheNames);
- COMPILE_ASSERT(kProtoCacheFirst == 0, kProtoCacheFirst_not_0);
- COMPILE_ASSERT(arraysize(kProtoCacheDefs) == kProtoCacheLast, bad_arraysize_kProtoCacheNames);
+ static_assert(kClassCacheFirst == 0, "kClassCacheFirst not 0");
+ static_assert(arraysize(kClassCacheNames) == kClassCacheLast,
+ "bad arraysize for kClassCacheNames");
+ static_assert(kNameCacheFirst == 0, "kNameCacheFirst not 0");
+ static_assert(arraysize(kNameCacheNames) == kNameCacheLast,
+ "bad arraysize for kNameCacheNames");
+ static_assert(kProtoCacheFirst == 0, "kProtoCacheFirst not 0");
+ static_assert(arraysize(kProtoCacheDefs) == kProtoCacheLast,
+ "bad arraysize kProtoCacheNames");
}
DexFileMethodInliner::~DexFileMethodInliner() {
@@ -555,11 +559,11 @@
break;
case kInlineOpIGet:
move_result = mir_graph->FindMoveResult(bb, invoke);
- result = GenInlineIGet(mir_graph, bb, invoke, move_result, method, method_idx);
+ result = GenInlineIGet(mir_graph, bb, invoke, move_result, method);
break;
case kInlineOpIPut:
move_result = mir_graph->FindMoveResult(bb, invoke);
- result = GenInlineIPut(mir_graph, bb, invoke, move_result, method, method_idx);
+ result = GenInlineIPut(mir_graph, bb, invoke, move_result, method);
break;
default:
LOG(FATAL) << "Unexpected inline op: " << method.opcode;
@@ -737,7 +741,7 @@
method.d.data == 0u));
// Insert the CONST instruction.
- MIR* insn = AllocReplacementMIR(mir_graph, invoke, move_result);
+ MIR* insn = AllocReplacementMIR(mir_graph, invoke);
insn->dalvikInsn.opcode = Instruction::CONST;
insn->dalvikInsn.vA = move_result->dalvikInsn.vA;
insn->dalvikInsn.vB = method.d.data;
@@ -775,7 +779,7 @@
}
// Insert the move instruction
- MIR* insn = AllocReplacementMIR(mir_graph, invoke, move_result);
+ MIR* insn = AllocReplacementMIR(mir_graph, invoke);
insn->dalvikInsn.opcode = opcode;
insn->dalvikInsn.vA = move_result->dalvikInsn.vA;
insn->dalvikInsn.vB = arg;
@@ -784,8 +788,7 @@
}
bool DexFileMethodInliner::GenInlineIGet(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
- MIR* move_result, const InlineMethod& method,
- uint32_t method_idx) {
+ MIR* move_result, const InlineMethod& method) {
CompilationUnit* cu = mir_graph->GetCurrentDexCompilationUnit()->GetCompilationUnit();
if (cu->enable_debug & (1 << kDebugSlowFieldPath)) {
return false;
@@ -819,7 +822,7 @@
invoke->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
}
- MIR* insn = AllocReplacementMIR(mir_graph, invoke, move_result);
+ MIR* insn = AllocReplacementMIR(mir_graph, invoke);
insn->offset = invoke->offset;
insn->dalvikInsn.opcode = opcode;
insn->dalvikInsn.vA = move_result->dalvikInsn.vA;
@@ -836,8 +839,7 @@
}
bool DexFileMethodInliner::GenInlineIPut(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
- MIR* move_result, const InlineMethod& method,
- uint32_t method_idx) {
+ MIR* move_result, const InlineMethod& method) {
CompilationUnit* cu = mir_graph->GetCurrentDexCompilationUnit()->GetCompilationUnit();
if (cu->enable_debug & (1 << kDebugSlowFieldPath)) {
return false;
@@ -881,7 +883,7 @@
invoke->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
}
- MIR* insn = AllocReplacementMIR(mir_graph, invoke, move_result);
+ MIR* insn = AllocReplacementMIR(mir_graph, invoke);
insn->dalvikInsn.opcode = opcode;
insn->dalvikInsn.vA = src_reg;
insn->dalvikInsn.vB = object_reg;
@@ -895,7 +897,7 @@
bb->InsertMIRAfter(invoke, insn);
if (move_result != nullptr) {
- MIR* move = AllocReplacementMIR(mir_graph, invoke, move_result);
+ MIR* move = AllocReplacementMIR(mir_graph, invoke);
move->offset = move_result->offset;
if (move_result->dalvikInsn.opcode == Instruction::MOVE_RESULT) {
move->dalvikInsn.opcode = Instruction::MOVE_FROM16;
diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h
index 30a2d90..cb521da 100644
--- a/compiler/dex/quick/dex_file_method_inliner.h
+++ b/compiler/dex/quick/dex_file_method_inliner.h
@@ -31,9 +31,9 @@
class MethodVerifier;
} // namespace verifier
-struct BasicBlock;
+class BasicBlock;
struct CallInfo;
-struct MIR;
+class MIR;
class MIRGraph;
class Mir2Lir;
@@ -315,9 +315,9 @@
static bool GenInlineReturnArg(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
MIR* move_result, const InlineMethod& method);
static bool GenInlineIGet(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
- MIR* move_result, const InlineMethod& method, uint32_t method_idx);
+ MIR* move_result, const InlineMethod& method);
static bool GenInlineIPut(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
- MIR* move_result, const InlineMethod& method, uint32_t method_idx);
+ MIR* move_result, const InlineMethod& method);
ReaderWriterMutex lock_;
/*
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index d76a870..c5aa27c 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -66,8 +66,8 @@
void Mir2Lir::AddDivZeroCheckSlowPath(LIR* branch) {
class DivZeroCheckSlowPath : public Mir2Lir::LIRSlowPath {
public:
- DivZeroCheckSlowPath(Mir2Lir* m2l, LIR* branch)
- : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch) {
+ DivZeroCheckSlowPath(Mir2Lir* m2l, LIR* branch_in)
+ : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch_in) {
}
void Compile() OVERRIDE {
@@ -84,9 +84,10 @@
void Mir2Lir::GenArrayBoundsCheck(RegStorage index, RegStorage length) {
class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
public:
- ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch, RegStorage index, RegStorage length)
- : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch),
- index_(index), length_(length) {
+ ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch_in, RegStorage index_in,
+ RegStorage length_in)
+ : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch_in),
+ index_(index_in), length_(length_in) {
}
void Compile() OVERRIDE {
@@ -108,9 +109,9 @@
void Mir2Lir::GenArrayBoundsCheck(int index, RegStorage length) {
class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
public:
- ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch, int index, RegStorage length)
- : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch),
- index_(index), length_(length) {
+ ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch_in, int index_in, RegStorage length_in)
+ : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch_in),
+ index_(index_in), length_(length_in) {
}
void Compile() OVERRIDE {
@@ -212,8 +213,7 @@
}
void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
- RegLocation rl_src2, LIR* taken,
- LIR* fall_through) {
+ RegLocation rl_src2, LIR* taken) {
ConditionCode cond;
RegisterClass reg_class = (rl_src1.ref || rl_src2.ref) ? kRefReg : kCoreReg;
switch (opcode) {
@@ -276,8 +276,7 @@
OpCmpBranch(cond, rl_src1.reg, rl_src2.reg, taken);
}
-void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken,
- LIR* fall_through) {
+void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken) {
ConditionCode cond;
RegisterClass reg_class = rl_src.ref ? kRefReg : kCoreReg;
rl_src = LoadValue(rl_src, reg_class);
@@ -463,7 +462,7 @@
// Set up the loop counter (known to be > 0)
LoadConstant(r_idx, elems - 1);
// Generate the copy loop. Going backwards for convenience
- LIR* target = NewLIR0(kPseudoTargetLabel);
+ LIR* loop_head_target = NewLIR0(kPseudoTargetLabel);
// Copy next element
{
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -473,7 +472,7 @@
}
StoreBaseIndexed(r_dst, r_idx, r_val, 2, k32);
FreeTemp(r_val);
- OpDecAndBranch(kCondGe, r_idx, target);
+ OpDecAndBranch(kCondGe, r_idx, loop_head_target);
if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
// Restore the target pointer
OpRegRegImm(kOpAdd, ref_reg, r_dst,
@@ -957,7 +956,6 @@
RegLocation rl_method = LoadCurrMethod();
CheckRegLocation(rl_method);
RegStorage res_reg = AllocTempRef();
- RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
if (!cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
*cu_->dex_file,
type_idx)) {
@@ -967,6 +965,7 @@
RegLocation rl_result = GetReturn(kRefReg);
StoreValue(rl_dest, rl_result);
} else {
+ RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
// We don't need access checks, load type from dex cache
int32_t dex_cache_offset =
mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value();
@@ -983,10 +982,10 @@
// Object to generate the slow path for class resolution.
class SlowPath : public LIRSlowPath {
public:
- SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx,
- const RegLocation& rl_method, const RegLocation& rl_result) :
- LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx),
- rl_method_(rl_method), rl_result_(rl_result) {
+ SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont_in, const int type_idx_in,
+ const RegLocation& rl_method_in, const RegLocation& rl_result_in) :
+ LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont_in),
+ type_idx_(type_idx_in), rl_method_(rl_method_in), rl_result_(rl_result_in) {
}
void Compile() {
@@ -1047,9 +1046,10 @@
// Object to generate the slow path for string resolution.
class SlowPath : public LIRSlowPath {
public:
- SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, RegStorage r_method, int32_t string_idx) :
- LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont),
- r_method_(r_method), string_idx_(string_idx) {
+ SlowPath(Mir2Lir* m2l, LIR* fromfast_in, LIR* cont_in, RegStorage r_method_in,
+ int32_t string_idx_in) :
+ LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast_in, cont_in),
+ r_method_(r_method_in), string_idx_(string_idx_in) {
}
void Compile() {
@@ -1227,10 +1227,10 @@
class InitTypeSlowPath : public Mir2Lir::LIRSlowPath {
public:
- InitTypeSlowPath(Mir2Lir* m2l, LIR* branch, LIR* cont, uint32_t type_idx,
- RegLocation rl_src)
- : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, cont), type_idx_(type_idx),
- rl_src_(rl_src) {
+ InitTypeSlowPath(Mir2Lir* m2l, LIR* branch, LIR* cont, uint32_t type_idx_in,
+ RegLocation rl_src_in)
+ : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, cont), type_idx_(type_idx_in),
+ rl_src_(rl_src_in) {
}
void Compile() OVERRIDE {
@@ -1372,10 +1372,10 @@
// Slow path to initialize the type. Executed if the type is NULL.
class SlowPath : public LIRSlowPath {
public:
- SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx,
- const RegStorage class_reg) :
- LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx),
- class_reg_(class_reg) {
+ SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont_in, const int type_idx_in,
+ const RegStorage class_reg_in) :
+ LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont_in),
+ type_idx_(type_idx_in), class_reg_(class_reg_in) {
}
void Compile() {
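The _in suffixes on the slow-path constructor parameters are there, as the surrounding renames suggest, to keep parameter names from colliding with identically named variables in the enclosing function; these are local classes, so an unsuffixed parameter would shadow the outer local under -Wshadow. A rough sketch of the shape (names are illustrative):

    void GenSomethingSketch(int type_idx) {
      // Local class: a constructor parameter literally named 'type_idx'
      // would shadow the enclosing function's parameter under -Wshadow.
      class SlowPathSketch {
       public:
        explicit SlowPathSketch(int type_idx_in) : type_idx_(type_idx_in) {}
        int type_idx() const { return type_idx_; }
       private:
        const int type_idx_;
      };
      SlowPathSketch path(type_idx);
      (void)path.type_idx();
    }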
@@ -2134,12 +2134,14 @@
/* Call out to helper assembly routine that will null check obj and then lock it. */
void Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
+ UNUSED(opt_flags); // TODO: avoid null check with specialized non-null helper.
FlushAllRegs();
CallRuntimeHelperRegLocation(kQuickLockObject, rl_src, true);
}
/* Call out to helper assembly routine that will null check obj and then unlock it. */
void Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
+ UNUSED(opt_flags); // TODO: avoid null check with specialized non-null helper.
FlushAllRegs();
CallRuntimeHelperRegLocation(kQuickUnlockObject, rl_src, true);
}
@@ -2156,25 +2158,25 @@
const uint16_t entries = table[1];
// Chained cmp-and-branch.
const int32_t* as_int32 = reinterpret_cast<const int32_t*>(&table[2]);
- int32_t current_key = as_int32[0];
+ int32_t starting_key = as_int32[0];
const int32_t* targets = &as_int32[1];
rl_src = LoadValue(rl_src, kCoreReg);
int i = 0;
- for (; i < entries; i++, current_key++) {
- if (!InexpensiveConstantInt(current_key, Instruction::Code::IF_EQ)) {
+ for (; i < entries; i++) {
+ if (!InexpensiveConstantInt(starting_key + i, Instruction::Code::IF_EQ)) {
// Switch to using a temp and add.
break;
}
BasicBlock* case_block =
mir_graph_->FindBlock(current_dalvik_offset_ + targets[i]);
- OpCmpImmBranch(kCondEq, rl_src.reg, current_key, &block_label_list_[case_block->id]);
+ OpCmpImmBranch(kCondEq, rl_src.reg, starting_key + i, &block_label_list_[case_block->id]);
}
if (i < entries) {
// The rest do not seem to be inexpensive. Try to allocate a temp and use add.
RegStorage key_temp = AllocTypedTemp(false, kCoreReg, false);
if (key_temp.Valid()) {
- LoadConstantNoClobber(key_temp, current_key);
- for (; i < entries - 1; i++, current_key++) {
+ LoadConstantNoClobber(key_temp, starting_key + i);
+ for (; i < entries - 1; i++) {
BasicBlock* case_block =
mir_graph_->FindBlock(current_dalvik_offset_ + targets[i]);
OpCmpBranch(kCondEq, rl_src.reg, key_temp, &block_label_list_[case_block->id]);
@@ -2185,10 +2187,10 @@
OpCmpBranch(kCondEq, rl_src.reg, key_temp, &block_label_list_[case_block->id]);
} else {
// No free temp, just finish the old loop.
- for (; i < entries; i++, current_key++) {
+ for (; i < entries; i++) {
BasicBlock* case_block =
mir_graph_->FindBlock(current_dalvik_offset_ + targets[i]);
- OpCmpImmBranch(kCondEq, rl_src.reg, current_key, &block_label_list_[case_block->id]);
+ OpCmpImmBranch(kCondEq, rl_src.reg, starting_key + i, &block_label_list_[case_block->id]);
}
}
}
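
A quick sketch of the keying scheme the switch-lowering hunks above move to: each case key is recomputed as starting_key + i instead of bumping a separate current_key, so nothing is lost when the loop bails out of immediate compares and falls back to a temp register. Everything below (EmitCmpImmBranch, EmitCmpRegBranch, kMaxCheapImmediate, the printf stubs) is a hypothetical stand-in for the Mir2Lir helpers, not the real codegen interface.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Assumption for the sketch: keys up to this value count as "inexpensive" immediates.
    constexpr int32_t kMaxCheapImmediate = 255;

    void EmitCmpImmBranch(int32_t key, int target) { std::printf("cmp-imm %d -> L%d\n", key, target); }
    void EmitCmpRegBranch(int target)              { std::printf("cmp-reg -> L%d\n", target); }
    void EmitLoadTemp(int32_t key)                 { std::printf("load-temp %d\n", key); }
    void EmitAddTempOne()                          { std::printf("add-temp 1\n"); }

    void LowerPackedSwitch(int32_t starting_key, const std::vector<int>& targets) {
      size_t i = 0;
      // Cheap keys: compare the source register against an immediate directly.
      for (; i < targets.size(); ++i) {
        int32_t key = starting_key + static_cast<int32_t>(i);
        if (key > kMaxCheapImmediate) {
          break;  // switch to the temp-register form below
        }
        EmitCmpImmBranch(key, targets[i]);
      }
      if (i < targets.size()) {
        // Remaining keys: keep the running key in a temp and bump it per case.
        EmitLoadTemp(starting_key + static_cast<int32_t>(i));
        for (; i < targets.size(); ++i) {
          EmitCmpRegBranch(targets[i]);
          if (i + 1 < targets.size()) {
            EmitAddTempOne();
          }
        }
      }
    }

    int main() {
      LowerPackedSwitch(250, {10, 11, 12, 13, 14, 15, 16, 17});
      return 0;
    }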
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index bc4d00b..4cb12f1 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -44,8 +44,8 @@
void Mir2Lir::AddIntrinsicSlowPath(CallInfo* info, LIR* branch, LIR* resume) {
class IntrinsicSlowPathPath : public Mir2Lir::LIRSlowPath {
public:
- IntrinsicSlowPathPath(Mir2Lir* m2l, CallInfo* info, LIR* branch, LIR* resume = nullptr)
- : LIRSlowPath(m2l, info->offset, branch, resume), info_(info) {
+ IntrinsicSlowPathPath(Mir2Lir* m2l, CallInfo* info_in, LIR* branch_in, LIR* resume_in)
+ : LIRSlowPath(m2l, info_in->offset, branch_in, resume_in), info_(info_in) {
}
void Compile() {
@@ -473,8 +473,7 @@
cg->MarkPossibleNullPointerException(info->opt_flags);
}
-static bool CommonCallCodeLoadCodePointerIntoInvokeTgt(const CallInfo* info,
- const RegStorage* alt_from,
+static bool CommonCallCodeLoadCodePointerIntoInvokeTgt(const RegStorage* alt_from,
const CompilationUnit* cu, Mir2Lir* cg) {
if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
// Get the compiled code address [use *alt_from or kArg0, set kInvokeTgt]
@@ -492,9 +491,10 @@
*/
static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
int state, const MethodReference& target_method,
- uint32_t unused,
+ uint32_t,
uintptr_t direct_code, uintptr_t direct_method,
InvokeType type) {
+ UNUSED(info);
DCHECK(cu->instruction_set != kX86 && cu->instruction_set != kX86_64 &&
cu->instruction_set != kThumb2 && cu->instruction_set != kArm &&
cu->instruction_set != kArm64);
@@ -547,7 +547,7 @@
break;
case 3: // Grab the code from the method*
if (direct_code == 0) {
- if (CommonCallCodeLoadCodePointerIntoInvokeTgt(info, &arg0_ref, cu, cg)) {
+ if (CommonCallCodeLoadCodePointerIntoInvokeTgt(&arg0_ref, cu, cg)) {
break; // kInvokeTgt := arg0_ref->entrypoint
}
} else {
@@ -571,8 +571,9 @@
*/
static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
int state, const MethodReference& target_method,
- uint32_t method_idx, uintptr_t unused, uintptr_t unused2,
- InvokeType unused3) {
+ uint32_t method_idx, uintptr_t, uintptr_t,
+ InvokeType) {
+ UNUSED(target_method);
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
/*
* This is the fast path in which the target virtual method is
@@ -595,7 +596,7 @@
break;
}
case 3:
- if (CommonCallCodeLoadCodePointerIntoInvokeTgt(info, nullptr, cu, cg)) {
+ if (CommonCallCodeLoadCodePointerIntoInvokeTgt(nullptr, cu, cg)) {
break; // kInvokeTgt := kArg0->entrypoint
}
DCHECK(cu->instruction_set == kX86 || cu->instruction_set == kX86_64);
@@ -614,8 +615,7 @@
*/
static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
const MethodReference& target_method,
- uint32_t method_idx, uintptr_t unused,
- uintptr_t direct_method, InvokeType unused2) {
+ uint32_t method_idx, uintptr_t, uintptr_t, InvokeType) {
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
switch (state) {
@@ -641,7 +641,7 @@
break;
}
case 4:
- if (CommonCallCodeLoadCodePointerIntoInvokeTgt(info, nullptr, cu, cg)) {
+ if (CommonCallCodeLoadCodePointerIntoInvokeTgt(nullptr, cu, cg)) {
break; // kInvokeTgt := kArg0->entrypoint
}
DCHECK(cu->instruction_set == kX86 || cu->instruction_set == kX86_64);
@@ -655,9 +655,9 @@
static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info,
QuickEntrypointEnum trampoline, int state,
const MethodReference& target_method, uint32_t method_idx) {
+ UNUSED(info, method_idx);
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
-
/*
* This handles the case in which the base method is not fully
* resolved at compile time, we bail to a runtime helper.
@@ -684,32 +684,28 @@
static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info,
int state,
const MethodReference& target_method,
- uint32_t unused, uintptr_t unused2,
- uintptr_t unused3, InvokeType unused4) {
+ uint32_t, uintptr_t, uintptr_t, InvokeType) {
return NextInvokeInsnSP(cu, info, kQuickInvokeStaticTrampolineWithAccessCheck, state,
target_method, 0);
}
static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
const MethodReference& target_method,
- uint32_t unused, uintptr_t unused2,
- uintptr_t unused3, InvokeType unused4) {
+ uint32_t, uintptr_t, uintptr_t, InvokeType) {
return NextInvokeInsnSP(cu, info, kQuickInvokeDirectTrampolineWithAccessCheck, state,
target_method, 0);
}
static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
const MethodReference& target_method,
- uint32_t unused, uintptr_t unused2,
- uintptr_t unused3, InvokeType unused4) {
+ uint32_t, uintptr_t, uintptr_t, InvokeType) {
return NextInvokeInsnSP(cu, info, kQuickInvokeSuperTrampolineWithAccessCheck, state,
target_method, 0);
}
static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
const MethodReference& target_method,
- uint32_t unused, uintptr_t unused2,
- uintptr_t unused3, InvokeType unused4) {
+ uint32_t, uintptr_t, uintptr_t, InvokeType) {
return NextInvokeInsnSP(cu, info, kQuickInvokeVirtualTrampolineWithAccessCheck, state,
target_method, 0);
}
@@ -717,8 +713,7 @@
static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu,
CallInfo* info, int state,
const MethodReference& target_method,
- uint32_t unused, uintptr_t unused2,
- uintptr_t unused3, InvokeType unused4) {
+ uint32_t, uintptr_t, uintptr_t, InvokeType) {
return NextInvokeInsnSP(cu, info, kQuickInvokeInterfaceTrampolineWithAccessCheck, state,
target_method, 0);
}
@@ -795,13 +790,13 @@
if (rl_arg.reg.IsPair()) {
reg = rl_arg.reg.GetHigh();
} else {
- RegisterInfo* info = GetRegInfo(rl_arg.reg);
- info = info->FindMatchingView(RegisterInfo::kHighSingleStorageMask);
- if (info == nullptr) {
+ RegisterInfo* reg_info = GetRegInfo(rl_arg.reg);
+ reg_info = reg_info->FindMatchingView(RegisterInfo::kHighSingleStorageMask);
+ if (reg_info == nullptr) {
// NOTE: For hard float convention we won't split arguments across reg/mem.
UNIMPLEMENTED(FATAL) << "Needs hard float api.";
}
- reg = info->GetReg();
+ reg = reg_info->GetReg();
}
} else {
// kArg2 & rArg3 can safely be used here
@@ -1400,28 +1395,34 @@
}
bool Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
- // Currently implemented only for ARM64
+ // Currently implemented only for ARM64.
+ UNUSED(info, size);
return false;
}
bool Mir2Lir::GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) {
- // Currently implemented only for ARM64
+ // Currently implemented only for ARM64.
+ UNUSED(info, is_min, is_double);
return false;
}
bool Mir2Lir::GenInlinedCeil(CallInfo* info) {
+ UNUSED(info);
return false;
}
bool Mir2Lir::GenInlinedFloor(CallInfo* info) {
+ UNUSED(info);
return false;
}
bool Mir2Lir::GenInlinedRint(CallInfo* info) {
+ UNUSED(info);
return false;
}
bool Mir2Lir::GenInlinedRound(CallInfo* info, bool is_double) {
+ UNUSED(info, is_double);
return false;
}
@@ -1448,6 +1449,7 @@
}
bool Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
+ UNUSED(info);
return false;
}
@@ -1690,7 +1692,6 @@
const MirMethodLoweringInfo& method_info = mir_graph_->GetMethodLoweringInfo(info->mir);
cu_->compiler_driver->ProcessedInvoke(method_info.GetInvokeType(), method_info.StatsFlags());
- BeginInvoke(info);
InvokeType original_type = static_cast<InvokeType>(method_info.GetInvokeType());
info->type = method_info.GetSharpType();
bool fast_path = method_info.FastPath();
@@ -1734,7 +1735,6 @@
method_info.DirectCode(), method_info.DirectMethod(), original_type);
}
LIR* call_insn = GenCallInsn(method_info);
- EndInvoke(info);
MarkSafepointPC(call_insn);
ClobberCallerSave();
@@ -1755,6 +1755,7 @@
}
LIR* Mir2Lir::GenCallInsn(const MirMethodLoweringInfo& method_info) {
+ UNUSED(method_info);
DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64 &&
cu_->instruction_set != kThumb2 && cu_->instruction_set != kArm &&
cu_->instruction_set != kArm64);
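
Two idioms recur in the invoke hunks above for parameters that must stay in the NextCallInsn-style signatures: dropping the parameter name entirely (e.g. `uint32_t,`) when no path uses it, and `UNUSED(...)` when the parameter is worth keeping visible. The helper definition below is only a plausible sketch (the real one is elsewhere in the tree and not shown in this patch), and the surrounding types are invented for the example.

    #include <cstdint>

    // Sketch of an UNUSED() helper: takes any arguments and discards them,
    // silencing -Wunused-parameter without changing the function signature.
    template <typename... T>
    void UNUSED(const T&...) {}

    struct CallInfoDemo { uint32_t dex_pc; };

    // Unnamed parameter: the slot exists only to match a function-pointer type.
    int NextCallInsnDemo(CallInfoDemo* info, int state, uint32_t /* method_idx */) {
      UNUSED(info);  // kept in the signature, unused on this particular path
      return state + 1;
    }

    int main() {
      CallInfoDemo info{0};
      return NextCallInsnDemo(&info, 0, 42) == 1 ? 0 : 1;
    }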
diff --git a/compiler/dex/quick/mips/assemble_mips.cc b/compiler/dex/quick/mips/assemble_mips.cc
index 01d1a1e..ca71c30 100644
--- a/compiler/dex/quick/mips/assemble_mips.cc
+++ b/compiler/dex/quick/mips/assemble_mips.cc
@@ -146,12 +146,10 @@
kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF_HI | REG_DEF_LO | REG_USE01,
"div", "!0r,!1r", 4),
-#if __mips_isa_rev >= 2
ENCODING_MAP(kMipsExt, 0x7c000000,
kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 10, 6,
kFmtBitBlt, 15, 11, IS_QUAD_OP | REG_DEF0 | REG_USE1,
"ext", "!0r,!1r,!2d,!3D", 4),
-#endif
ENCODING_MAP(kMipsJal, 0x0c000000,
kFmtBitBlt, 25, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_DEF_LR,
@@ -240,7 +238,6 @@
kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
"sb", "!0r,!1d(!2r)", 4),
-#if __mips_isa_rev >= 2
ENCODING_MAP(kMipsSeb, 0x7c000420,
kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
@@ -249,7 +246,6 @@
kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
"seh", "!0r,!1r", 4),
-#endif
ENCODING_MAP(kMipsSh, 0xA4000000,
kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
@@ -700,12 +696,12 @@
// TUNING: replace with proper delay slot handling
if (encoder->size == 8) {
DCHECK(!IsPseudoLirOp(lir->opcode));
- const MipsEncodingMap *encoder = &EncodingMap[kMipsNop];
- uint32_t bits = encoder->skeleton;
- code_buffer_.push_back(bits & 0xff);
- code_buffer_.push_back((bits >> 8) & 0xff);
- code_buffer_.push_back((bits >> 16) & 0xff);
- code_buffer_.push_back((bits >> 24) & 0xff);
+ const MipsEncodingMap *encoder2 = &EncodingMap[kMipsNop];
+ uint32_t bits2 = encoder2->skeleton;
+ code_buffer_.push_back(bits2 & 0xff);
+ code_buffer_.push_back((bits2 >> 8) & 0xff);
+ code_buffer_.push_back((bits2 >> 16) & 0xff);
+ code_buffer_.push_back((bits2 >> 24) & 0xff);
}
}
return res;
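
The encoder → encoder2 rename above, like the `_in` suffixes on the slow-path constructor parameters earlier, is a -Wshadow fix: an inner declaration reused the name of an enclosing one. A minimal illustration with invented names:

    #include <iostream>

    struct EntryDemo { unsigned skeleton; };

    unsigned EmitWithNopPadding(const EntryDemo* encoder, bool pad) {
      unsigned bits = encoder->skeleton;
      if (pad) {
        // Renamed instead of redeclaring 'encoder'/'bits', which would shadow
        // the outer variables and trip -Wshadow.
        const EntryDemo nop{0x0u};
        const EntryDemo* encoder2 = &nop;
        unsigned bits2 = encoder2->skeleton;
        std::cout << "padding word: " << bits2 << "\n";
      }
      return bits;
    }

    int main() {
      EntryDemo div{0x1au};
      return EmitWithNopPadding(&div, true) == 0x1au ? 0 : 1;
    }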
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 8b5bc45..01784e2 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -24,9 +24,9 @@
namespace art {
-bool MipsMir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir,
- const InlineMethod& special) {
+bool MipsMir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special) {
// TODO
+ UNUSED(bb, mir, special);
return false;
}
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 508d474..dc6930c 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -121,7 +121,7 @@
void GenSelect(BasicBlock* bb, MIR* mir);
void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- int dest_reg_class) OVERRIDE;
+ RegisterClass dest_reg_class) OVERRIDE;
bool GenMemBarrier(MemBarrierKind barrier_kind);
void GenMoveException(RegLocation rl_dest);
void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
@@ -172,10 +172,10 @@
bool InexpensiveConstantLong(int64_t value);
bool InexpensiveConstantDouble(int64_t value);
- bool WideGPRsAreAliases() OVERRIDE {
+ bool WideGPRsAreAliases() const OVERRIDE {
return false; // Wide GPRs are formed by pairing.
}
- bool WideFPRsAreAliases() OVERRIDE {
+ bool WideFPRsAreAliases() const OVERRIDE {
return false; // Wide FPRs are formed by pairing.
}
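
WideGPRsAreAliases()/WideFPRsAreAliases() can only gain `const` here because the pure-virtual declarations in mir_to_lir.h change in the same patch; with OVERRIDE (presumably the C++11 `override` keyword behind a macro), a cv-qualifier mismatch is a hard error rather than a silently added new virtual. A minimal illustration:

    struct BackendDemo {
      virtual bool WideGPRsAreAliases() const = 0;
      virtual ~BackendDemo() {}
    };

    struct MipsLikeDemo : BackendDemo {
      // Must also be const, or 'override' refuses to compile.
      bool WideGPRsAreAliases() const override { return false; }
    };

    int main() {
      MipsLikeDemo backend;
      const BackendDemo& base = backend;
      return base.WideGPRsAreAliases() ? 1 : 0;
    }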
diff --git a/compiler/dex/quick/mips/fp_mips.cc b/compiler/dex/quick/mips/fp_mips.cc
index 3a4128a..4315915 100644
--- a/compiler/dex/quick/mips/fp_mips.cc
+++ b/compiler/dex/quick/mips/fp_mips.cc
@@ -207,8 +207,8 @@
StoreValue(rl_dest, rl_result);
}
-void MipsMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir,
- bool gt_bias, bool is_double) {
+void MipsMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) {
+ UNUSED(bb, mir, gt_bias, is_double);
UNIMPLEMENTED(FATAL) << "Need codegen for fused fp cmp branch";
}
@@ -230,7 +230,8 @@
}
bool MipsMir2Lir::GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) {
- // TODO: need Mips implementation
+ // TODO: need Mips implementation.
+ UNUSED(info, is_min, is_long);
return false;
}
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index baf7311..d58ddb0 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -217,7 +217,8 @@
void MipsMir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- int dest_reg_class) {
+ RegisterClass dest_reg_class) {
+ UNUSED(dest_reg_class);
// Implement as a branch-over.
// TODO: Conditional move?
LoadConstant(rs_dest, true_val);
@@ -228,10 +229,12 @@
}
void MipsMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb, mir);
UNIMPLEMENTED(FATAL) << "Need codegen for select";
}
void MipsMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb, mir);
UNIMPLEMENTED(FATAL) << "Need codegen for fused long cmp branch";
}
@@ -262,34 +265,39 @@
return rl_result;
}
-RegLocation MipsMir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div, int flags) {
+RegLocation MipsMir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
+ bool is_div, int flags) {
+ UNUSED(rl_dest, rl_src1, rl_src2, is_div, flags);
LOG(FATAL) << "Unexpected use of GenDivRem for Mips";
- return rl_dest;
+ UNREACHABLE();
}
-RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div) {
+RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
+ bool is_div) {
+ UNUSED(rl_dest, rl_src1, lit, is_div);
LOG(FATAL) << "Unexpected use of GenDivRemLit for Mips";
- return rl_dest;
+ UNREACHABLE();
}
bool MipsMir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
- DCHECK_NE(cu_->instruction_set, kThumb2);
+ UNUSED(info, is_long, is_object);
return false;
}
bool MipsMir2Lir::GenInlinedAbsFloat(CallInfo* info) {
- // TODO - add Mips implementation
+ UNUSED(info);
+ // TODO: add Mips implementation.
return false;
}
bool MipsMir2Lir::GenInlinedAbsDouble(CallInfo* info) {
- // TODO - add Mips implementation
+ UNUSED(info);
+ // TODO: add Mips implementation.
return false;
}
bool MipsMir2Lir::GenInlinedSqrt(CallInfo* info) {
- DCHECK_NE(cu_->instruction_set, kThumb2);
+ UNUSED(info);
return false;
}
@@ -325,23 +333,27 @@
}
LIR* MipsMir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
+ UNUSED(reg, target);
LOG(FATAL) << "Unexpected use of OpPcRelLoad for Mips";
- return NULL;
+ UNREACHABLE();
}
LIR* MipsMir2Lir::OpVldm(RegStorage r_base, int count) {
+ UNUSED(r_base, count);
LOG(FATAL) << "Unexpected use of OpVldm for Mips";
- return NULL;
+ UNREACHABLE();
}
LIR* MipsMir2Lir::OpVstm(RegStorage r_base, int count) {
+ UNUSED(r_base, count);
LOG(FATAL) << "Unexpected use of OpVstm for Mips";
- return NULL;
+ UNREACHABLE();
}
void MipsMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
RegLocation rl_result, int lit,
int first_bit, int second_bit) {
+ UNUSED(lit);
RegStorage t_reg = AllocTemp();
OpRegRegImm(kOpLsl, t_reg, rl_src.reg, second_bit - first_bit);
OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, t_reg);
@@ -373,27 +385,31 @@
bool MipsMir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
RegLocation rl_src, RegLocation rl_dest, int lit) {
+ UNUSED(dalvik_opcode, is_div, rl_src, rl_dest, lit);
LOG(FATAL) << "Unexpected use of smallLiteralDive in Mips";
- return false;
+ UNREACHABLE();
}
bool MipsMir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
+ UNUSED(rl_src, rl_dest, lit);
LOG(FATAL) << "Unexpected use of easyMultiply in Mips";
- return false;
+ UNREACHABLE();
}
LIR* MipsMir2Lir::OpIT(ConditionCode cond, const char* guide) {
+ UNUSED(cond, guide);
LOG(FATAL) << "Unexpected use of OpIT in Mips";
- return NULL;
+ UNREACHABLE();
}
void MipsMir2Lir::OpEndIT(LIR* it) {
+ UNUSED(it);
LOG(FATAL) << "Unexpected use of OpEndIT in Mips";
}
-
void MipsMir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2) {
+ UNUSED(opcode);
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
@@ -416,6 +432,7 @@
void MipsMir2Lir::GenSubLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2) {
+ UNUSED(opcode);
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
@@ -629,6 +646,7 @@
void MipsMir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_shift, int flags) {
+ UNUSED(flags);
// Default implementation is just to ignore the constant case.
GenShiftOpLong(opcode, rl_dest, rl_src1, rl_shift);
}
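
Several hunks above swap a dead `return rl_dest;` / `return NULL;` after LOG(FATAL) for UNREACHABLE(). The logging statement is not declared noreturn, so without the marker the compiler still demands a return value; UNREACHABLE() tells it control cannot reach that point. The definitions below are a sketch under the assumption that UNREACHABLE() wraps a compiler builtin; they are not the project's real logging macros.

    #include <cstdio>
    #include <cstdlib>

    #define UNREACHABLE() __builtin_unreachable()  // assumption: GCC/Clang builtin

    // Stands in for LOG(FATAL): aborts at runtime, but is *not* marked noreturn,
    // which is exactly why the caller below still needs UNREACHABLE().
    void FatalLog(const char* msg) {
      std::fprintf(stderr, "%s\n", msg);
      std::abort();
    }

    int UnsupportedDivRem() {
      FatalLog("Unexpected use of GenDivRem for Mips");
      UNREACHABLE();  // no bogus "return 0;" needed, and no -Wreturn-type warning
    }

    int main() {
      return 0;  // calling UnsupportedDivRem() would abort
    }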
diff --git a/compiler/dex/quick/mips/mips_lir.h b/compiler/dex/quick/mips/mips_lir.h
index 495eb16..3615916 100644
--- a/compiler/dex/quick/mips/mips_lir.h
+++ b/compiler/dex/quick/mips/mips_lir.h
@@ -142,7 +142,7 @@
// This bit determines how the CPU access FP registers.
#define FR_BIT 0
-enum MipsNativeRegisterPool {
+enum MipsNativeRegisterPool { // private marker to avoid generate-operator-out.py from processing.
rZERO = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 0,
rAT = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 1,
rV0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 2,
@@ -408,9 +408,7 @@
kMipsBnez, // bnez s,o [000101] s[25..21] [00000] o[15..0].
kMipsBne, // bne s,t,o [000101] s[25..21] t[20..16] o[15..0].
kMipsDiv, // div s,t [000000] s[25..21] t[20..16] [0000000000011010].
-#if __mips_isa_rev >= 2
kMipsExt, // ext t,s,p,z [011111] s[25..21] t[20..16] z[15..11] p[10..6] [000000].
-#endif
kMipsJal, // jal t [000011] t[25..0].
kMipsJalr, // jalr d,s [000000] s[25..21] [00000] d[15..11] hint[10..6] [001001].
kMipsJr, // jr s [000000] s[25..21] [0000000000] hint[10..6] [001000].
@@ -433,10 +431,8 @@
kMipsOri, // ori t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0].
kMipsPref, // pref h,o(b) [101011] b[25..21] h[20..16] o[15..0].
kMipsSb, // sb t,o(b) [101000] b[25..21] t[20..16] o[15..0].
-#if __mips_isa_rev >= 2
kMipsSeb, // seb d,t [01111100000] t[20..16] d[15..11] [10000100000].
kMipsSeh, // seh d,t [01111100000] t[20..16] d[15..11] [11000100000].
-#endif
kMipsSh, // sh t,o(b) [101001] b[25..21] t[20..16] o[15..0].
kMipsSll, // sll d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000000].
kMipsSllv, // sllv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000100].
@@ -481,15 +477,17 @@
kMipsUndefined, // undefined [011001xxxxxxxxxxxxxxxx].
kMipsLast
};
+std::ostream& operator<<(std::ostream& os, const MipsOpCode& rhs);
// Instruction assembly field_loc kind.
enum MipsEncodingKind {
kFmtUnused,
- kFmtBitBlt, /* Bit string using end/start */
- kFmtDfp, /* Double FP reg */
- kFmtSfp, /* Single FP reg */
- kFmtBlt5_2, /* Same 5-bit field to 2 locations */
+ kFmtBitBlt, // Bit string using end/start.
+ kFmtDfp, // Double FP reg.
+  kFmtSfp,       // Single FP reg.
+ kFmtBlt5_2, // Same 5-bit field to 2 locations.
};
+std::ostream& operator<<(std::ostream& os, const MipsEncodingKind& rhs);
// Struct used to define the snippet positions for each MIPS opcode.
struct MipsEncodingMap {
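
The new operator<< declarations for MipsOpCode and MipsEncodingKind exist so that streaming one of these enums into a LOG/DCHECK message prints a name rather than a bare integer (most such operators come from generate-operator-out.py; the register-pool enum above is explicitly marked so the script skips it). A tiny hand-written equivalent over an invented enum:

    #include <iostream>

    enum DemoOpCode { kDemoNop, kDemoAddu, kDemoLast };

    std::ostream& operator<<(std::ostream& os, const DemoOpCode& rhs) {
      switch (rhs) {
        case kDemoNop:  return os << "kDemoNop";
        case kDemoAddu: return os << "kDemoAddu";
        case kDemoLast: return os << "kDemoLast";
      }
      return os << "DemoOpCode[" << static_cast<int>(rhs) << "]";  // out-of-range fallback
    }

    int main() {
      std::cout << kDemoAddu << std::endl;  // prints "kDemoAddu", not "1"
      return 0;
    }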
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index d3719ab..4a340ec 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -421,6 +421,7 @@
}
bool MipsMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
+ UNUSED(barrier_kind);
#if ANDROID_SMP != 0
NewLIR1(kMipsSync, 0 /* Only stype currently supported */);
return true;
@@ -574,11 +575,10 @@
MipsMir2Lir::MipsMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
: Mir2Lir(cu, mir_graph, arena) {
for (int i = 0; i < kMipsLast; i++) {
- if (MipsMir2Lir::EncodingMap[i].opcode != i) {
- LOG(FATAL) << "Encoding order for " << MipsMir2Lir::EncodingMap[i].name
- << " is wrong: expecting " << i << ", seeing "
- << static_cast<int>(MipsMir2Lir::EncodingMap[i].opcode);
- }
+ DCHECK_EQ(MipsMir2Lir::EncodingMap[i].opcode, i)
+ << "Encoding order for " << MipsMir2Lir::EncodingMap[i].name
+ << " is wrong: expecting " << i << ", seeing "
+ << static_cast<int>(MipsMir2Lir::EncodingMap[i].opcode);
}
}
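
The constructor's EncodingMap sanity loop becomes a single DCHECK_EQ with a streamed message: the check can disappear from release builds, and the message text is only built when the check fails. The macro below is a deliberately simplified stand-in for that pattern, not the implementation from base/logging.h.

    #include <cstdlib>
    #include <iostream>
    #include <sstream>

    // Simplified stand-in for DCHECK_EQ: the streamed message is constructed only
    // on the failure path.
    #define DEMO_CHECK_EQ(a, b) \
      if ((a) == (b)) {} else FailureStream(#a " == " #b)

    struct FailureStream {
      std::ostringstream oss;
      explicit FailureStream(const char* cond) { oss << "Check failed: " << cond << " "; }
      template <typename T>
      FailureStream& operator<<(const T& value) { oss << value; return *this; }
      ~FailureStream() { std::cerr << oss.str() << std::endl; std::abort(); }
    };

    int main() {
      const int expected_opcode = 7;
      const int table_opcode = 7;
      DEMO_CHECK_EQ(table_opcode, expected_opcode) << "encoding order is wrong for entry " << 7;
      return 0;  // reached because the check passes
    }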
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 7178ede..044972c 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -56,14 +56,17 @@
}
bool MipsMir2Lir::InexpensiveConstantFloat(int32_t value) {
+ UNUSED(value);
return false; // TUNING
}
bool MipsMir2Lir::InexpensiveConstantLong(int64_t value) {
+ UNUSED(value);
return false; // TUNING
}
bool MipsMir2Lir::InexpensiveConstantDouble(int64_t value) {
+ UNUSED(value);
return false; // TUNING
}
@@ -320,25 +323,28 @@
return NewLIR3(kMipsAndi, r_dest_src1.GetReg(), r_src2.GetReg(), 0xFFFF);
default:
LOG(FATAL) << "Bad case in OpRegReg";
- break;
+ UNREACHABLE();
}
return NewLIR2(opcode, r_dest_src1.GetReg(), r_src2.GetReg());
}
LIR* MipsMir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset,
MoveType move_type) {
+ UNUSED(r_dest, r_base, offset, move_type);
UNIMPLEMENTED(FATAL);
- return nullptr;
+ UNREACHABLE();
}
LIR* MipsMir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
+ UNUSED(r_base, offset, r_src, move_type);
UNIMPLEMENTED(FATAL);
- return nullptr;
+ UNREACHABLE();
}
LIR* MipsMir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
+ UNUSED(op, cc, r_dest, r_src);
LOG(FATAL) << "Unexpected use of OpCondRegReg for MIPS";
- return NULL;
+ UNREACHABLE();
}
LIR* MipsMir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
@@ -681,16 +687,19 @@
}
LIR* MipsMir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
+ UNUSED(op, r_base, disp);
LOG(FATAL) << "Unexpected use of OpMem for MIPS";
- return NULL;
+ UNREACHABLE();
}
LIR* MipsMir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
+ UNUSED(cc, target);
LOG(FATAL) << "Unexpected use of OpCondBranch for MIPS";
- return NULL;
+ UNREACHABLE();
}
LIR* MipsMir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
+ UNUSED(trampoline); // The address of the trampoline is already loaded into r_tgt.
return OpReg(op, r_tgt);
}
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 408606d..533a677 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -647,7 +647,6 @@
case Instruction::IF_GT:
case Instruction::IF_LE: {
LIR* taken = &label_list[bb->taken];
- LIR* fall_through = &label_list[bb->fall_through];
// Result known at compile time?
if (rl_src[0].is_const && rl_src[1].is_const) {
bool is_taken = EvaluateBranch(opcode, mir_graph_->ConstantValue(rl_src[0].orig_sreg),
@@ -664,7 +663,7 @@
!mir_graph_->HasSuspendTestBetween(bb, bb->fall_through))) {
GenSuspendTest(opt_flags);
}
- GenCompareAndBranch(opcode, rl_src[0], rl_src[1], taken, fall_through);
+ GenCompareAndBranch(opcode, rl_src[0], rl_src[1], taken);
}
break;
}
@@ -676,7 +675,6 @@
case Instruction::IF_GTZ:
case Instruction::IF_LEZ: {
LIR* taken = &label_list[bb->taken];
- LIR* fall_through = &label_list[bb->fall_through];
// Result known at compile time?
if (rl_src[0].is_const) {
bool is_taken = EvaluateBranch(opcode, mir_graph_->ConstantValue(rl_src[0].orig_sreg), 0);
@@ -692,7 +690,7 @@
!mir_graph_->HasSuspendTestBetween(bb, bb->fall_through))) {
GenSuspendTest(opt_flags);
}
- GenCompareZeroAndBranch(opcode, rl_src[0], taken, fall_through);
+ GenCompareZeroAndBranch(opcode, rl_src[0], taken);
}
break;
}
@@ -1377,8 +1375,9 @@
}
size_t Mir2Lir::GetInstructionOffset(LIR* lir) {
- UNIMPLEMENTED(FATAL) << "Unsuppored GetInstructionOffset()";
- return 0;
+ UNUSED(lir);
+ UNIMPLEMENTED(FATAL) << "Unsupported GetInstructionOffset()";
+ UNREACHABLE();
}
} // namespace art
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index f4e6dfe..4623f79 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -33,6 +33,7 @@
#include "utils/array_ref.h"
#include "utils/arena_allocator.h"
#include "utils/arena_containers.h"
+#include "utils/arena_object.h"
#include "utils/stack_checks.h"
namespace art {
@@ -129,11 +130,11 @@
#define INVALID_SREG (-1)
#endif
-struct BasicBlock;
+class BasicBlock;
struct CallInfo;
struct CompilationUnit;
struct InlineMethod;
-struct MIR;
+class MIR;
struct LIR;
struct RegisterInfo;
class DexFileMethodInliner;
@@ -318,13 +319,10 @@
* Working plan is, for all targets, to follow mechanism 1 for 64-bit core registers, and
* mechanism 2 for aliased float registers and x86 vector registers.
*/
- class RegisterInfo {
+ class RegisterInfo : public ArenaObject<kArenaAllocRegAlloc> {
public:
RegisterInfo(RegStorage r, const ResourceMask& mask = kEncodeAll);
~RegisterInfo() {}
- static void* operator new(size_t size, ArenaAllocator* arena) {
- return arena->Alloc(size, kArenaAllocRegAlloc);
- }
static const uint32_t k32SoloStorageMask = 0x00000001;
static const uint32_t kLowSingleStorageMask = 0x00000001;
@@ -420,7 +418,7 @@
RegisterInfo* alias_chain_; // Chain of aliased registers.
};
- class RegisterPool {
+ class RegisterPool : public DeletableArenaObject<kArenaAllocRegAlloc> {
public:
RegisterPool(Mir2Lir* m2l, ArenaAllocator* arena,
const ArrayRef<const RegStorage>& core_regs,
@@ -434,10 +432,6 @@
const ArrayRef<const RegStorage>& sp_temps,
const ArrayRef<const RegStorage>& dp_temps);
~RegisterPool() {}
- static void* operator new(size_t size, ArenaAllocator* arena) {
- return arena->Alloc(size, kArenaAllocRegAlloc);
- }
- static void operator delete(void* ptr) { UNUSED(ptr); }
void ResetNextTemp() {
next_core_reg_ = 0;
next_sp_reg_ = 0;
@@ -501,20 +495,15 @@
// has completed.
//
- class LIRSlowPath {
+ class LIRSlowPath : public ArenaObject<kArenaAllocSlowPaths> {
public:
LIRSlowPath(Mir2Lir* m2l, const DexOffset dexpc, LIR* fromfast,
LIR* cont = nullptr) :
m2l_(m2l), cu_(m2l->cu_), current_dex_pc_(dexpc), fromfast_(fromfast), cont_(cont) {
- m2l->StartSlowPath(this);
}
virtual ~LIRSlowPath() {}
virtual void Compile() = 0;
- static void* operator new(size_t size, ArenaAllocator* arena) {
- return arena->Alloc(size, kArenaAllocData);
- }
-
LIR *GetContinuationLabel() {
return cont_;
}
@@ -694,11 +683,6 @@
void MarkPackedCaseLabels(Mir2Lir::SwitchTable* tab_rec);
void MarkSparseCaseLabels(Mir2Lir::SwitchTable* tab_rec);
- virtual void StartSlowPath(LIRSlowPath* slowpath) {}
- virtual void BeginInvoke(CallInfo* info) {}
- virtual void EndInvoke(CallInfo* info) {}
-
-
// Handle bookkeeping to convert a wide RegLocation to a narrow RegLocation. No code generated.
virtual RegLocation NarrowRegLoc(RegLocation loc);
@@ -822,10 +806,9 @@
LIR* GenNullCheck(RegStorage m_reg, int opt_flags);
LIR* GenExplicitNullCheck(RegStorage m_reg, int opt_flags);
virtual void GenImplicitNullCheck(RegStorage reg, int opt_flags);
- void GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
- RegLocation rl_src2, LIR* taken, LIR* fall_through);
- void GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src,
- LIR* taken, LIR* fall_through);
+ void GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1, RegLocation rl_src2,
+ LIR* taken);
+ void GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken);
virtual void GenIntToLong(RegLocation rl_dest, RegLocation rl_src);
void GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src);
@@ -1192,17 +1175,17 @@
virtual RegStorage TargetReg(SpecialTargetRegister reg, WideKind wide_kind) {
if (wide_kind == kWide) {
DCHECK((kArg0 <= reg && reg < kArg7) || (kFArg0 <= reg && reg < kFArg15) || (kRet0 == reg));
- COMPILE_ASSERT((kArg1 == kArg0 + 1) && (kArg2 == kArg1 + 1) && (kArg3 == kArg2 + 1) &&
- (kArg4 == kArg3 + 1) && (kArg5 == kArg4 + 1) && (kArg6 == kArg5 + 1) &&
- (kArg7 == kArg6 + 1), kargs_range_unexpected);
- COMPILE_ASSERT((kFArg1 == kFArg0 + 1) && (kFArg2 == kFArg1 + 1) && (kFArg3 == kFArg2 + 1) &&
- (kFArg4 == kFArg3 + 1) && (kFArg5 == kFArg4 + 1) && (kFArg6 == kFArg5 + 1) &&
- (kFArg7 == kFArg6 + 1) && (kFArg8 == kFArg7 + 1) && (kFArg9 == kFArg8 + 1) &&
- (kFArg10 == kFArg9 + 1) && (kFArg11 == kFArg10 + 1) &&
- (kFArg12 == kFArg11 + 1) && (kFArg13 == kFArg12 + 1) &&
- (kFArg14 == kFArg13 + 1) && (kFArg15 == kFArg14 + 1),
- kfargs_range_unexpected);
- COMPILE_ASSERT(kRet1 == kRet0 + 1, kret_range_unexpected);
+ static_assert((kArg1 == kArg0 + 1) && (kArg2 == kArg1 + 1) && (kArg3 == kArg2 + 1) &&
+ (kArg4 == kArg3 + 1) && (kArg5 == kArg4 + 1) && (kArg6 == kArg5 + 1) &&
+ (kArg7 == kArg6 + 1), "kargs range unexpected");
+ static_assert((kFArg1 == kFArg0 + 1) && (kFArg2 == kFArg1 + 1) && (kFArg3 == kFArg2 + 1) &&
+ (kFArg4 == kFArg3 + 1) && (kFArg5 == kFArg4 + 1) && (kFArg6 == kFArg5 + 1) &&
+ (kFArg7 == kFArg6 + 1) && (kFArg8 == kFArg7 + 1) && (kFArg9 == kFArg8 + 1) &&
+ (kFArg10 == kFArg9 + 1) && (kFArg11 == kFArg10 + 1) &&
+ (kFArg12 == kFArg11 + 1) && (kFArg13 == kFArg12 + 1) &&
+ (kFArg14 == kFArg13 + 1) && (kFArg15 == kFArg14 + 1),
+ "kfargs range unexpected");
+ static_assert(kRet1 == kRet0 + 1, "kret range unexpected");
return RegStorage::MakeRegPair(TargetReg(reg),
TargetReg(static_cast<SpecialTargetRegister>(reg + 1)));
} else {
@@ -1350,7 +1333,7 @@
*/
virtual void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- int dest_reg_class) = 0;
+ RegisterClass dest_reg_class) = 0;
/**
* @brief Used to generate a memory barrier in an architecture specific way.
@@ -1452,6 +1435,7 @@
virtual bool InexpensiveConstantLong(int64_t value) = 0;
virtual bool InexpensiveConstantDouble(int64_t value) = 0;
virtual bool InexpensiveConstantInt(int32_t value, Instruction::Code opcode) {
+ UNUSED(opcode);
return InexpensiveConstantInt(value);
}
@@ -1642,12 +1626,12 @@
/**
* Returns true iff wide GPRs are just different views on the same physical register.
*/
- virtual bool WideGPRsAreAliases() = 0;
+ virtual bool WideGPRsAreAliases() const = 0;
/**
* Returns true iff wide FPRs are just different views on the same physical register.
*/
- virtual bool WideFPRsAreAliases() = 0;
+ virtual bool WideFPRsAreAliases() const = 0;
enum class WidenessCheck { // private
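
RegisterInfo, RegisterPool and LIRSlowPath stop declaring their own placement operator new and inherit it from ArenaObject/DeletableArenaObject (utils/arena_object.h) instead; the LIRSlowPath constructor also drops the StartSlowPath() callback along with the BeginInvoke/EndInvoke hooks. The sketch below shows the idea of the shared base class using a toy arena; it is not the real ArenaAllocator API.

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Toy bump-style arena standing in for ArenaAllocator.
    class ToyArena {
     public:
      ~ToyArena() { for (uint8_t* chunk : chunks_) delete[] chunk; }
      void* Alloc(size_t size) {
        chunks_.push_back(new uint8_t[size]);
        return chunks_.back();
      }
     private:
      std::vector<uint8_t*> chunks_;
    };

    // Shared base: derived classes get arena placement-new without repeating it.
    class ToyArenaObject {
     public:
      static void* operator new(size_t size, ToyArena* arena) { return arena->Alloc(size); }
      static void operator delete(void*, size_t) {}  // arena memory is reclaimed wholesale
    };

    class SlowPathDemo : public ToyArenaObject {
     public:
      explicit SlowPathDemo(int dex_pc) : dex_pc_(dex_pc) {}
      int dex_pc() const { return dex_pc_; }
     private:
      int dex_pc_;
    };

    int main() {
      ToyArena arena;
      SlowPathDemo* slow_path = new (&arena) SlowPathDemo(42);  // mirrors "new (arena_) SlowPath(...)" call sites
      return slow_path->dex_pc() == 42 ? 0 : 1;
    }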
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index 8f7bd30..a54c55f 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -78,14 +78,14 @@
DISALLOW_COPY_AND_ASSIGN(QuickCompiler);
};
-COMPILE_ASSERT(0U == static_cast<size_t>(kNone), kNone_not_0);
-COMPILE_ASSERT(1U == static_cast<size_t>(kArm), kArm_not_1);
-COMPILE_ASSERT(2U == static_cast<size_t>(kArm64), kArm64_not_2);
-COMPILE_ASSERT(3U == static_cast<size_t>(kThumb2), kThumb2_not_3);
-COMPILE_ASSERT(4U == static_cast<size_t>(kX86), kX86_not_4);
-COMPILE_ASSERT(5U == static_cast<size_t>(kX86_64), kX86_64_not_5);
-COMPILE_ASSERT(6U == static_cast<size_t>(kMips), kMips_not_6);
-COMPILE_ASSERT(7U == static_cast<size_t>(kMips64), kMips64_not_7);
+static_assert(0U == static_cast<size_t>(kNone), "kNone not 0");
+static_assert(1U == static_cast<size_t>(kArm), "kArm not 1");
+static_assert(2U == static_cast<size_t>(kArm64), "kArm64 not 2");
+static_assert(3U == static_cast<size_t>(kThumb2), "kThumb2 not 3");
+static_assert(4U == static_cast<size_t>(kX86), "kX86 not 4");
+static_assert(5U == static_cast<size_t>(kX86_64), "kX86_64 not 5");
+static_assert(6U == static_cast<size_t>(kMips), "kMips not 6");
+static_assert(7U == static_cast<size_t>(kMips64), "kMips64 not 7");
// Additional disabled optimizations (over generally disabled) per instruction set.
static constexpr uint32_t kDisabledOptimizationsPerISA[] = {
@@ -118,7 +118,8 @@
// 7 = kMips64.
~0U
};
-COMPILE_ASSERT(sizeof(kDisabledOptimizationsPerISA) == 8 * sizeof(uint32_t), kDisabledOpts_unexp);
+static_assert(sizeof(kDisabledOptimizationsPerISA) == 8 * sizeof(uint32_t),
+ "kDisabledOpts unexpected");
// Supported shorty types per instruction set. nullptr means that all are available.
// Z : boolean
@@ -149,7 +150,7 @@
// 7 = kMips64.
""
};
-COMPILE_ASSERT(sizeof(kSupportedTypes) == 8 * sizeof(char*), kSupportedTypes_unexp);
+static_assert(sizeof(kSupportedTypes) == 8 * sizeof(char*), "kSupportedTypes unexpected");
static int kAllOpcodes[] = {
Instruction::NOP,
@@ -460,7 +461,7 @@
// 7 = kMips64.
kAllOpcodes
};
-COMPILE_ASSERT(sizeof(kUnsupportedOpcodes) == 8 * sizeof(int*), kUnsupportedOpcodes_unexp);
+static_assert(sizeof(kUnsupportedOpcodes) == 8 * sizeof(int*), "kUnsupportedOpcodes unexpected");
// Size of the arrays stored above.
static const size_t kUnsupportedOpcodesSize[] = {
@@ -481,8 +482,8 @@
// 7 = kMips64.
arraysize(kAllOpcodes),
};
-COMPILE_ASSERT(sizeof(kUnsupportedOpcodesSize) == 8 * sizeof(size_t),
- kUnsupportedOpcodesSize_unexp);
+static_assert(sizeof(kUnsupportedOpcodesSize) == 8 * sizeof(size_t),
+ "kUnsupportedOpcodesSize unexpected");
// The maximum amount of Dalvik register in a method for which we will start compiling. Tries to
// avoid an abort when we need to manage more SSA registers than we can.
@@ -625,6 +626,7 @@
}
Backend* QuickCompiler::GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) const {
+ UNUSED(compilation_unit);
Mir2Lir* mir_to_lir = nullptr;
switch (cu->instruction_set) {
case kThumb2:
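
The COMPILE_ASSERT → static_assert conversions here (and in resource_mask.cc below) swap an identifier-shaped message for a plain string and need no support macro. For contrast, the pre-C++11 macro was built on something like the negative-array-size trick; the definition below is an approximation, not the exact one from the tree.

    #include <cstddef>

    // Approximate pre-C++11 emulation: the array type is ill-formed when expr is false.
    #define COMPILE_ASSERT(expr, msg) typedef char msg[(expr) ? 1 : -1]

    enum DemoIsa { kDemoNone, kDemoArm, kDemoX86 };

    COMPILE_ASSERT(1 == static_cast<size_t>(kDemoArm), kDemoArm_not_1);   // old: message must be an identifier
    static_assert(2 == static_cast<size_t>(kDemoX86), "kDemoX86 not 2");  // new: plain string, no macro

    int main() { return 0; }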
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 6305b22..0a98c80 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -316,16 +316,16 @@
// TODO: this is Thumb2 only. Remove when DoPromotion refactored.
RegStorage Mir2Lir::AllocPreservedDouble(int s_reg) {
- RegStorage res;
+ UNUSED(s_reg);
UNIMPLEMENTED(FATAL) << "Unexpected use of AllocPreservedDouble";
- return res;
+ UNREACHABLE();
}
// TODO: this is Thumb2 only. Remove when DoPromotion refactored.
RegStorage Mir2Lir::AllocPreservedSingle(int s_reg) {
- RegStorage res;
+ UNUSED(s_reg);
UNIMPLEMENTED(FATAL) << "Unexpected use of AllocPreservedSingle";
- return res;
+ UNREACHABLE();
}
@@ -1392,6 +1392,7 @@
}
bool Mir2Lir::LiveOut(int s_reg) {
+ UNUSED(s_reg);
// For now.
return true;
}
diff --git a/compiler/dex/quick/resource_mask.cc b/compiler/dex/quick/resource_mask.cc
index 17995fb..088bec8 100644
--- a/compiler/dex/quick/resource_mask.cc
+++ b/compiler/dex/quick/resource_mask.cc
@@ -33,16 +33,16 @@
ResourceMask::Bit(ResourceMask::kCCode),
};
// The 127-bit is the same as CLZ(masks_[1]) for a ResourceMask with only that bit set.
-COMPILE_ASSERT(kNoRegMasks[127-ResourceMask::kHeapRef].Equals(
- kEncodeHeapRef), check_kNoRegMasks_heap_ref_index);
-COMPILE_ASSERT(kNoRegMasks[127-ResourceMask::kLiteral].Equals(
- kEncodeLiteral), check_kNoRegMasks_literal_index);
-COMPILE_ASSERT(kNoRegMasks[127-ResourceMask::kDalvikReg].Equals(
- kEncodeDalvikReg), check_kNoRegMasks_dalvik_reg_index);
-COMPILE_ASSERT(kNoRegMasks[127-ResourceMask::kFPStatus].Equals(
- ResourceMask::Bit(ResourceMask::kFPStatus)), check_kNoRegMasks_fp_status_index);
-COMPILE_ASSERT(kNoRegMasks[127-ResourceMask::kCCode].Equals(
- ResourceMask::Bit(ResourceMask::kCCode)), check_kNoRegMasks_ccode_index);
+static_assert(kNoRegMasks[127-ResourceMask::kHeapRef].Equals(
+ kEncodeHeapRef), "kNoRegMasks heap ref index unexpected");
+static_assert(kNoRegMasks[127-ResourceMask::kLiteral].Equals(
+ kEncodeLiteral), "kNoRegMasks literal index unexpected");
+static_assert(kNoRegMasks[127-ResourceMask::kDalvikReg].Equals(
+ kEncodeDalvikReg), "kNoRegMasks dalvik reg index unexpected");
+static_assert(kNoRegMasks[127-ResourceMask::kFPStatus].Equals(
+ ResourceMask::Bit(ResourceMask::kFPStatus)), "kNoRegMasks fp status index unexpected");
+static_assert(kNoRegMasks[127-ResourceMask::kCCode].Equals(
+ ResourceMask::Bit(ResourceMask::kCCode)), "kNoRegMasks ccode index unexpected");
template <size_t special_bit>
constexpr ResourceMask OneRegOneSpecial(size_t reg) {
@@ -74,19 +74,19 @@
}
// The 127-bit is the same as CLZ(masks_[1]) for a ResourceMask with only that bit set.
-COMPILE_ASSERT(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kHeapRef, 0)].Equals(
- OneRegOneSpecial<ResourceMask::kHeapRef>(0)), check_kSingleRegMasks_heap_ref_index);
-COMPILE_ASSERT(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kLiteral, 0)].Equals(
- OneRegOneSpecial<ResourceMask::kLiteral>(0)), check_kSingleRegMasks_literal_index);
-COMPILE_ASSERT(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kDalvikReg, 0)].Equals(
- OneRegOneSpecial<ResourceMask::kDalvikReg>(0)), check_kSingleRegMasks_dalvik_reg_index);
-COMPILE_ASSERT(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kFPStatus, 0)].Equals(
- OneRegOneSpecial<ResourceMask::kFPStatus>(0)), check_kSingleRegMasks_fp_status_index);
-COMPILE_ASSERT(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kCCode, 0)].Equals(
- OneRegOneSpecial<ResourceMask::kCCode>(0)), check_kSingleRegMasks_ccode_index);
+static_assert(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kHeapRef, 0)].Equals(
+ OneRegOneSpecial<ResourceMask::kHeapRef>(0)), "kSingleRegMasks heap ref index unexpected");
+static_assert(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kLiteral, 0)].Equals(
+ OneRegOneSpecial<ResourceMask::kLiteral>(0)), "kSingleRegMasks literal index unexpected");
+static_assert(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kDalvikReg, 0)].Equals(
+ OneRegOneSpecial<ResourceMask::kDalvikReg>(0)), "kSingleRegMasks dalvik reg index unexpected");
+static_assert(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kFPStatus, 0)].Equals(
+ OneRegOneSpecial<ResourceMask::kFPStatus>(0)), "kSingleRegMasks fp status index unexpected");
+static_assert(kSingleRegMasks[SingleRegMaskIndex(127-ResourceMask::kCCode, 0)].Equals(
+ OneRegOneSpecial<ResourceMask::kCCode>(0)), "kSingleRegMasks ccode index unexpected");
// NOTE: arraysize(kNoRegMasks) multiplied by 32 due to the gcc bug workaround, see above.
-COMPILE_ASSERT(arraysize(kSingleRegMasks) == arraysize(kNoRegMasks) * 32, check_arraysizes);
+static_assert(arraysize(kSingleRegMasks) == arraysize(kNoRegMasks) * 32, "arraysizes unexpected");
constexpr ResourceMask kTwoRegsMasks[] = {
#define TWO(a, b) ResourceMask::Bit(a).Union(ResourceMask::Bit(b))
@@ -115,7 +115,7 @@
TWO(8, 15), TWO(9, 15), TWO(10, 15), TWO(11, 15), TWO(12, 15), TWO(13, 15), TWO(14, 15),
#undef TWO
};
-COMPILE_ASSERT(arraysize(kTwoRegsMasks) == 16 * 15 / 2, check_arraysize_kTwoRegsMasks);
+static_assert(arraysize(kTwoRegsMasks) == 16 * 15 / 2, "arraysize of kTwoRegsMasks unexpected");
constexpr size_t TwoRegsIndex(size_t higher, size_t lower) {
return (higher * (higher - 1)) / 2u + lower;
@@ -136,7 +136,7 @@
(CheckTwoRegsMaskLine(lines - 1) && CheckTwoRegsMaskTable(lines - 1u));
}
-COMPILE_ASSERT(CheckTwoRegsMaskTable(16), check_two_regs_masks_table);
+static_assert(CheckTwoRegsMaskTable(16), "two regs masks table check failed");
} // anonymous namespace
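
CheckTwoRegsMaskTable(16) above is interesting beyond the COMPILE_ASSERT swap: it is a recursive constexpr predicate, so the whole kTwoRegsMasks table is validated at compile time. A self-contained miniature of the same pattern over an invented table:

    #include <cstddef>

    constexpr int kSquares[] = {0, 1, 4, 9, 16};

    // C++11-style recursive constexpr check: every entry must equal its index squared.
    constexpr bool CheckSquares(size_t n) {
      return n == 0u ||
             (kSquares[n - 1u] == static_cast<int>((n - 1u) * (n - 1u)) && CheckSquares(n - 1u));
    }

    static_assert(CheckSquares(sizeof(kSquares) / sizeof(kSquares[0])), "squares table check failed");

    int main() { return 0; }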
diff --git a/compiler/dex/quick/resource_mask.h b/compiler/dex/quick/resource_mask.h
index 436cdb5..78e81b2 100644
--- a/compiler/dex/quick/resource_mask.h
+++ b/compiler/dex/quick/resource_mask.h
@@ -20,6 +20,7 @@
#include <stdint.h>
#include "base/logging.h"
+#include "base/value_object.h"
#include "dex/reg_storage.h"
namespace art {
@@ -113,10 +114,7 @@
return (masks_[0] & other.masks_[0]) != 0u || (masks_[1] & other.masks_[1]) != 0u;
}
- void SetBit(size_t bit) {
- DCHECK_LE(bit, kHighestCommonResource);
- masks_[bit / 64u] |= UINT64_C(1) << (bit & 63u);
- }
+ void SetBit(size_t bit);
constexpr bool HasBit(size_t bit) const {
return (masks_[bit / 64u] & (UINT64_C(1) << (bit & 63u))) != 0u;
@@ -139,6 +137,12 @@
friend class ResourceMaskCache;
};
+std::ostream& operator<<(std::ostream& os, const ResourceMask::ResourceBit& rhs);
+
+inline void ResourceMask::SetBit(size_t bit) {
+ DCHECK_LE(bit, kHighestCommonResource);
+ masks_[bit / 64u] |= UINT64_C(1) << (bit & 63u);
+}
constexpr ResourceMask kEncodeNone = ResourceMask::NoBits();
constexpr ResourceMask kEncodeAll = ResourceMask::AllBits();
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index 641e164..ef55054 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -547,6 +547,11 @@
{ kX86RepneScasw, kNullary, NO_OPERAND | REG_USEA | REG_USEC | SETS_CCODES, { 0x66, 0xF2, 0xAF, 0, 0, 0, 0, 0, false }, "RepNE ScasW", "" },
};
+std::ostream& operator<<(std::ostream& os, const X86OpCode& rhs) {
+ os << X86Mir2Lir::EncodingMap[rhs].name;
+ return os;
+}
+
static bool NeedsRex(int32_t raw_reg) {
return RegStorage::RegNum(raw_reg) > 7;
}
@@ -1631,6 +1636,7 @@
* sequence or request that the trace be shortened and retried.
*/
AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
+ UNUSED(start_addr);
LIR *lir;
AssemblerStatus res = kSuccess; // Assume success
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 86efc1e..497ef94 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -290,9 +290,10 @@
*/
static int X86NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
int state, const MethodReference& target_method,
- uint32_t unused,
+ uint32_t,
uintptr_t direct_code, uintptr_t direct_method,
InvokeType type) {
+ UNUSED(info, direct_code);
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
if (direct_method != 0) {
switch (state) {
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 7b5b831..dec99ae 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -250,7 +250,7 @@
void GenSelect(BasicBlock* bb, MIR* mir) OVERRIDE;
void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- int dest_reg_class) OVERRIDE;
+ RegisterClass dest_reg_class) OVERRIDE;
bool GenMemBarrier(MemBarrierKind barrier_kind) OVERRIDE;
void GenMoveException(RegLocation rl_dest) OVERRIDE;
void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
@@ -499,7 +499,7 @@
void GenConstWide(RegLocation rl_dest, int64_t value);
void GenMultiplyVectorSignedByte(RegStorage rs_dest_src1, RegStorage rs_src2);
void GenMultiplyVectorLong(RegStorage rs_dest_src1, RegStorage rs_src2);
- void GenShiftByteVector(BasicBlock *bb, MIR *mir);
+ void GenShiftByteVector(MIR* mir);
void AndMaskVectorRegister(RegStorage rs_src1, uint32_t m1, uint32_t m2, uint32_t m3,
uint32_t m4);
void MaskVectorRegister(X86OpCode opcode, RegStorage rs_src1, uint32_t m1, uint32_t m2,
@@ -557,88 +557,80 @@
/*
* @brief Load 128 bit constant into vector register.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector
* @note vA is the TypeSize for the register.
* @note vB is the destination XMM register. arg[0..3] are 32 bit constant values.
*/
- void GenConst128(BasicBlock* bb, MIR* mir);
+ void GenConst128(MIR* mir);
/*
* @brief MIR to move a vectorized register to another.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination
* @note vC: source
*/
- void GenMoveVector(BasicBlock *bb, MIR *mir);
+ void GenMoveVector(MIR* mir);
/*
* @brief Packed multiply of units in two vector registers: vB = vB .* @note vC using vA to know
* the type of the vector.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination and source
* @note vC: source
*/
- void GenMultiplyVector(BasicBlock *bb, MIR *mir);
+ void GenMultiplyVector(MIR* mir);
/*
* @brief Packed addition of units in two vector registers: vB = vB .+ vC using vA to know the
* type of the vector.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination and source
* @note vC: source
*/
- void GenAddVector(BasicBlock *bb, MIR *mir);
+ void GenAddVector(MIR* mir);
/*
* @brief Packed subtraction of units in two vector registers: vB = vB .- vC using vA to know the
* type of the vector.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination and source
* @note vC: source
*/
- void GenSubtractVector(BasicBlock *bb, MIR *mir);
+ void GenSubtractVector(MIR* mir);
/*
* @brief Packed shift left of units in two vector registers: vB = vB .<< vC using vA to know the
* type of the vector.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination and source
* @note vC: immediate
*/
- void GenShiftLeftVector(BasicBlock *bb, MIR *mir);
+ void GenShiftLeftVector(MIR* mir);
/*
* @brief Packed signed shift right of units in two vector registers: vB = vB .>> vC using vA to
* know the type of the vector.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination and source
* @note vC: immediate
*/
- void GenSignedShiftRightVector(BasicBlock *bb, MIR *mir);
+ void GenSignedShiftRightVector(MIR* mir);
/*
* @brief Packed unsigned shift right of units in two vector registers: vB = vB .>>> vC using vA
* to know the type of the vector.
- * @param bb The basic block in which the MIR is from..
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination and source
* @note vC: immediate
*/
- void GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir);
+ void GenUnsignedShiftRightVector(MIR* mir);
/*
* @brief Packed bitwise and of units in two vector registers: vB = vB .& vC using vA to know the
@@ -647,51 +639,47 @@
* @note vB: destination and source
* @note vC: source
*/
- void GenAndVector(BasicBlock *bb, MIR *mir);
+ void GenAndVector(MIR* mir);
/*
* @brief Packed bitwise or of units in two vector registers: vB = vB .| vC using vA to know the
* type of the vector.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination and source
* @note vC: source
*/
- void GenOrVector(BasicBlock *bb, MIR *mir);
+ void GenOrVector(MIR* mir);
/*
* @brief Packed bitwise xor of units in two vector registers: vB = vB .^ vC using vA to know the
* type of the vector.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination and source
* @note vC: source
*/
- void GenXorVector(BasicBlock *bb, MIR *mir);
+ void GenXorVector(MIR* mir);
/*
* @brief Reduce a 128-bit packed element into a single VR by taking lower bits
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @details Instruction does a horizontal addition of the packed elements and then adds it to VR.
* @note vA: TypeSize
* @note vB: destination and source VR (not vector register)
* @note vC: source (vector register)
*/
- void GenAddReduceVector(BasicBlock *bb, MIR *mir);
+ void GenAddReduceVector(MIR* mir);
/*
* @brief Extract a packed element into a single VR.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination VR (not vector register)
* @note vC: source (vector register)
* @note arg[0]: The index to use for extraction from vector register (which packed element).
*/
- void GenReduceVector(BasicBlock *bb, MIR *mir);
+ void GenReduceVector(MIR* mir);
/*
* @brief Create a vector value, with all TypeSize values equal to vC
@@ -701,21 +689,21 @@
* @note vB: destination vector register.
* @note vC: source VR (not vector register).
*/
- void GenSetVector(BasicBlock *bb, MIR *mir);
+ void GenSetVector(MIR* mir);
/**
* @brief Used to generate code for kMirOpPackedArrayGet.
* @param bb The basic block of MIR.
* @param mir The mir whose opcode is kMirOpPackedArrayGet.
*/
- void GenPackedArrayGet(BasicBlock *bb, MIR *mir);
+ void GenPackedArrayGet(BasicBlock* bb, MIR* mir);
/**
* @brief Used to generate code for kMirOpPackedArrayPut.
* @param bb The basic block of MIR.
* @param mir The mir whose opcode is kMirOpPackedArrayPut.
*/
- void GenPackedArrayPut(BasicBlock *bb, MIR *mir);
+ void GenPackedArrayPut(BasicBlock* bb, MIR* mir);
/*
* @brief Generate code for a vector opcode.
@@ -890,8 +878,8 @@
* the value is live in a temp register of the correct class. Additionally, if the value is in
* a temp register of the wrong register class, it will be clobbered.
*/
- RegLocation UpdateLocTyped(RegLocation loc, int reg_class);
- RegLocation UpdateLocWideTyped(RegLocation loc, int reg_class);
+ RegLocation UpdateLocTyped(RegLocation loc);
+ RegLocation UpdateLocWideTyped(RegLocation loc);
/*
* @brief Analyze MIR before generating code, to prepare for the code generation.
@@ -902,7 +890,7 @@
* @brief Analyze one basic block.
* @param bb Basic block to analyze.
*/
- void AnalyzeBB(BasicBlock * bb);
+ void AnalyzeBB(BasicBlock* bb);
/*
* @brief Analyze one extended MIR instruction
@@ -910,7 +898,7 @@
* @param bb Basic block containing instruction.
* @param mir Extended instruction to analyze.
*/
- void AnalyzeExtendedMIR(int opcode, BasicBlock * bb, MIR *mir);
+ void AnalyzeExtendedMIR(int opcode, BasicBlock* bb, MIR* mir);
/*
* @brief Analyze one MIR instruction
@@ -918,7 +906,7 @@
* @param bb Basic block containing instruction.
* @param mir Instruction to analyze.
*/
- virtual void AnalyzeMIR(int opcode, BasicBlock * bb, MIR *mir);
+ virtual void AnalyzeMIR(int opcode, BasicBlock* bb, MIR* mir);
/*
* @brief Analyze one MIR float/double instruction
@@ -926,7 +914,7 @@
* @param bb Basic block containing instruction.
* @param mir Instruction to analyze.
*/
- virtual void AnalyzeFPInstruction(int opcode, BasicBlock * bb, MIR *mir);
+ virtual void AnalyzeFPInstruction(int opcode, BasicBlock* bb, MIR* mir);
/*
* @brief Analyze one use of a double operand.
@@ -940,7 +928,7 @@
* @param bb Basic block containing instruction.
* @param mir Instruction to analyze.
*/
- void AnalyzeInvokeStatic(int opcode, BasicBlock * bb, MIR *mir);
+ void AnalyzeInvokeStatic(int opcode, BasicBlock* bb, MIR* mir);
// Information derived from analysis of MIR
@@ -987,12 +975,11 @@
*/
LIR* AddVectorLiteral(int32_t* constants);
- InToRegStorageMapping in_to_reg_storage_mapping_;
-
- bool WideGPRsAreAliases() OVERRIDE {
+ bool WideGPRsAreAliases() const OVERRIDE {
return cu_->target64; // On 64b, we have 64b GPRs.
}
- bool WideFPRsAreAliases() OVERRIDE {
+
+ bool WideFPRsAreAliases() const OVERRIDE {
return true; // xmm registers have 64b views even on x86.
}
@@ -1002,11 +989,17 @@
*/
static void DumpRegLocation(RegLocation loc);
- static const X86EncodingMap EncodingMap[kX86Last];
+ InToRegStorageMapping in_to_reg_storage_mapping_;
private:
void SwapBits(RegStorage result_reg, int shift, int32_t value);
void SwapBits64(RegStorage result_reg, int shift, int64_t value);
+
+ static const X86EncodingMap EncodingMap[kX86Last];
+
+ friend std::ostream& operator<<(std::ostream& os, const X86OpCode& rhs);
+
+ DISALLOW_COPY_AND_ASSIGN(X86Mir2Lir);
};
} // namespace art
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index 21d1a5c..254d90f 100755
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -169,8 +169,7 @@
* If the result's location is in memory, then we do not need to do anything
* more since the fstp has already placed the correct value in memory.
*/
- RegLocation rl_result = is_double ? UpdateLocWideTyped(rl_dest, kFPReg) :
- UpdateLocTyped(rl_dest, kFPReg);
+ RegLocation rl_result = is_double ? UpdateLocWideTyped(rl_dest) : UpdateLocTyped(rl_dest);
if (rl_result.location == kLocPhysReg) {
/*
* We already know that the result is in a physical register but do not know if it is the
@@ -431,8 +430,7 @@
* If the result's location is in memory, then we do not need to do anything
* more since the fstp has already placed the correct value in memory.
*/
- RegLocation rl_result = is_double ? UpdateLocWideTyped(rl_dest, kFPReg) :
- UpdateLocTyped(rl_dest, kFPReg);
+ RegLocation rl_result = is_double ? UpdateLocWideTyped(rl_dest) : UpdateLocTyped(rl_dest);
if (rl_result.location == kLocPhysReg) {
rl_result = EvalLoc(rl_dest, kFPReg, true);
if (is_double) {
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index aa1bf7f..26465a5 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -208,7 +208,7 @@
void X86Mir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- int dest_reg_class) {
+ RegisterClass dest_reg_class) {
DCHECK(!left_op.IsPair() && !right_op.IsPair() && !rs_dest.IsPair());
DCHECK(!left_op.IsFloat() && !right_op.IsFloat() && !rs_dest.IsFloat());
@@ -268,6 +268,7 @@
}
void X86Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb);
RegLocation rl_result;
RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
RegLocation rl_dest = mir_graph_->GetDest(mir);
@@ -594,8 +595,9 @@
}
RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div) {
+ UNUSED(rl_dest, reg_lo, lit, is_div);
LOG(FATAL) << "Unexpected use of GenDivRemLit for x86";
- return rl_dest;
+ UNREACHABLE();
}
RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src,
@@ -763,12 +765,14 @@
RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi,
bool is_div) {
+ UNUSED(rl_dest, reg_lo, reg_hi, is_div);
LOG(FATAL) << "Unexpected use of GenDivRem for x86";
- return rl_dest;
+ UNREACHABLE();
}
RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2, bool is_div, int flags) {
+ UNUSED(rl_dest);
// We have to use fixed registers, so flush all the temps.
// Prepare for explicit register usage.
@@ -1022,7 +1026,7 @@
DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
// In 32-bit mode the only EAX..EDX registers can be used with Mov8MR.
if (!cu_->target64 && size == kSignedByte) {
- rl_src_value = UpdateLocTyped(rl_src_value, kCoreReg);
+ rl_src_value = UpdateLocTyped(rl_src_value);
if (rl_src_value.location == kLocPhysReg && !IsByteRegister(rl_src_value.reg)) {
RegStorage temp = AllocateByteRegister();
OpRegCopy(temp, rl_src_value.reg);
@@ -1309,18 +1313,21 @@
}
LIR* X86Mir2Lir::OpVldm(RegStorage r_base, int count) {
+ UNUSED(r_base, count);
LOG(FATAL) << "Unexpected use of OpVldm for x86";
- return NULL;
+ UNREACHABLE();
}
LIR* X86Mir2Lir::OpVstm(RegStorage r_base, int count) {
+ UNUSED(r_base, count);
LOG(FATAL) << "Unexpected use of OpVstm for x86";
- return NULL;
+ UNREACHABLE();
}
void X86Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
RegLocation rl_result, int lit,
int first_bit, int second_bit) {
+ UNUSED(lit);
RegStorage t_reg = AllocTemp();
OpRegRegImm(kOpLsl, t_reg, rl_src.reg, second_bit - first_bit);
OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, t_reg);
@@ -1355,10 +1362,10 @@
int len_offset) {
class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
public:
- ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch,
- RegStorage index, RegStorage array_base, int32_t len_offset)
- : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch),
- index_(index), array_base_(array_base), len_offset_(len_offset) {
+ ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch_in,
+ RegStorage index_in, RegStorage array_base_in, int32_t len_offset_in)
+ : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch_in),
+ index_(index_in), array_base_(array_base_in), len_offset_(len_offset_in) {
}
void Compile() OVERRIDE {
@@ -1403,10 +1410,10 @@
int32_t len_offset) {
class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
public:
- ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch,
- int32_t index, RegStorage array_base, int32_t len_offset)
- : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch),
- index_(index), array_base_(array_base), len_offset_(len_offset) {
+ ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch_in,
+ int32_t index_in, RegStorage array_base_in, int32_t len_offset_in)
+ : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch_in),
+ index_(index_in), array_base_(array_base_in), len_offset_(len_offset_in) {
}
void Compile() OVERRIDE {
@@ -1453,22 +1460,27 @@
bool X86Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
RegLocation rl_src, RegLocation rl_dest, int lit) {
+ UNUSED(dalvik_opcode, is_div, rl_src, rl_dest, lit);
LOG(FATAL) << "Unexpected use of smallLiteralDive in x86";
- return false;
+ UNREACHABLE();
}
bool X86Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
+ UNUSED(rl_src, rl_dest, lit);
LOG(FATAL) << "Unexpected use of easyMultiply in x86";
- return false;
+ UNREACHABLE();
}
LIR* X86Mir2Lir::OpIT(ConditionCode cond, const char* guide) {
+ UNUSED(cond, guide);
LOG(FATAL) << "Unexpected use of OpIT in x86";
- return NULL;
+ UNREACHABLE();
}
void X86Mir2Lir::OpEndIT(LIR* it) {
+ UNUSED(it);
LOG(FATAL) << "Unexpected use of OpEndIT in x86";
+ UNREACHABLE();
}
void X86Mir2Lir::GenImulRegImm(RegStorage dest, RegStorage src, int val) {
@@ -1486,6 +1498,7 @@
}
void X86Mir2Lir::GenImulMemImm(RegStorage dest, int sreg, int displacement, int val) {
+ UNUSED(sreg);
// All memory accesses below reference dalvik regs.
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -1616,7 +1629,7 @@
int32_t val_hi = High32Bits(val);
// Prepare for explicit register usage.
ExplicitTempRegisterLock(this, 3, &rs_r0, &rs_r1, &rs_r2);
- rl_src1 = UpdateLocWideTyped(rl_src1, kCoreReg);
+ rl_src1 = UpdateLocWideTyped(rl_src1);
bool src1_in_reg = rl_src1.location == kLocPhysReg;
int displacement = SRegOffset(rl_src1.s_reg_low);
@@ -1700,8 +1713,8 @@
// Prepare for explicit register usage.
ExplicitTempRegisterLock(this, 3, &rs_r0, &rs_r1, &rs_r2);
- rl_src1 = UpdateLocWideTyped(rl_src1, kCoreReg);
- rl_src2 = UpdateLocWideTyped(rl_src2, kCoreReg);
+ rl_src1 = UpdateLocWideTyped(rl_src1);
+ rl_src2 = UpdateLocWideTyped(rl_src2);
// At this point, the VRs are in their home locations.
bool src1_in_reg = rl_src1.location == kLocPhysReg;
@@ -1837,12 +1850,12 @@
}
void X86Mir2Lir::GenLongArith(RegLocation rl_dest, RegLocation rl_src, Instruction::Code op) {
- rl_dest = UpdateLocWideTyped(rl_dest, kCoreReg);
+ rl_dest = UpdateLocWideTyped(rl_dest);
if (rl_dest.location == kLocPhysReg) {
// Ensure we are in a register pair
RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
- rl_src = UpdateLocWideTyped(rl_src, kCoreReg);
+ rl_src = UpdateLocWideTyped(rl_src);
GenLongRegOrMemOp(rl_result, rl_src, op);
StoreFinalValueWide(rl_dest, rl_result);
return;
@@ -1850,7 +1863,7 @@
// Handle the case when src and dest are intersect.
rl_src = LoadValueWide(rl_src, kCoreReg);
RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
- rl_src = UpdateLocWideTyped(rl_src, kCoreReg);
+ rl_src = UpdateLocWideTyped(rl_src);
GenLongRegOrMemOp(rl_result, rl_src, op);
StoreFinalValueWide(rl_dest, rl_result);
return;
@@ -1910,7 +1923,7 @@
rl_result = ForceTempWide(rl_result);
// Perform the operation using the RHS.
- rl_src2 = UpdateLocWideTyped(rl_src2, kCoreReg);
+ rl_src2 = UpdateLocWideTyped(rl_src2);
GenLongRegOrMemOp(rl_result, rl_src2, op);
// And now record that the result is in the temp.
@@ -1919,10 +1932,9 @@
}
// It wasn't in registers, so it better be in memory.
- DCHECK((rl_dest.location == kLocDalvikFrame) ||
- (rl_dest.location == kLocCompilerTemp));
- rl_src1 = UpdateLocWideTyped(rl_src1, kCoreReg);
- rl_src2 = UpdateLocWideTyped(rl_src2, kCoreReg);
+ DCHECK((rl_dest.location == kLocDalvikFrame) || (rl_dest.location == kLocCompilerTemp));
+ rl_src1 = UpdateLocWideTyped(rl_src1);
+ rl_src2 = UpdateLocWideTyped(rl_src2);
// Get one of the source operands into temporary register.
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
@@ -2088,7 +2100,7 @@
NewLIR1(kX86Imul64DaR, numerator_reg.GetReg());
} else {
// Only need this once. Multiply directly from the value.
- rl_src = UpdateLocWideTyped(rl_src, kCoreReg);
+ rl_src = UpdateLocWideTyped(rl_src);
if (rl_src.location != kLocPhysReg) {
// Okay, we can do this from memory.
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -2395,6 +2407,7 @@
RegLocation X86Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src, int shift_amount, int flags) {
+ UNUSED(flags);
RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
if (cu_->target64) {
OpKind op = static_cast<OpKind>(0); /* Make gcc happy */
@@ -2692,7 +2705,7 @@
return in_mem ? kX86Xor32MI : kX86Xor32RI;
default:
LOG(FATAL) << "Unexpected opcode: " << op;
- return kX86Add32MI;
+ UNREACHABLE();
}
}
@@ -2706,7 +2719,7 @@
return false;
}
- rl_dest = UpdateLocWideTyped(rl_dest, kCoreReg);
+ rl_dest = UpdateLocWideTyped(rl_dest);
if ((rl_dest.location == kLocDalvikFrame) ||
(rl_dest.location == kLocCompilerTemp)) {
@@ -2736,7 +2749,7 @@
int32_t val_lo = Low32Bits(val);
int32_t val_hi = High32Bits(val);
- rl_dest = UpdateLocWideTyped(rl_dest, kCoreReg);
+ rl_dest = UpdateLocWideTyped(rl_dest);
// Can we just do this into memory?
if ((rl_dest.location == kLocDalvikFrame) ||
@@ -2812,8 +2825,8 @@
int32_t val_lo = Low32Bits(val);
int32_t val_hi = High32Bits(val);
- rl_dest = UpdateLocWideTyped(rl_dest, kCoreReg);
- rl_src1 = UpdateLocWideTyped(rl_src1, kCoreReg);
+ rl_dest = UpdateLocWideTyped(rl_dest);
+ rl_src1 = UpdateLocWideTyped(rl_src1);
// Can we do this directly into the destination registers?
if (rl_dest.location == kLocPhysReg && rl_src1.location == kLocPhysReg &&
@@ -3035,7 +3048,7 @@
if (unary) {
rl_lhs = LoadValue(rl_lhs, kCoreReg);
- rl_result = UpdateLocTyped(rl_dest, kCoreReg);
+ rl_result = UpdateLocTyped(rl_dest);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
OpRegReg(op, rl_result.reg, rl_lhs.reg);
} else {
@@ -3045,7 +3058,7 @@
LoadValueDirectFixed(rl_rhs, t_reg);
if (is_two_addr) {
// Can we do this directly into memory?
- rl_result = UpdateLocTyped(rl_dest, kCoreReg);
+ rl_result = UpdateLocTyped(rl_dest);
if (rl_result.location != kLocPhysReg) {
// Okay, we can do this into memory
OpMemReg(op, rl_result, t_reg.GetReg());
@@ -3068,12 +3081,12 @@
// Multiply is 3 operand only (sort of).
if (is_two_addr && op != kOpMul) {
// Can we do this directly into memory?
- rl_result = UpdateLocTyped(rl_dest, kCoreReg);
+ rl_result = UpdateLocTyped(rl_dest);
if (rl_result.location == kLocPhysReg) {
// Ensure res is in a core reg
rl_result = EvalLoc(rl_dest, kCoreReg, true);
// Can we do this from memory directly?
- rl_rhs = UpdateLocTyped(rl_rhs, kCoreReg);
+ rl_rhs = UpdateLocTyped(rl_rhs);
if (rl_rhs.location != kLocPhysReg) {
OpRegMem(op, rl_result.reg, rl_rhs);
StoreFinalValue(rl_dest, rl_result);
@@ -3088,7 +3101,7 @@
// It might happen rl_rhs and rl_dest are the same VR
// in this case rl_dest is in reg after LoadValue while
// rl_result is not updated yet, so do this
- rl_result = UpdateLocTyped(rl_dest, kCoreReg);
+ rl_result = UpdateLocTyped(rl_dest);
if (rl_result.location != kLocPhysReg) {
// Okay, we can do this into memory.
OpMemReg(op, rl_result, rl_rhs.reg.GetReg());
@@ -3105,8 +3118,8 @@
}
} else {
// Try to use reg/memory instructions.
- rl_lhs = UpdateLocTyped(rl_lhs, kCoreReg);
- rl_rhs = UpdateLocTyped(rl_rhs, kCoreReg);
+ rl_lhs = UpdateLocTyped(rl_lhs);
+ rl_rhs = UpdateLocTyped(rl_rhs);
// We can't optimize with FP registers.
if (!IsOperationSafeWithoutTemps(rl_lhs, rl_rhs)) {
// Something is difficult, so fall back to the standard case.
@@ -3178,7 +3191,7 @@
Mir2Lir::GenIntToLong(rl_dest, rl_src);
return;
}
- rl_src = UpdateLocTyped(rl_src, kCoreReg);
+ rl_src = UpdateLocTyped(rl_src);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (rl_src.location == kLocPhysReg) {
NewLIR2(kX86MovsxdRR, rl_result.reg.GetReg(), rl_src.reg.GetReg());
@@ -3278,7 +3291,7 @@
LoadValueDirectFixed(rl_shift, t_reg);
if (is_two_addr) {
// Can we do this directly into memory?
- rl_result = UpdateLocWideTyped(rl_dest, kCoreReg);
+ rl_result = UpdateLocWideTyped(rl_dest);
if (rl_result.location != kLocPhysReg) {
// Okay, we can do this into memory
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
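Note: the int_x86.cc hunks above repeatedly replace a dummy `return` after `LOG(FATAL)` with `UNREACHABLE()`, and mark dead parameters with `UNUSED(...)` so the stricter warning set stays clean. As a rough, self-contained sketch of how such helpers can be written for GCC/Clang (these are generic stand-ins, not ART's actual macro definitions), the names `UNUSED`, `UNREACHABLE`, and `Divide` below are illustrative only:

    #include <cstdlib>

    // Swallow any number of otherwise-unused values; the call compiles away.
    template <typename... T>
    void UNUSED(const T&...) {}

    // Tell the compiler control cannot reach this point, so no bogus return
    // value (or fall-through warning) is needed after a fatal log.
    #define UNREACHABLE() __builtin_unreachable()

    int Divide(int dividend, int divisor) {
      if (divisor == 0) {
        std::abort();   // stand-in for LOG(FATAL)
        UNREACHABLE();  // removes the need for a dummy "return 0;"
      }
      return dividend / divisor;
    }

The practical effect is that unexpected code paths trap loudly in all builds while the compiler no longer demands unreachable return statements after them.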
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 79d5eeb..270a4e5 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -143,25 +143,6 @@
RegStorage rs_rX86_SP;
-X86NativeRegisterPool rX86_ARG0;
-X86NativeRegisterPool rX86_ARG1;
-X86NativeRegisterPool rX86_ARG2;
-X86NativeRegisterPool rX86_ARG3;
-X86NativeRegisterPool rX86_ARG4;
-X86NativeRegisterPool rX86_ARG5;
-X86NativeRegisterPool rX86_FARG0;
-X86NativeRegisterPool rX86_FARG1;
-X86NativeRegisterPool rX86_FARG2;
-X86NativeRegisterPool rX86_FARG3;
-X86NativeRegisterPool rX86_FARG4;
-X86NativeRegisterPool rX86_FARG5;
-X86NativeRegisterPool rX86_FARG6;
-X86NativeRegisterPool rX86_FARG7;
-X86NativeRegisterPool rX86_RET0;
-X86NativeRegisterPool rX86_RET1;
-X86NativeRegisterPool rX86_INVOKE_TGT;
-X86NativeRegisterPool rX86_COUNT;
-
RegStorage rs_rX86_ARG0;
RegStorage rs_rX86_ARG1;
RegStorage rs_rX86_ARG2;
@@ -237,8 +218,9 @@
}
RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
+ UNUSED(reg);
LOG(FATAL) << "Do not use this function!!!";
- return RegStorage::InvalidReg();
+ UNREACHABLE();
}
/*
@@ -795,14 +777,11 @@
class_type_address_insns_.reserve(100);
call_method_insns_.reserve(100);
store_method_addr_used_ = false;
- if (kIsDebugBuild) {
for (int i = 0; i < kX86Last; i++) {
- if (X86Mir2Lir::EncodingMap[i].opcode != i) {
- LOG(FATAL) << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name
- << " is wrong: expecting " << i << ", seeing "
- << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode);
- }
- }
+ DCHECK_EQ(X86Mir2Lir::EncodingMap[i].opcode, i)
+ << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name
+ << " is wrong: expecting " << i << ", seeing "
+ << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode);
}
if (cu_->target64) {
rs_rX86_SP = rs_rX86_SP_64;
@@ -821,20 +800,6 @@
rs_rX86_FARG5 = rs_fr5;
rs_rX86_FARG6 = rs_fr6;
rs_rX86_FARG7 = rs_fr7;
- rX86_ARG0 = rDI;
- rX86_ARG1 = rSI;
- rX86_ARG2 = rDX;
- rX86_ARG3 = rCX;
- rX86_ARG4 = r8;
- rX86_ARG5 = r9;
- rX86_FARG0 = fr0;
- rX86_FARG1 = fr1;
- rX86_FARG2 = fr2;
- rX86_FARG3 = fr3;
- rX86_FARG4 = fr4;
- rX86_FARG5 = fr5;
- rX86_FARG6 = fr6;
- rX86_FARG7 = fr7;
rs_rX86_INVOKE_TGT = rs_rDI;
} else {
rs_rX86_SP = rs_rX86_SP_32;
@@ -853,14 +818,6 @@
rs_rX86_FARG5 = RegStorage::InvalidReg();
rs_rX86_FARG6 = RegStorage::InvalidReg();
rs_rX86_FARG7 = RegStorage::InvalidReg();
- rX86_ARG0 = rAX;
- rX86_ARG1 = rCX;
- rX86_ARG2 = rDX;
- rX86_ARG3 = rBX;
- rX86_FARG0 = rAX;
- rX86_FARG1 = rCX;
- rX86_FARG2 = rDX;
- rX86_FARG3 = rBX;
rs_rX86_INVOKE_TGT = rs_rAX;
// TODO(64): Initialize with invalid reg
// rX86_ARG4 = RegStorage::InvalidReg();
@@ -869,10 +826,6 @@
rs_rX86_RET0 = rs_rAX;
rs_rX86_RET1 = rs_rDX;
rs_rX86_COUNT = rs_rCX;
- rX86_RET0 = rAX;
- rX86_RET1 = rDX;
- rX86_INVOKE_TGT = rAX;
- rX86_COUNT = rCX;
}
Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
@@ -882,8 +835,9 @@
// Not used in x86(-64)
RegStorage X86Mir2Lir::LoadHelper(QuickEntrypointEnum trampoline) {
+ UNUSED(trampoline);
LOG(FATAL) << "Unexpected use of LoadHelper in x86";
- return RegStorage::InvalidReg();
+ UNREACHABLE();
}
LIR* X86Mir2Lir::CheckSuspendUsingLoad() {
@@ -1548,46 +1502,46 @@
ReturnVectorRegisters(mir);
break;
case kMirOpConstVector:
- GenConst128(bb, mir);
+ GenConst128(mir);
break;
case kMirOpMoveVector:
- GenMoveVector(bb, mir);
+ GenMoveVector(mir);
break;
case kMirOpPackedMultiply:
- GenMultiplyVector(bb, mir);
+ GenMultiplyVector(mir);
break;
case kMirOpPackedAddition:
- GenAddVector(bb, mir);
+ GenAddVector(mir);
break;
case kMirOpPackedSubtract:
- GenSubtractVector(bb, mir);
+ GenSubtractVector(mir);
break;
case kMirOpPackedShiftLeft:
- GenShiftLeftVector(bb, mir);
+ GenShiftLeftVector(mir);
break;
case kMirOpPackedSignedShiftRight:
- GenSignedShiftRightVector(bb, mir);
+ GenSignedShiftRightVector(mir);
break;
case kMirOpPackedUnsignedShiftRight:
- GenUnsignedShiftRightVector(bb, mir);
+ GenUnsignedShiftRightVector(mir);
break;
case kMirOpPackedAnd:
- GenAndVector(bb, mir);
+ GenAndVector(mir);
break;
case kMirOpPackedOr:
- GenOrVector(bb, mir);
+ GenOrVector(mir);
break;
case kMirOpPackedXor:
- GenXorVector(bb, mir);
+ GenXorVector(mir);
break;
case kMirOpPackedAddReduce:
- GenAddReduceVector(bb, mir);
+ GenAddReduceVector(mir);
break;
case kMirOpPackedReduce:
- GenReduceVector(bb, mir);
+ GenReduceVector(mir);
break;
case kMirOpPackedSet:
- GenSetVector(bb, mir);
+ GenSetVector(mir);
break;
case kMirOpMemBarrier:
GenMemBarrier(static_cast<MemBarrierKind>(mir->dalvikInsn.vA));
@@ -1638,7 +1592,7 @@
}
}
-void X86Mir2Lir::GenConst128(BasicBlock* bb, MIR* mir) {
+void X86Mir2Lir::GenConst128(MIR* mir) {
RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
Clobber(rs_dest);
@@ -1689,7 +1643,7 @@
load->target = data_target;
}
-void X86Mir2Lir::GenMoveVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenMoveVector(MIR* mir) {
// We only support 128 bit registers.
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1805,7 +1759,7 @@
NewLIR2(kX86PaddqRR, rs_dest_src1.GetReg(), rs_tmp_vector_1.GetReg());
}
-void X86Mir2Lir::GenMultiplyVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenMultiplyVector(MIR* mir) {
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1839,7 +1793,7 @@
NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
}
-void X86Mir2Lir::GenAddVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenAddVector(MIR* mir) {
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1874,7 +1828,7 @@
NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
}
-void X86Mir2Lir::GenSubtractVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenSubtractVector(MIR* mir) {
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1909,7 +1863,7 @@
NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
}
-void X86Mir2Lir::GenShiftByteVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenShiftByteVector(MIR* mir) {
// Destination does not need clobbered because it has already been as part
// of the general packed shift handler (caller of this method).
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1953,7 +1907,7 @@
AndMaskVectorRegister(rs_dest_src1, int_mask, int_mask, int_mask, int_mask);
}
-void X86Mir2Lir::GenShiftLeftVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenShiftLeftVector(MIR* mir) {
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1973,7 +1927,7 @@
break;
case kSignedByte:
case kUnsignedByte:
- GenShiftByteVector(bb, mir);
+ GenShiftByteVector(mir);
return;
default:
LOG(FATAL) << "Unsupported vector shift left " << opsize;
@@ -1982,7 +1936,7 @@
NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}
-void X86Mir2Lir::GenSignedShiftRightVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenSignedShiftRightVector(MIR* mir) {
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1999,18 +1953,18 @@
break;
case kSignedByte:
case kUnsignedByte:
- GenShiftByteVector(bb, mir);
+ GenShiftByteVector(mir);
return;
case k64:
// TODO Implement emulated shift algorithm.
default:
LOG(FATAL) << "Unsupported vector signed shift right " << opsize;
- break;
+ UNREACHABLE();
}
NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}
-void X86Mir2Lir::GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenUnsignedShiftRightVector(MIR* mir) {
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -2030,7 +1984,7 @@
break;
case kSignedByte:
case kUnsignedByte:
- GenShiftByteVector(bb, mir);
+ GenShiftByteVector(mir);
return;
default:
LOG(FATAL) << "Unsupported vector unsigned shift right " << opsize;
@@ -2039,7 +1993,7 @@
NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}
-void X86Mir2Lir::GenAndVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenAndVector(MIR* mir) {
// We only support 128 bit registers.
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -2048,7 +2002,7 @@
NewLIR2(kX86PandRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
}
-void X86Mir2Lir::GenOrVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenOrVector(MIR* mir) {
// We only support 128 bit registers.
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -2057,7 +2011,7 @@
NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
}
-void X86Mir2Lir::GenXorVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenXorVector(MIR* mir) {
// We only support 128 bit registers.
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -2084,7 +2038,7 @@
AppendOpcodeWithConst(opcode, rs_src1.GetReg(), const_mirp);
}
-void X86Mir2Lir::GenAddReduceVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenAddReduceVector(MIR* mir) {
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage vector_src = RegStorage::Solo128(mir->dalvikInsn.vB);
bool is_wide = opsize == k64 || opsize == kDouble;
@@ -2219,7 +2173,7 @@
// except the rhs is not a VR but a physical register allocated above.
// No load of source VR is done because it assumes that rl_result will
// share physical register / memory location.
- rl_result = UpdateLocTyped(rl_dest, kCoreReg);
+ rl_result = UpdateLocTyped(rl_dest);
if (rl_result.location == kLocPhysReg) {
// Ensure res is in a core reg.
rl_result = EvalLoc(rl_dest, kCoreReg, true);
@@ -2232,7 +2186,7 @@
}
}
-void X86Mir2Lir::GenReduceVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenReduceVector(MIR* mir) {
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegLocation rl_dest = mir_graph_->GetDest(mir);
RegStorage vector_src = RegStorage::Solo128(mir->dalvikInsn.vB);
@@ -2286,7 +2240,7 @@
} else {
int extract_index = mir->dalvikInsn.arg[0];
int extr_opcode = 0;
- rl_result = UpdateLocTyped(rl_dest, kCoreReg);
+ rl_result = UpdateLocTyped(rl_dest);
// Handle the rest of integral types now.
switch (opsize) {
@@ -2302,7 +2256,7 @@
break;
default:
LOG(FATAL) << "Unsupported vector reduce " << opsize;
- return;
+ UNREACHABLE();
}
if (rl_result.location == kLocPhysReg) {
@@ -2331,7 +2285,7 @@
}
}
-void X86Mir2Lir::GenSetVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenSetVector(MIR* mir) {
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -2406,11 +2360,13 @@
}
}
-void X86Mir2Lir::GenPackedArrayGet(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenPackedArrayGet(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb, mir);
UNIMPLEMENTED(FATAL) << "Extended opcode kMirOpPackedArrayGet not supported.";
}
-void X86Mir2Lir::GenPackedArrayPut(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenPackedArrayPut(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb, mir);
UNIMPLEMENTED(FATAL) << "Extended opcode kMirOpPackedArrayPut not supported.";
}
@@ -2747,7 +2703,7 @@
bool src_is_8b_aligned = (current_src_offset & 0x7) == 0;
bool dest_is_8b_aligned = (current_dest_offset & 0x7) == 0;
- ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+ ScopedMemRefType mem_ref_type2(this, ResourceMask::kDalvikReg);
if (src_is_16b_aligned) {
ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovA128FP);
} else if (src_is_8b_aligned) {
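Note: earlier in target_x86.cc the debug-build loop over `EncodingMap` collapses into a `DCHECK_EQ` with a streamed message, which keeps the table-order check but compiles to nothing in release builds. A minimal sketch of that idea, using placeholder names (`OpCode`, `Encoding`, `kEncodingMap` are hypothetical, not the project's types):

    #include <cassert>

    enum OpCode { kAdd, kSub, kMul, kLast };

    struct Encoding {
      OpCode opcode;
      const char* name;
    };

    // Entries are looked up by indexing with the opcode, so entry i must
    // describe opcode i; a debug-only check catches accidental reordering.
    static const Encoding kEncodingMap[kLast] = {
      {kAdd, "add"}, {kSub, "sub"}, {kMul, "mul"},
    };

    void CheckEncodingMapOrder() {
      for (int i = 0; i < kLast; ++i) {
        assert(kEncodingMap[i].opcode == i);  // analogous to DCHECK_EQ(...) << ...
      }
    }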
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index 8d5dabc..cb9a24a 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -54,14 +54,16 @@
}
bool X86Mir2Lir::InexpensiveConstantInt(int32_t value) {
+ UNUSED(value);
return true;
}
bool X86Mir2Lir::InexpensiveConstantFloat(int32_t value) {
- return false;
+ return value == 0;
}
bool X86Mir2Lir::InexpensiveConstantLong(int64_t value) {
+ UNUSED(value);
return true;
}
@@ -934,13 +936,14 @@
LIR* X86Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
int offset, int check_value, LIR* target, LIR** compare) {
- LIR* inst = NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base_reg.GetReg(),
- offset, check_value);
- if (compare != nullptr) {
- *compare = inst;
- }
- LIR* branch = OpCondBranch(cond, target);
- return branch;
+ UNUSED(temp_reg); // Comparison performed directly with memory.
+ LIR* inst = NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base_reg.GetReg(),
+ offset, check_value);
+ if (compare != nullptr) {
+ *compare = inst;
+ }
+ LIR* branch = OpCondBranch(cond, target);
+ return branch;
}
void X86Mir2Lir::AnalyzeMIR() {
@@ -965,13 +968,13 @@
}
}
-void X86Mir2Lir::AnalyzeBB(BasicBlock * bb) {
+void X86Mir2Lir::AnalyzeBB(BasicBlock* bb) {
if (bb->block_type == kDead) {
// Ignore dead blocks
return;
}
- for (MIR *mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
int opcode = mir->dalvikInsn.opcode;
if (MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
AnalyzeExtendedMIR(opcode, bb, mir);
@@ -982,7 +985,7 @@
}
-void X86Mir2Lir::AnalyzeExtendedMIR(int opcode, BasicBlock * bb, MIR *mir) {
+void X86Mir2Lir::AnalyzeExtendedMIR(int opcode, BasicBlock* bb, MIR* mir) {
switch (opcode) {
// Instructions referencing doubles.
case kMirOpFusedCmplDouble:
@@ -1009,7 +1012,7 @@
}
}
-void X86Mir2Lir::AnalyzeMIR(int opcode, BasicBlock * bb, MIR *mir) {
+void X86Mir2Lir::AnalyzeMIR(int opcode, BasicBlock* bb, MIR* mir) {
// Looking for
// - Do we need a pointer to the code (used for packed switches and double lits)?
@@ -1046,7 +1049,8 @@
}
}
-void X86Mir2Lir::AnalyzeFPInstruction(int opcode, BasicBlock * bb, MIR *mir) {
+void X86Mir2Lir::AnalyzeFPInstruction(int opcode, BasicBlock* bb, MIR* mir) {
+ UNUSED(bb);
// Look at all the uses, and see if they are double constants.
uint64_t attrs = MIRGraph::GetDataFlowAttributes(static_cast<Instruction::Code>(opcode));
int next_sreg = 0;
@@ -1080,7 +1084,7 @@
}
}
-RegLocation X86Mir2Lir::UpdateLocTyped(RegLocation loc, int reg_class) {
+RegLocation X86Mir2Lir::UpdateLocTyped(RegLocation loc) {
loc = UpdateLoc(loc);
if ((loc.location == kLocPhysReg) && (loc.fp != loc.reg.IsFloat())) {
if (GetRegInfo(loc.reg)->IsTemp()) {
@@ -1094,7 +1098,7 @@
return loc;
}
-RegLocation X86Mir2Lir::UpdateLocWideTyped(RegLocation loc, int reg_class) {
+RegLocation X86Mir2Lir::UpdateLocWideTyped(RegLocation loc) {
loc = UpdateLocWide(loc);
if ((loc.location == kLocPhysReg) && (loc.fp != loc.reg.IsFloat())) {
if (GetRegInfo(loc.reg)->IsTemp()) {
@@ -1108,7 +1112,8 @@
return loc;
}
-void X86Mir2Lir::AnalyzeInvokeStatic(int opcode, BasicBlock * bb, MIR *mir) {
+void X86Mir2Lir::AnalyzeInvokeStatic(int opcode, BasicBlock* bb, MIR* mir) {
+ UNUSED(opcode, bb);
// For now this is only actual for x86-32.
if (cu_->target64) {
return;
@@ -1132,6 +1137,7 @@
}
LIR* X86Mir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
+ UNUSED(r_tgt); // Call to absolute memory location doesn't need a temporary target register.
if (cu_->target64) {
return OpThreadMem(op, GetThreadOffset<8>(trampoline));
} else {
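Note: in utility_x86.cc, `InexpensiveConstantFloat` now reports only the value 0 as cheap. The parameter carries the raw float bits, and an all-zero pattern (+0.0f) can be materialized with a self-xor of an XMM register rather than a literal-pool load, while other constants generally cannot. A hedged, free-standing sketch of that test (`IsCheapFloatConstant` is an illustrative name, not the project's API):

    #include <cstdint>
    #include <cstring>

    // A float constant is "cheap" only when its bit pattern is all zeros
    // (+0.0f), which an x86 backend can produce with xorps reg,reg instead
    // of loading it from memory. Note -0.0f has the sign bit set and so
    // still counts as expensive here.
    bool IsCheapFloatConstant(float f) {
      int32_t bits;
      std::memcpy(&bits, &f, sizeof(bits));
      return bits == 0;
    }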
diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h
index 22a2f30..afdc244 100644
--- a/compiler/dex/quick/x86/x86_lir.h
+++ b/compiler/dex/quick/x86/x86_lir.h
@@ -313,25 +313,6 @@
constexpr RegStorage rs_xr14(RegStorage::kValid | xr14);
constexpr RegStorage rs_xr15(RegStorage::kValid | xr15);
-extern X86NativeRegisterPool rX86_ARG0;
-extern X86NativeRegisterPool rX86_ARG1;
-extern X86NativeRegisterPool rX86_ARG2;
-extern X86NativeRegisterPool rX86_ARG3;
-extern X86NativeRegisterPool rX86_ARG4;
-extern X86NativeRegisterPool rX86_ARG5;
-extern X86NativeRegisterPool rX86_FARG0;
-extern X86NativeRegisterPool rX86_FARG1;
-extern X86NativeRegisterPool rX86_FARG2;
-extern X86NativeRegisterPool rX86_FARG3;
-extern X86NativeRegisterPool rX86_FARG4;
-extern X86NativeRegisterPool rX86_FARG5;
-extern X86NativeRegisterPool rX86_FARG6;
-extern X86NativeRegisterPool rX86_FARG7;
-extern X86NativeRegisterPool rX86_RET0;
-extern X86NativeRegisterPool rX86_RET1;
-extern X86NativeRegisterPool rX86_INVOKE_TGT;
-extern X86NativeRegisterPool rX86_COUNT;
-
extern RegStorage rs_rX86_ARG0;
extern RegStorage rs_rX86_ARG1;
extern RegStorage rs_rX86_ARG2;
@@ -674,6 +655,7 @@
kX86RepneScasw, // repne scasw
kX86Last
};
+std::ostream& operator<<(std::ostream& os, const X86OpCode& rhs);
/* Instruction assembly field_loc kind */
enum X86EncodingKind {
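Note: the new `std::ostream& operator<<(std::ostream&, const X86OpCode&)` declaration lets the enum be streamed by `LOG`/`DCHECK` messages without an implicit int conversion. A minimal hand-written definition of such an operator looks roughly like the sketch below; the `Color` enum is a placeholder, and the actual body for X86OpCode may be produced elsewhere rather than written by hand:

    #include <ostream>

    enum Color { kRed, kGreen, kBlue };

    std::ostream& operator<<(std::ostream& os, const Color& rhs) {
      switch (rhs) {
        case kRed:   return os << "kRed";
        case kGreen: return os << "kGreen";
        case kBlue:  return os << "kBlue";
      }
      // Out-of-range values fall through to a numeric dump.
      return os << "Color[" << static_cast<int>(rhs) << "]";
    }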
diff --git a/compiler/dex/reg_storage.h b/compiler/dex/reg_storage.h
index 706933a..4a84ff2 100644
--- a/compiler/dex/reg_storage.h
+++ b/compiler/dex/reg_storage.h
@@ -18,6 +18,7 @@
#define ART_COMPILER_DEX_REG_STORAGE_H_
#include "base/logging.h"
+#include "base/value_object.h"
#include "compiler_enums.h" // For WideKind
namespace art {
@@ -72,7 +73,7 @@
* records.
*/
-class RegStorage {
+class RegStorage : public ValueObject {
public:
enum RegStorageKind {
kValidMask = 0x8000,
@@ -112,7 +113,7 @@
}
constexpr RegStorage(RegStorageKind rs_kind, int low_reg, int high_reg)
: reg_(
- DCHECK_CONSTEXPR(rs_kind == k64BitPair, << rs_kind, 0u)
+ DCHECK_CONSTEXPR(rs_kind == k64BitPair, << static_cast<int>(rs_kind), 0u)
DCHECK_CONSTEXPR((low_reg & kFloatingPoint) == (high_reg & kFloatingPoint),
<< low_reg << ", " << high_reg, 0u)
DCHECK_CONSTEXPR((high_reg & kRegNumMask) <= kHighRegNumMask,
@@ -331,9 +332,8 @@
case k256BitSolo: return 32;
case k512BitSolo: return 64;
case k1024BitSolo: return 128;
- default: LOG(FATAL) << "Unexpected shape";
+ default: LOG(FATAL) << "Unexpected shape"; UNREACHABLE();
}
- return 0;
}
private:
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index 412f85d..d3d76ba 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -539,8 +539,7 @@
for (BasicBlockId pred_id : bb->predecessors) {
BasicBlock* pred_bb = GetBasicBlock(pred_id);
DCHECK(pred_bb != nullptr);
- int ssa_reg = pred_bb->data_flow_info->vreg_to_ssa_map_exit[v_reg];
- uses[idx] = ssa_reg;
+ uses[idx] = pred_bb->data_flow_info->vreg_to_ssa_map_exit[v_reg];
incoming[idx] = pred_id;
idx++;
}
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index a8e6b3c..4929b5b 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -106,6 +106,8 @@
if (use_sea) {
return true;
}
+#else
+ UNUSED(method_ref);
#endif
if (!compiler_options_->IsCompilationEnabled()) {
return false;
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 05785a8..aab94c0 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -718,9 +718,9 @@
for (const std::pair<uint16_t, const DexFile*>& exception_type : unresolved_exception_types) {
uint16_t exception_type_idx = exception_type.first;
const DexFile* dex_file = exception_type.second;
- StackHandleScope<2> hs(self);
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(*dex_file)));
- Handle<mirror::Class> klass(hs.NewHandle(
+ StackHandleScope<2> hs2(self);
+ Handle<mirror::DexCache> dex_cache(hs2.NewHandle(class_linker->FindDexCache(*dex_file)));
+ Handle<mirror::Class> klass(hs2.NewHandle(
class_linker->ResolveType(*dex_file, exception_type_idx, dex_cache,
NullHandle<mirror::ClassLoader>())));
if (klass.Get() == nullptr) {
@@ -757,13 +757,13 @@
}
VLOG(compiler) << "Adding " << descriptor << " to image classes";
for (size_t i = 0; i < klass->NumDirectInterfaces(); ++i) {
- StackHandleScope<1> hs(self);
- MaybeAddToImageClasses(hs.NewHandle(mirror::Class::GetDirectInterface(self, klass, i)),
+ StackHandleScope<1> hs2(self);
+ MaybeAddToImageClasses(hs2.NewHandle(mirror::Class::GetDirectInterface(self, klass, i)),
image_classes);
}
if (klass->IsArrayClass()) {
- StackHandleScope<1> hs(self);
- MaybeAddToImageClasses(hs.NewHandle(klass->GetComponentType()), image_classes);
+ StackHandleScope<1> hs2(self);
+ MaybeAddToImageClasses(hs2.NewHandle(klass->GetComponentType()), image_classes);
}
klass.Assign(klass->GetSuperClass());
}
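Note: the `hs` → `hs2` renames above (and the later `timings2`, `i` → `j` renames) avoid an inner declaration hiding an outer one of the same name, which -Wshadow reports. A tiny reproduction of the pattern, with a hypothetical `Scope` type standing in for things like StackHandleScope:

    // The loop body re-declares 'scope', hiding the function-level object;
    // -Wshadow turns this into a warning (an error under -Werror).
    struct Scope { explicit Scope(int) {} };

    void Process(int outer_id, const int* ids, int n) {
      Scope scope(outer_id);
      for (int i = 0; i < n; ++i) {
        Scope scope(ids[i]);  // warning: shadows the outer 'scope'
        // ... use the per-iteration scope ...
      }
    }

    // The fix used throughout this change is a plain rename of the inner
    // object, e.g. 'Scope scope2(ids[i]);', with no behavioural change.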
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 65a842d..682b17a 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -75,6 +75,7 @@
kRequired, // Dex-to-dex compilation required for correctness.
kOptimize // Perform required transformation and peep-hole optimizations.
};
+std::ostream& operator<<(std::ostream& os, const DexToDexCompilationLevel& rhs);
class CompilerDriver {
public:
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index 9ae9bd4..5a0ec2f 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -86,11 +86,11 @@
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(class_loader)));
mirror::Class* c = class_linker->FindClass(soa.Self(), descriptor, loader);
CHECK(c != NULL);
- for (size_t i = 0; i < c->NumDirectMethods(); i++) {
- MakeExecutable(c->GetDirectMethod(i));
+ for (size_t j = 0; j < c->NumDirectMethods(); j++) {
+ MakeExecutable(c->GetDirectMethod(j));
}
- for (size_t i = 0; i < c->NumVirtualMethods(); i++) {
- MakeExecutable(c->GetVirtualMethod(i));
+ for (size_t j = 0; j < c->NumVirtualMethods(); j++) {
+ MakeExecutable(c->GetVirtualMethod(j));
}
}
}
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index fb7aeb9..0592f0c 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -21,6 +21,7 @@
#include <vector>
#include "base/macros.h"
+#include "globals.h"
namespace art {
@@ -242,6 +243,7 @@
DISALLOW_COPY_AND_ASSIGN(CompilerOptions);
};
+std::ostream& operator<<(std::ostream& os, const CompilerOptions::CompilerFilter& rhs);
} // namespace art
diff --git a/compiler/elf_builder.h b/compiler/elf_builder.h
index c32bdb4..7f30565 100644
--- a/compiler/elf_builder.h
+++ b/compiler/elf_builder.h
@@ -494,7 +494,7 @@
output_(output) {}
protected:
- bool DoActualWrite(File* elf_file) OVERRIDE {
+ bool DoActualWrite(File* elf_file ATTRIBUTE_UNUSED) OVERRIDE {
// All data is written by the ElfFileRodataPiece right now, as the oat writer writes in one
// piece. This is for future flexibility.
UNUSED(output_);
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index c75d8f8..25cf086 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -222,9 +222,9 @@
typename Elf_Phdr, typename Elf_Shdr>
bool ElfWriterQuick<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr>::Write(OatWriter* oat_writer,
- const std::vector<const DexFile*>& dex_files_unused,
- const std::string& android_root_unused,
- bool is_host_unused) {
+ const std::vector<const DexFile*>& dex_files_unused ATTRIBUTE_UNUSED,
+ const std::string& android_root_unused ATTRIBUTE_UNUSED,
+ bool is_host_unused ATTRIBUTE_UNUSED) {
constexpr bool debug = false;
const OatHeader& oat_header = oat_writer->GetOatHeader();
Elf_Word oat_data_size = oat_header.GetExecutableOffset();
@@ -686,6 +686,13 @@
symtab->AddSymbol(it->method_name_, &builder->GetTextBuilder(), it->low_pc_, true,
it->high_pc_ - it->low_pc_, STB_GLOBAL, STT_FUNC);
+ // Conforming to aaelf, add $t mapping symbol to indicate start of a sequence of thumb2
+ // instructions, so that disassembler tools can correctly disassemble.
+ if (it->compiled_method_->GetInstructionSet() == kThumb2) {
+ symtab->AddSymbol("$t", &builder->GetTextBuilder(), it->low_pc_ & ~1, true,
+ 0, STB_LOCAL, STT_NOTYPE);
+ }
+
// Include CFI for compiled method, if possible.
if (cfi_info.get() != nullptr) {
DCHECK(it->compiled_method_ != nullptr);
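Note: the `$t` mapping symbol above is emitted at `it->low_pc_ & ~1`. On ARM, bit 0 of a code pointer marks a Thumb entry point, but the instructions themselves live at the even address, and AAELF mapping symbols must carry that real address. A small self-contained check of the masking step (the function name is illustrative):

    #include <cassert>
    #include <cstdint>

    // ARM code pointers use bit 0 to signal a Thumb target; the instruction
    // stream still starts at the even address, which is what a "$t" mapping
    // symbol should point at.
    uint32_t ThumbCodeAddress(uint32_t entry_point) {
      return entry_point & ~1u;
    }

    int main() {
      assert(ThumbCodeAddress(0x00004001u) == 0x00004000u);
      return 0;
    }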
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 861a182..2fd5a52 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -265,7 +265,7 @@
return true;
}
-void ImageWriter::ComputeEagerResolvedStringsCallback(Object* obj, void* arg) {
+void ImageWriter::ComputeEagerResolvedStringsCallback(Object* obj, void* arg ATTRIBUTE_UNUSED) {
if (!obj->GetClass()->IsStringClass()) {
return;
}
@@ -661,7 +661,8 @@
}
}
- void operator()(mirror::Class* /*klass*/, mirror::Reference* ref) const
+ void operator()(mirror::Class* klass ATTRIBUTE_UNUSED,
+ mirror::Reference* ref ATTRIBUTE_UNUSED) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
LOG(FATAL) << "Reference not expected here.";
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 0fea2a7..2755442 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -530,7 +530,7 @@
// point return value would be in xmm0. We use log, to somehow ensure
// the compiler will use the floating point stack.
-jdouble Java_MyClassNatives_logD(JNIEnv* env, jclass klass, jdouble x) {
+jdouble Java_MyClassNatives_logD(JNIEnv*, jclass, jdouble x) {
return log(x);
}
@@ -544,7 +544,7 @@
JNI_TEST(RunStaticLogDoubleMethod)
-jfloat Java_MyClassNatives_logF(JNIEnv* env, jclass klass, jfloat x) {
+jfloat Java_MyClassNatives_logF(JNIEnv*, jclass, jfloat x) {
return logf(x);
}
@@ -558,15 +558,15 @@
JNI_TEST(RunStaticLogFloatMethod)
-jboolean Java_MyClassNatives_returnTrue(JNIEnv* env, jclass klass) {
+jboolean Java_MyClassNatives_returnTrue(JNIEnv*, jclass) {
return JNI_TRUE;
}
-jboolean Java_MyClassNatives_returnFalse(JNIEnv* env, jclass klass) {
+jboolean Java_MyClassNatives_returnFalse(JNIEnv*, jclass) {
return JNI_FALSE;
}
-jint Java_MyClassNatives_returnInt(JNIEnv* env, jclass klass) {
+jint Java_MyClassNatives_returnInt(JNIEnv*, jclass) {
return 42;
}
@@ -785,9 +785,9 @@
EXPECT_EQ(11, trace_array->GetLength());
// Check stack trace entries have expected values
- for (int32_t i = 0; i < trace_array->GetLength(); ++i) {
- EXPECT_EQ(-2, trace_array->Get(i)->GetLineNumber());
- mirror::StackTraceElement* ste = trace_array->Get(i);
+ for (int32_t j = 0; j < trace_array->GetLength(); ++j) {
+ EXPECT_EQ(-2, trace_array->Get(j)->GetLineNumber());
+ mirror::StackTraceElement* ste = trace_array->Get(j);
EXPECT_STREQ("MyClassNatives.java", ste->GetFileName()->ToModifiedUtf8().c_str());
EXPECT_STREQ("MyClassNatives", ste->GetDeclaringClass()->ToModifiedUtf8().c_str());
EXPECT_STREQ("fooI", ste->GetMethodName()->ToModifiedUtf8().c_str());
@@ -1056,7 +1056,10 @@
JNI_TEST(CompileAndRunFloatFloatMethod)
-void Java_MyClassNatives_checkParameterAlign(JNIEnv* env, jobject thisObj, jint i1, jlong l1) {
+void Java_MyClassNatives_checkParameterAlign(JNIEnv* env ATTRIBUTE_UNUSED,
+ jobject thisObj ATTRIBUTE_UNUSED,
+ jint i1 ATTRIBUTE_UNUSED,
+ jlong l1 ATTRIBUTE_UNUSED) {
// EXPECT_EQ(kNative, Thread::Current()->GetState());
// EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
// EXPECT_TRUE(thisObj != nullptr);
@@ -1520,7 +1523,7 @@
JNI_TEST(WithoutImplementation)
-void Java_MyClassNatives_stackArgsIntsFirst(JNIEnv* env, jclass klass, jint i1, jint i2, jint i3,
+void Java_MyClassNatives_stackArgsIntsFirst(JNIEnv*, jclass, jint i1, jint i2, jint i3,
jint i4, jint i5, jint i6, jint i7, jint i8, jint i9,
jint i10, jfloat f1, jfloat f2, jfloat f3, jfloat f4,
jfloat f5, jfloat f6, jfloat f7, jfloat f8, jfloat f9,
@@ -1591,7 +1594,7 @@
JNI_TEST(StackArgsIntsFirst)
-void Java_MyClassNatives_stackArgsFloatsFirst(JNIEnv* env, jclass klass, jfloat f1, jfloat f2,
+void Java_MyClassNatives_stackArgsFloatsFirst(JNIEnv*, jclass, jfloat f1, jfloat f2,
jfloat f3, jfloat f4, jfloat f5, jfloat f6, jfloat f7,
jfloat f8, jfloat f9, jfloat f10, jint i1, jint i2,
jint i3, jint i4, jint i5, jint i6, jint i7, jint i8,
@@ -1662,7 +1665,7 @@
JNI_TEST(StackArgsFloatsFirst)
-void Java_MyClassNatives_stackArgsMixed(JNIEnv* env, jclass klass, jint i1, jfloat f1, jint i2,
+void Java_MyClassNatives_stackArgsMixed(JNIEnv*, jclass, jint i1, jfloat f1, jint i2,
jfloat f2, jint i3, jfloat f3, jint i4, jfloat f4, jint i5,
jfloat f5, jint i6, jfloat f6, jint i7, jfloat f7, jint i8,
jfloat f8, jint i9, jfloat f9, jint i10, jfloat f10) {
diff --git a/compiler/jni/quick/arm/calling_convention_arm.cc b/compiler/jni/quick/arm/calling_convention_arm.cc
index 9545896..769cd4c 100644
--- a/compiler/jni/quick/arm/calling_convention_arm.cc
+++ b/compiler/jni/quick/arm/calling_convention_arm.cc
@@ -34,8 +34,8 @@
D0, D1, D2, D3, D4, D5, D6, D7
};
-COMPILE_ASSERT(arraysize(kHFDArgumentRegisters) * 2 == arraysize(kHFSArgumentRegisters),
- ks_d_argument_registers_mismatch);
+static_assert(arraysize(kHFDArgumentRegisters) * 2 == arraysize(kHFSArgumentRegisters),
+ "ks d argument registers mismatch");
// Calling convention
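Note: the COMPILE_ASSERT → static_assert conversion swaps the identifier-shaped "message" for an ordinary string literal, since C++11 static_assert is a language feature rather than a macro. A self-contained sketch of the same D/S register-count check, with `ArraySizeOf` as a local stand-in for the project's arraysize helper and the arrays shortened for illustration:

    #include <cstddef>

    template <typename T, size_t N>
    constexpr size_t ArraySizeOf(const T (&)[N]) { return N; }

    const int kDRegs[] = {0, 1, 2, 3};
    const int kSRegs[] = {0, 1, 2, 3, 4, 5, 6, 7};

    // Checked entirely at compile time; the message is a plain string, so no
    // fake identifier like ks_d_argument_registers_mismatch is needed.
    static_assert(ArraySizeOf(kDRegs) * 2 == ArraySizeOf(kSRegs),
                  "each D register must cover exactly two S registers");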
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
index 525f05c..a100552 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
@@ -38,6 +38,7 @@
}
static ManagedRegister ReturnRegisterForShorty(const char* shorty, bool jni) {
+ UNUSED(jni);
if (shorty[0] == 'F' || shorty[0] == 'D') {
return X86_64ManagedRegister::FromXmmRegister(XMM0);
} else if (shorty[0] == 'J') {
diff --git a/compiler/llvm/llvm_compiler.cc b/compiler/llvm/llvm_compiler.cc
index 55af614..fa93e00 100644
--- a/compiler/llvm/llvm_compiler.cc
+++ b/compiler/llvm/llvm_compiler.cc
@@ -16,6 +16,7 @@
#include "llvm_compiler.h"
+#include "base/macros.h"
#ifdef ART_USE_PORTABLE_COMPILER
#include "compiler.h"
#include "compiler_llvm.h"
@@ -152,9 +153,10 @@
Compiler* CreateLLVMCompiler(CompilerDriver* driver) {
#ifdef ART_USE_PORTABLE_COMPILER
- return new llvm::LLVMCompiler(driver);
+ return new llvm::LLVMCompiler(driver);
#else
- return nullptr;
+ UNUSED(driver);
+ return nullptr;
#endif
}
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 4cb7d9c..97b7cc9 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -113,8 +113,8 @@
timer_.get(), ""));
jobject class_loader = nullptr;
if (kCompile) {
- TimingLogger timings("OatTest::WriteRead", false, false);
- compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), &timings);
+ TimingLogger timings2("OatTest::WriteRead", false, false);
+ compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), &timings2);
}
ScratchFile tmp;
@@ -174,12 +174,12 @@
oat_class.GetType()) << descriptor;
size_t method_index = 0;
- for (size_t i = 0; i < klass->NumDirectMethods(); i++, method_index++) {
- CheckMethod(klass->GetDirectMethod(i),
+ for (size_t j = 0; j < klass->NumDirectMethods(); j++, method_index++) {
+ CheckMethod(klass->GetDirectMethod(j),
oat_class.GetOatMethod(method_index), dex_file);
}
- for (size_t i = 0; i < num_virtual_methods; i++, method_index++) {
- CheckMethod(klass->GetVirtualMethod(i),
+ for (size_t j = 0; j < num_virtual_methods; j++, method_index++) {
+ CheckMethod(klass->GetVirtualMethod(j),
oat_class.GetOatMethod(method_index), dex_file);
}
}
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index be52f40..659c332 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -70,16 +70,18 @@
public:
NoRelativeCallPatcher() { }
- uint32_t ReserveSpace(uint32_t offset, const CompiledMethod* compiled_method) OVERRIDE {
+ uint32_t ReserveSpace(uint32_t offset,
+ const CompiledMethod* compiled_method ATTRIBUTE_UNUSED) OVERRIDE {
return offset; // No space reserved; no patches expected.
}
- uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE {
+ uint32_t WriteThunks(OutputStream* out ATTRIBUTE_UNUSED, uint32_t offset) OVERRIDE {
return offset; // No thunks added; no patches expected.
}
- void Patch(std::vector<uint8_t>* code, uint32_t literal_offset, uint32_t patch_offset,
- uint32_t target_offset) OVERRIDE {
+ void Patch(std::vector<uint8_t>* code ATTRIBUTE_UNUSED, uint32_t literal_offset ATTRIBUTE_UNUSED,
+ uint32_t patch_offset ATTRIBUTE_UNUSED,
+ uint32_t target_offset ATTRIBUTE_UNUSED) OVERRIDE {
LOG(FATAL) << "Unexpected relative patch.";
}
@@ -91,11 +93,12 @@
public:
X86RelativeCallPatcher() { }
- uint32_t ReserveSpace(uint32_t offset, const CompiledMethod* compiled_method) OVERRIDE {
+ uint32_t ReserveSpace(uint32_t offset,
+ const CompiledMethod* compiled_method ATTRIBUTE_UNUSED) OVERRIDE {
return offset; // No space reserved; no limit on relative call distance.
}
- uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE {
+ uint32_t WriteThunks(OutputStream* out ATTRIBUTE_UNUSED, uint32_t offset) OVERRIDE {
return offset; // No thunks added; no limit on relative call distance.
}
@@ -648,7 +651,7 @@
return true;
}
- bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it) {
+ bool VisitMethod(size_t class_def_method_index ATTRIBUTE_UNUSED, const ClassDataItemIterator& it) {
// Fill in the compiled_methods_ array for methods that have a
// CompiledMethod. We track the number of non-null entries in
// num_non_null_compiled_methods_ since we only want to allocate
@@ -860,7 +863,7 @@
: OatDexMethodVisitor(writer, offset) {
}
- bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it)
+ bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it ATTRIBUTE_UNUSED)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
OatClass* oat_class = writer_->oat_classes_[oat_class_index_];
CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
@@ -1291,9 +1294,9 @@
// Update oat_dex_files_.
auto oat_class_it = oat_classes_.begin();
for (OatDexFile* oat_dex_file : oat_dex_files_) {
- for (uint32_t& offset : oat_dex_file->methods_offsets_) {
+ for (uint32_t& method_offset : oat_dex_file->methods_offsets_) {
DCHECK(oat_class_it != oat_classes_.end());
- offset = (*oat_class_it)->offset_;
+ method_offset = (*oat_class_it)->offset_;
++oat_class_it;
}
oat_dex_file->UpdateChecksum(oat_header_);
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index a1e61b9..5b61f21 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -231,10 +231,10 @@
// data to write
- COMPILE_ASSERT(mirror::Class::Status::kStatusMax < (2 ^ 16), class_status_wont_fit_in_16bits);
+ static_assert(mirror::Class::Status::kStatusMax < (2 ^ 16), "class status won't fit in 16bits");
int16_t status_;
- COMPILE_ASSERT(OatClassType::kOatClassMax < (2 ^ 16), oat_class_type_wont_fit_in_16bits);
+ static_assert(OatClassType::kOatClassMax < (2 ^ 16), "oat_class type won't fit in 16bits");
uint16_t type_;
uint32_t method_bitmap_size_;
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index e4ccd96..d168fc8 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -505,11 +505,11 @@
}
HLoadClass* constant = new (arena_) HLoadClass(
- storage_index, is_referrers_class, is_initialized, dex_offset);
+ storage_index, is_referrers_class, dex_offset);
current_block_->AddInstruction(constant);
HInstruction* cls = constant;
- if (constant->NeedsInitialization()) {
+ if (!is_initialized) {
cls = new (arena_) HClinitCheck(constant, dex_offset);
current_block_->AddInstruction(cls);
}
@@ -1173,6 +1173,35 @@
break;
}
+ case Instruction::CONST_STRING: {
+ current_block_->AddInstruction(new (arena_) HLoadString(instruction.VRegB_21c(), dex_offset));
+ UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction());
+ break;
+ }
+
+ case Instruction::CONST_STRING_JUMBO: {
+ current_block_->AddInstruction(new (arena_) HLoadString(instruction.VRegB_31c(), dex_offset));
+ UpdateLocal(instruction.VRegA_31c(), current_block_->GetLastInstruction());
+ break;
+ }
+
+ case Instruction::CONST_CLASS: {
+ uint16_t type_index = instruction.VRegB_21c();
+ bool type_known_final;
+ bool type_known_abstract;
+ bool is_referrers_class;
+ bool can_access = compiler_driver_->CanAccessTypeWithoutChecks(
+ dex_compilation_unit_->GetDexMethodIndex(), *dex_file_, type_index,
+ &type_known_final, &type_known_abstract, &is_referrers_class);
+ if (!can_access) {
+ return false;
+ }
+ current_block_->AddInstruction(
+ new (arena_) HLoadClass(instruction.VRegB_21c(), is_referrers_class, dex_offset));
+ UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction());
+ break;
+ }
+
default:
return false;
}
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index c61e991..ac72a33 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -122,8 +122,8 @@
return -1;
}
-size_t CodeGenerator::FindTwoFreeConsecutiveEntries(bool* array, size_t length) {
- for (size_t i = 0; i < length - 1; ++i) {
+size_t CodeGenerator::FindTwoFreeConsecutiveAlignedEntries(bool* array, size_t length) {
+ for (size_t i = 0; i < length - 1; i += 2) {
if (!array[i] && !array[i + 1]) {
array[i] = true;
array[i + 1] = true;
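Note: besides the rename, the loop now steps by 2, so a pair is only ever allocated starting at an even index. That matters for the ARM caller, where a double occupies a D register that overlays the aligned S-register pair S(2n)/S(2n+1). A free-standing sketch of the aligned scan with a quick usage check (names are illustrative, not the class's API):

    #include <cassert>
    #include <cstddef>

    // Scan only even indices so the pair (i, i+1) maps onto an aligned unit,
    // e.g. an ARM D register overlaying S(2n) and S(2n+1).
    int FindAlignedFreePair(bool* taken, size_t length) {
      for (size_t i = 0; i + 1 < length; i += 2) {
        if (!taken[i] && !taken[i + 1]) {
          taken[i] = true;
          taken[i + 1] = true;
          return static_cast<int>(i);
        }
      }
      return -1;
    }

    int main() {
      bool taken[8] = {true, false, false, false, false, false, false, false};
      // Indices 1 and 2 are free but misaligned; the first aligned pair is 2/3.
      assert(FindAlignedFreePair(taken, 8) == 2);
      return 0;
    }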
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index bf9d2c0..01c5cc9 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -51,7 +51,7 @@
uintptr_t native_pc;
};
-class SlowPathCode : public ArenaObject {
+class SlowPathCode : public ArenaObject<kArenaAllocSlowPaths> {
public:
SlowPathCode() {}
virtual ~SlowPathCode() {}
@@ -62,7 +62,7 @@
DISALLOW_COPY_AND_ASSIGN(SlowPathCode);
};
-class CodeGenerator : public ArenaObject {
+class CodeGenerator : public ArenaObject<kArenaAllocMisc> {
public:
// Compiles the graph to executable instructions. Returns whether the compilation
// succeeded.
@@ -115,12 +115,14 @@
// Restores the register from the stack. Returns the size taken on stack.
virtual size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) = 0;
virtual size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
- LOG(FATAL) << "Unimplemented";
- return 0u;
+ UNUSED(stack_index, reg_id);
+ UNIMPLEMENTED(FATAL);
+ UNREACHABLE();
}
virtual size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
- LOG(FATAL) << "Unimplemented";
- return 0u;
+ UNUSED(stack_index, reg_id);
+ UNIMPLEMENTED(FATAL);
+ UNREACHABLE();
}
void RecordPcInfo(HInstruction* instruction, uint32_t dex_pc);
@@ -190,7 +192,7 @@
virtual Location AllocateFreeRegister(Primitive::Type type) const = 0;
static size_t FindFreeEntry(bool* array, size_t length);
- static size_t FindTwoFreeConsecutiveEntries(bool* array, size_t length);
+ static size_t FindTwoFreeConsecutiveAlignedEntries(bool* array, size_t length);
virtual Location GetStackLocation(HLoadLocal* load) const = 0;
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index a06860a..6e6d64c 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -170,32 +170,88 @@
DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM);
};
-class ClinitCheckSlowPathARM : public SlowPathCodeARM {
+class LoadClassSlowPathARM : public SlowPathCodeARM {
public:
- explicit ClinitCheckSlowPathARM(HClinitCheck* instruction) : instruction_(instruction) {}
+ LoadClassSlowPathARM(HLoadClass* cls,
+ HInstruction* at,
+ uint32_t dex_pc,
+ bool do_clinit)
+ : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ DCHECK(at->IsLoadClass() || at->IsClinitCheck());
+ }
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = at_->GetLocations();
+
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
- codegen->SaveLiveRegisters(instruction_->GetLocations());
+ codegen->SaveLiveRegisters(locations);
- HLoadClass* cls = instruction_->GetLoadClass();
InvokeRuntimeCallingConvention calling_convention;
- __ LoadImmediate(calling_convention.GetRegisterAt(0), cls->GetTypeIndex());
+ __ LoadImmediate(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
arm_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
- arm_codegen->InvokeRuntime(
- QUICK_ENTRY_POINT(pInitializeStaticStorage), instruction_, instruction_->GetDexPc());
- arm_codegen->Move32(instruction_->GetLocations()->InAt(0), Location::RegisterLocation(R0));
- codegen->RestoreLiveRegisters(instruction_->GetLocations());
+ int32_t entry_point_offset = do_clinit_
+ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
+ : QUICK_ENTRY_POINT(pInitializeType);
+ arm_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_);
+
+ // Move the class to the desired location.
+ if (locations->Out().IsValid()) {
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+ arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
+ }
+ codegen->RestoreLiveRegisters(locations);
__ b(GetExitLabel());
}
private:
- HClinitCheck* const instruction_;
+ // The class this slow path will load.
+ HLoadClass* const cls_;
- DISALLOW_COPY_AND_ASSIGN(ClinitCheckSlowPathARM);
+ // The instruction where this slow path is happening.
+ // (Might be the load class or an initialization check).
+ HInstruction* const at_;
+
+ // The dex PC of `at_`.
+ const uint32_t dex_pc_;
+
+ // Whether to initialize the class.
+ const bool do_clinit_;
+
+ DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM);
};
+class LoadStringSlowPathARM : public SlowPathCodeARM {
+ public:
+ explicit LoadStringSlowPathARM(HLoadString* instruction) : instruction_(instruction) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+
+ CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
+ __ Bind(GetEntryLabel());
+ codegen->SaveLiveRegisters(locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ arm_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(0));
+ __ LoadImmediate(calling_convention.GetRegisterAt(1), instruction_->GetStringIndex());
+ arm_codegen->InvokeRuntime(
+ QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc());
+ arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
+
+ codegen->RestoreLiveRegisters(locations);
+ __ b(GetExitLabel());
+ }
+
+ private:
+ HLoadString* const instruction_;
+
+ DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM);
+};
+
+#undef __
+
#undef __
#define __ reinterpret_cast<ArmAssembler*>(GetAssembler())->
@@ -296,7 +352,8 @@
}
case Primitive::kPrimDouble: {
- int reg = FindTwoFreeConsecutiveEntries(blocked_fpu_registers_, kNumberOfSRegisters);
+ int reg = FindTwoFreeConsecutiveAlignedEntries(blocked_fpu_registers_, kNumberOfSRegisters);
+ DCHECK_EQ(reg % 2, 0);
return Location::FpuRegisterPairLocation(reg, reg + 1);
}
@@ -341,6 +398,14 @@
blocked_fpu_registers_[S21] = true;
blocked_fpu_registers_[S22] = true;
blocked_fpu_registers_[S23] = true;
+ blocked_fpu_registers_[S24] = true;
+ blocked_fpu_registers_[S25] = true;
+ blocked_fpu_registers_[S26] = true;
+ blocked_fpu_registers_[S27] = true;
+ blocked_fpu_registers_[S28] = true;
+ blocked_fpu_registers_[S29] = true;
+ blocked_fpu_registers_[S30] = true;
+ blocked_fpu_registers_[S31] = true;
UpdateBlockedPairRegisters();
}
@@ -446,7 +511,7 @@
calling_convention.GetRegisterPairAt(index));
return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
} else if (index + 1 == calling_convention.GetNumberOfRegisters()) {
- return Location::QuickParameter(stack_index);
+ return Location::QuickParameter(index, stack_index);
} else {
return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index));
}
@@ -561,12 +626,13 @@
} else if (source.IsFpuRegister()) {
UNIMPLEMENTED(FATAL);
} else if (source.IsQuickParameter()) {
- uint32_t argument_index = source.GetQuickParameterIndex();
+ uint16_t register_index = source.GetQuickParameterRegisterIndex();
+ uint16_t stack_index = source.GetQuickParameterStackIndex();
InvokeDexCallingConvention calling_convention;
__ Mov(destination.AsRegisterPairLow<Register>(),
- calling_convention.GetRegisterAt(argument_index));
+ calling_convention.GetRegisterAt(register_index));
__ LoadFromOffset(kLoadWord, destination.AsRegisterPairHigh<Register>(),
- SP, calling_convention.GetStackOffsetOf(argument_index + 1) + GetFrameSize());
+ SP, calling_convention.GetStackOffsetOf(stack_index + 1) + GetFrameSize());
} else {
DCHECK(source.IsDoubleStackSlot());
if (destination.AsRegisterPairLow<Register>() == R1) {
@@ -588,20 +654,21 @@
}
} else if (destination.IsQuickParameter()) {
InvokeDexCallingConvention calling_convention;
- uint32_t argument_index = destination.GetQuickParameterIndex();
+ uint16_t register_index = destination.GetQuickParameterRegisterIndex();
+ uint16_t stack_index = destination.GetQuickParameterStackIndex();
if (source.IsRegisterPair()) {
- __ Mov(calling_convention.GetRegisterAt(argument_index),
+ __ Mov(calling_convention.GetRegisterAt(register_index),
source.AsRegisterPairLow<Register>());
__ StoreToOffset(kStoreWord, source.AsRegisterPairHigh<Register>(),
- SP, calling_convention.GetStackOffsetOf(argument_index + 1));
+ SP, calling_convention.GetStackOffsetOf(stack_index + 1));
} else if (source.IsFpuRegister()) {
UNIMPLEMENTED(FATAL);
} else {
DCHECK(source.IsDoubleStackSlot());
__ LoadFromOffset(
- kLoadWord, calling_convention.GetRegisterAt(argument_index), SP, source.GetStackIndex());
+ kLoadWord, calling_convention.GetRegisterAt(register_index), SP, source.GetStackIndex());
__ LoadFromOffset(kLoadWord, R0, SP, source.GetHighStackIndex(kArmWordSize));
- __ StoreToOffset(kStoreWord, R0, SP, calling_convention.GetStackOffsetOf(argument_index + 1));
+ __ StoreToOffset(kStoreWord, R0, SP, calling_convention.GetStackOffsetOf(stack_index + 1));
}
} else {
DCHECK(destination.IsDoubleStackSlot());
@@ -616,11 +683,12 @@
}
} else if (source.IsQuickParameter()) {
InvokeDexCallingConvention calling_convention;
- uint32_t argument_index = source.GetQuickParameterIndex();
- __ StoreToOffset(kStoreWord, calling_convention.GetRegisterAt(argument_index),
+ uint16_t register_index = source.GetQuickParameterRegisterIndex();
+ uint16_t stack_index = source.GetQuickParameterStackIndex();
+ __ StoreToOffset(kStoreWord, calling_convention.GetRegisterAt(register_index),
SP, destination.GetStackIndex());
__ LoadFromOffset(kLoadWord, R0,
- SP, calling_convention.GetStackOffsetOf(argument_index + 1) + GetFrameSize());
+ SP, calling_convention.GetStackOffsetOf(stack_index + 1) + GetFrameSize());
__ StoreToOffset(kStoreWord, R0, SP, destination.GetHighStackIndex(kArmWordSize));
} else if (source.IsFpuRegisterPair()) {
__ StoreDToOffset(FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()),
@@ -751,6 +819,7 @@
}
void InstructionCodeGeneratorARM::VisitExit(HExit* exit) {
+ UNUSED(exit);
if (kIsDebugBuild) {
__ Comment("Unreachable");
__ bkpt(0);
@@ -916,6 +985,7 @@
void InstructionCodeGeneratorARM::VisitLoadLocal(HLoadLocal* load) {
// Nothing to do, this is driven by the code generator.
+ UNUSED(load);
}
void LocationsBuilderARM::VisitStoreLocal(HStoreLocal* store) {
@@ -943,6 +1013,7 @@
}
void InstructionCodeGeneratorARM::VisitStoreLocal(HStoreLocal* store) {
+ UNUSED(store);
}
void LocationsBuilderARM::VisitIntConstant(HIntConstant* constant) {
@@ -953,6 +1024,7 @@
void InstructionCodeGeneratorARM::VisitIntConstant(HIntConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderARM::VisitLongConstant(HLongConstant* constant) {
@@ -963,6 +1035,7 @@
void InstructionCodeGeneratorARM::VisitLongConstant(HLongConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderARM::VisitFloatConstant(HFloatConstant* constant) {
@@ -973,6 +1046,7 @@
void InstructionCodeGeneratorARM::VisitFloatConstant(HFloatConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderARM::VisitDoubleConstant(HDoubleConstant* constant) {
@@ -983,6 +1057,7 @@
void InstructionCodeGeneratorARM::VisitDoubleConstant(HDoubleConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderARM::VisitReturnVoid(HReturnVoid* ret) {
@@ -990,6 +1065,7 @@
}
void InstructionCodeGeneratorARM::VisitReturnVoid(HReturnVoid* ret) {
+ UNUSED(ret);
codegen_->GenerateFrameExit();
}
@@ -1000,6 +1076,7 @@
}
void InstructionCodeGeneratorARM::VisitReturn(HReturn* ret) {
+ UNUSED(ret);
codegen_->GenerateFrameExit();
}
@@ -1465,6 +1542,7 @@
void InstructionCodeGeneratorARM::VisitParameterValue(HParameterValue* instruction) {
// Nothing to do, the parameter is already at its location.
+ UNUSED(instruction);
}
void LocationsBuilderARM::VisitNot(HNot* not_) {
@@ -1508,7 +1586,6 @@
}
void InstructionCodeGeneratorARM::VisitCompare(HCompare* compare) {
- Label greater, done;
LocationSummary* locations = compare->GetLocations();
switch (compare->InputAt(0)->GetType()) {
case Primitive::kPrimLong: {
@@ -1553,6 +1630,7 @@
}
void InstructionCodeGeneratorARM::VisitPhi(HPhi* instruction) {
+ UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
@@ -1955,9 +2033,11 @@
void InstructionCodeGeneratorARM::VisitTemporary(HTemporary* temp) {
// Nothing to do, this is driven by the code generator.
+ UNUSED(temp);
}
void LocationsBuilderARM::VisitParallelMove(HParallelMove* instruction) {
+ UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
@@ -2087,21 +2167,38 @@
}
void LocationsBuilderARM::VisitLoadClass(HLoadClass* cls) {
+ LocationSummary::CallKind call_kind = cls->CanCallRuntime()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(cls, LocationSummary::kNoCall);
+ new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
locations->SetOut(Location::RequiresRegister());
}
void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
Register out = cls->GetLocations()->Out().As<Register>();
if (cls->IsReferrersClass()) {
+ DCHECK(!cls->CanCallRuntime());
+ DCHECK(!cls->MustGenerateClinitCheck());
codegen_->LoadCurrentMethod(out);
__ LoadFromOffset(kLoadWord, out, out, mirror::ArtMethod::DeclaringClassOffset().Int32Value());
} else {
+ DCHECK(cls->CanCallRuntime());
codegen_->LoadCurrentMethod(out);
__ LoadFromOffset(
kLoadWord, out, out, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value());
__ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));
+
+ SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
+ cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
+ codegen_->AddSlowPath(slow_path);
+ __ cmp(out, ShifterOperand(0));
+ __ b(slow_path->GetEntryLabel(), EQ);
+ if (cls->MustGenerateClinitCheck()) {
+ GenerateClassInitializationCheck(slow_path, out);
+ } else {
+ __ Bind(slow_path->GetExitLabel());
+ }
}
}
@@ -2115,17 +2212,15 @@
}
void InstructionCodeGeneratorARM::VisitClinitCheck(HClinitCheck* check) {
- SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) ClinitCheckSlowPathARM(check);
+ // We assume the class is not null.
+ SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
+ check->GetLoadClass(), check, check->GetDexPc(), true);
codegen_->AddSlowPath(slow_path);
+ GenerateClassInitializationCheck(slow_path, check->GetLocations()->InAt(0).As<Register>());
+}
- LocationSummary* locations = check->GetLocations();
- // We remove the class as a live register, we know it's null or unused in the slow path.
- RegisterSet* register_set = locations->GetLiveRegisters();
- register_set->Remove(locations->InAt(0));
-
- Register class_reg = locations->InAt(0).As<Register>();
- __ cmp(class_reg, ShifterOperand(0));
- __ b(slow_path->GetEntryLabel(), EQ);
+void InstructionCodeGeneratorARM::GenerateClassInitializationCheck(
+ SlowPathCodeARM* slow_path, Register class_reg) {
__ LoadFromOffset(kLoadWord, IP, class_reg, mirror::Class::StatusOffset().Int32Value());
__ cmp(IP, ShifterOperand(mirror::Class::kStatusInitialized));
__ b(slow_path->GetEntryLabel(), LT);
@@ -2258,5 +2353,25 @@
}
}
+void LocationsBuilderARM::VisitLoadString(HLoadString* load) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) {
+ SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM(load);
+ codegen_->AddSlowPath(slow_path);
+
+ Register out = load->GetLocations()->Out().As<Register>();
+ codegen_->LoadCurrentMethod(out);
+ __ LoadFromOffset(
+ kLoadWord, out, out, mirror::ArtMethod::DexCacheStringsOffset().Int32Value());
+ __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
+ __ cmp(out, ShifterOperand(0));
+ __ b(slow_path->GetEntryLabel(), EQ);
+ __ Bind(slow_path->GetExitLabel());
+}
+
} // namespace arm
} // namespace art
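The UNUSED(...) calls added throughout these visitors mark parameters that a particular visitor deliberately ignores, so the compiler's unused-parameter warning stays quiet without changing behavior. A minimal standalone sketch of the idiom, assuming a plain cast-to-void macro (the actual UNUSED definition in ART's base/macros.h may differ):

// Minimal illustration of marking an intentionally ignored parameter.
// The UNUSED macro below is an assumption for this sketch; it evaluates its
// argument and discards the result, which is enough to silence the warning.
#include <iostream>

#define UNUSED(x) ((void)(x))

// A visitor-style function whose parameter is required by the interface but
// not needed by this particular implementation.
void VisitExit(int exit_id) {
  UNUSED(exit_id);
  std::cout << "nothing to emit for Exit\n";
}

int main() {
  VisitExit(42);
  return 0;
}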
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index c65b426..5076a4b 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -26,6 +26,7 @@
namespace arm {
class CodeGeneratorARM;
+class SlowPathCodeARM;
static constexpr size_t kArmWordSize = 4;
@@ -131,6 +132,7 @@
// is the block to branch to if the suspend check is not needed, and after
// the suspend call.
void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
+ void GenerateClassInitializationCheck(SlowPathCodeARM* slow_path, Register class_reg);
ArmAssembler* const assembler_;
CodeGeneratorARM* const codegen_;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index fe999c2..90d7c35 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -538,8 +538,8 @@
M(DoubleConstant) \
M(Div) \
M(FloatConstant) \
- M(Mul) \
M(LoadClass) \
+ M(LoadString) \
M(Neg) \
M(NewArray) \
M(ParallelMove) \
@@ -556,6 +556,7 @@
#define DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS(name) \
void InstructionCodeGeneratorARM64::Visit##name(H##name* instr) { \
+ UNUSED(instr); \
__ Brk(UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name)); \
} \
void LocationsBuilderARM64::Visit##name(H##name* instr) { \
@@ -711,6 +712,7 @@
}
void InstructionCodeGeneratorARM64::VisitExit(HExit* exit) {
+ UNUSED(exit);
if (kIsDebugBuild) {
down_cast<Arm64Assembler*>(GetAssembler())->Comment("Unreachable");
__ Brk(0); // TODO: Introduce special markers for such code locations.
@@ -758,16 +760,16 @@
// the comparison and its condition as the branch condition.
Register lhs = InputRegisterAt(condition, 0);
Operand rhs = InputOperandAt(condition, 1);
- Condition cond = ARM64Condition(condition->GetCondition());
- if ((cond == eq || cond == ne) && rhs.IsImmediate() && (rhs.immediate() == 0)) {
- if (cond == eq) {
+ Condition arm64_cond = ARM64Condition(condition->GetCondition());
+ if ((arm64_cond == eq || arm64_cond == ne) && rhs.IsImmediate() && (rhs.immediate() == 0)) {
+ if (arm64_cond == eq) {
__ Cbz(lhs, true_target);
} else {
__ Cbnz(lhs, true_target);
}
} else {
__ Cmp(lhs, rhs);
- __ B(cond, true_target);
+ __ B(arm64_cond, true_target);
}
}
@@ -877,6 +879,7 @@
void InstructionCodeGeneratorARM64::VisitIntConstant(HIntConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderARM64::VisitInvokeStatic(HInvokeStatic* invoke) {
@@ -967,6 +970,7 @@
void InstructionCodeGeneratorARM64::VisitLoadLocal(HLoadLocal* load) {
// Nothing to do, this is driven by the code generator.
+ UNUSED(load);
}
void LocationsBuilderARM64::VisitLocal(HLocal* local) {
@@ -984,6 +988,45 @@
void InstructionCodeGeneratorARM64::VisitLongConstant(HLongConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
+}
+
+void LocationsBuilderARM64::VisitMul(HMul* mul) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
+ switch (mul->GetResultType()) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Unimplemented mul type " << mul->GetResultType();
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitMul(HMul* mul) {
+ switch (mul->GetResultType()) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ __ Mul(OutputRegister(mul), InputRegisterAt(mul, 0), InputRegisterAt(mul, 1));
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Unimplemented mul type " << mul->GetResultType();
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
+ }
}
void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
@@ -1071,6 +1114,7 @@
void InstructionCodeGeneratorARM64::VisitParameterValue(HParameterValue* instruction) {
// Nothing to do, the parameter is already at its location.
+ UNUSED(instruction);
}
void LocationsBuilderARM64::VisitPhi(HPhi* instruction) {
@@ -1082,6 +1126,7 @@
}
void InstructionCodeGeneratorARM64::VisitPhi(HPhi* instruction) {
+ UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
@@ -1126,6 +1171,7 @@
}
void InstructionCodeGeneratorARM64::VisitReturnVoid(HReturnVoid* instruction) {
+ UNUSED(instruction);
codegen_->GenerateFrameExit();
__ Br(lr);
}
@@ -1153,6 +1199,7 @@
}
void InstructionCodeGeneratorARM64::VisitStoreLocal(HStoreLocal* store) {
+ UNUSED(store);
}
void LocationsBuilderARM64::VisitSub(HSub* instruction) {
@@ -1204,6 +1251,7 @@
void InstructionCodeGeneratorARM64::VisitTemporary(HTemporary* temp) {
// Nothing to do, this is driven by the code generator.
+ UNUSED(temp);
}
} // namespace arm64
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index a4003ff..5530f46 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -180,11 +180,15 @@
virtual Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
virtual size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE {
+ UNUSED(stack_index);
+ UNUSED(reg_id);
UNIMPLEMENTED(INFO) << "TODO: SaveCoreRegister";
return 0;
}
virtual size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE {
+ UNUSED(stack_index);
+ UNUSED(reg_id);
UNIMPLEMENTED(INFO) << "TODO: RestoreCoreRegister";
return 0;
}
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 267edca..1e37909 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -157,30 +157,83 @@
DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathX86);
};
-class ClinitCheckSlowPathX86 : public SlowPathCodeX86 {
+class LoadStringSlowPathX86 : public SlowPathCodeX86 {
public:
- explicit ClinitCheckSlowPathX86(HClinitCheck* instruction) : instruction_(instruction) {}
+ explicit LoadStringSlowPathX86(HLoadString* instruction) : instruction_(instruction) {}
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
- codegen->SaveLiveRegisters(instruction_->GetLocations());
+ codegen->SaveLiveRegisters(locations);
- HLoadClass* cls = instruction_->GetLoadClass();
InvokeRuntimeCallingConvention calling_convention;
- __ movl(calling_convention.GetRegisterAt(0), Immediate(cls->GetTypeIndex()));
- x86_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInitializeStaticStorage)));
+ x86_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(0));
+ __ movl(calling_convention.GetRegisterAt(1), Immediate(instruction_->GetStringIndex()));
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pResolveString)));
codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
- x86_codegen->Move32(instruction_->GetLocations()->InAt(0), Location::RegisterLocation(EAX));
- codegen->RestoreLiveRegisters(instruction_->GetLocations());
+ x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
+ codegen->RestoreLiveRegisters(locations);
+
__ jmp(GetExitLabel());
}
private:
- HClinitCheck* const instruction_;
+ HLoadString* const instruction_;
- DISALLOW_COPY_AND_ASSIGN(ClinitCheckSlowPathX86);
+ DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathX86);
+};
+
+class LoadClassSlowPathX86 : public SlowPathCodeX86 {
+ public:
+ LoadClassSlowPathX86(HLoadClass* cls,
+ HInstruction* at,
+ uint32_t dex_pc,
+ bool do_clinit)
+ : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ DCHECK(at->IsLoadClass() || at->IsClinitCheck());
+ }
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = at_->GetLocations();
+ CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
+ __ Bind(GetEntryLabel());
+ codegen->SaveLiveRegisters(locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ __ movl(calling_convention.GetRegisterAt(0), Immediate(cls_->GetTypeIndex()));
+ x86_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
+ __ fs()->call(Address::Absolute(do_clinit_
+ ? QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInitializeStaticStorage)
+ : QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInitializeType)));
+ codegen->RecordPcInfo(at_, dex_pc_);
+
+ // Move the class to the desired location.
+ if (locations->Out().IsValid()) {
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+ x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
+ }
+ codegen->RestoreLiveRegisters(locations);
+ __ jmp(GetExitLabel());
+ }
+
+ private:
+ // The class this slow path will load.
+ HLoadClass* const cls_;
+
+ // The instruction where this slow path is happening.
+ // (Might be the load class or an initialization check).
+ HInstruction* const at_;
+
+ // The dex PC of `at_`.
+ const uint32_t dex_pc_;
+
+ // Whether to initialize the class.
+ const bool do_clinit_;
+
+ DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathX86);
};
#undef __
@@ -393,7 +446,9 @@
calling_convention.GetRegisterPairAt(index));
return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
} else if (index + 1 == calling_convention.GetNumberOfRegisters()) {
- return Location::QuickParameter(index);
+ // On X86, the register index and stack index of a quick parameter are the same, since
+ // we are passing floating point values in core registers.
+ return Location::QuickParameter(index, index);
} else {
return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(index));
}
@@ -453,12 +508,13 @@
} else if (source.IsFpuRegister()) {
LOG(FATAL) << "Unimplemented";
} else if (source.IsQuickParameter()) {
- uint32_t argument_index = source.GetQuickParameterIndex();
+ uint16_t register_index = source.GetQuickParameterRegisterIndex();
+ uint16_t stack_index = source.GetQuickParameterStackIndex();
InvokeDexCallingConvention calling_convention;
__ movl(destination.AsRegisterPairLow<Register>(),
- calling_convention.GetRegisterAt(argument_index));
+ calling_convention.GetRegisterAt(register_index));
__ movl(destination.AsRegisterPairHigh<Register>(), Address(ESP,
- calling_convention.GetStackOffsetOf(argument_index + 1) + GetFrameSize()));
+ calling_convention.GetStackOffsetOf(stack_index + 1) + GetFrameSize()));
} else {
DCHECK(source.IsDoubleStackSlot());
__ movl(destination.AsRegisterPairLow<Register>(), Address(ESP, source.GetStackIndex()));
@@ -467,19 +523,20 @@
}
} else if (destination.IsQuickParameter()) {
InvokeDexCallingConvention calling_convention;
- uint32_t argument_index = destination.GetQuickParameterIndex();
+ uint16_t register_index = destination.GetQuickParameterRegisterIndex();
+ uint16_t stack_index = destination.GetQuickParameterStackIndex();
if (source.IsRegister()) {
- __ movl(calling_convention.GetRegisterAt(argument_index), source.AsRegisterPairLow<Register>());
- __ movl(Address(ESP, calling_convention.GetStackOffsetOf(argument_index + 1)),
+ __ movl(calling_convention.GetRegisterAt(register_index), source.AsRegisterPairLow<Register>());
+ __ movl(Address(ESP, calling_convention.GetStackOffsetOf(stack_index + 1)),
source.AsRegisterPairHigh<Register>());
} else if (source.IsFpuRegister()) {
LOG(FATAL) << "Unimplemented";
} else {
DCHECK(source.IsDoubleStackSlot());
- __ movl(calling_convention.GetRegisterAt(argument_index),
+ __ movl(calling_convention.GetRegisterAt(register_index),
Address(ESP, source.GetStackIndex()));
__ pushl(Address(ESP, source.GetHighStackIndex(kX86WordSize)));
- __ popl(Address(ESP, calling_convention.GetStackOffsetOf(argument_index + 1)));
+ __ popl(Address(ESP, calling_convention.GetStackOffsetOf(stack_index + 1)));
}
} else if (destination.IsFpuRegister()) {
if (source.IsDoubleStackSlot()) {
@@ -495,10 +552,11 @@
source.AsRegisterPairHigh<Register>());
} else if (source.IsQuickParameter()) {
InvokeDexCallingConvention calling_convention;
- uint32_t argument_index = source.GetQuickParameterIndex();
+ uint16_t register_index = source.GetQuickParameterRegisterIndex();
+ uint16_t stack_index = source.GetQuickParameterStackIndex();
__ movl(Address(ESP, destination.GetStackIndex()),
- calling_convention.GetRegisterAt(argument_index));
- DCHECK_EQ(calling_convention.GetStackOffsetOf(argument_index + 1) + GetFrameSize(),
+ calling_convention.GetRegisterAt(register_index));
+ DCHECK_EQ(calling_convention.GetStackOffsetOf(stack_index + 1) + GetFrameSize(),
static_cast<size_t>(destination.GetHighStackIndex(kX86WordSize)));
} else if (source.IsFpuRegister()) {
__ movsd(Address(ESP, destination.GetStackIndex()), source.As<XmmRegister>());
@@ -611,6 +669,7 @@
}
void InstructionCodeGeneratorX86::VisitExit(HExit* exit) {
+ UNUSED(exit);
if (kIsDebugBuild) {
__ Comment("Unreachable");
__ int3();
@@ -700,6 +759,7 @@
void InstructionCodeGeneratorX86::VisitLoadLocal(HLoadLocal* load) {
// Nothing to do, this is driven by the code generator.
+ UNUSED(load);
}
void LocationsBuilderX86::VisitStoreLocal(HStoreLocal* store) {
@@ -728,6 +788,7 @@
}
void InstructionCodeGeneratorX86::VisitStoreLocal(HStoreLocal* store) {
+ UNUSED(store);
}
void LocationsBuilderX86::VisitCondition(HCondition* comp) {
@@ -817,6 +878,7 @@
void InstructionCodeGeneratorX86::VisitIntConstant(HIntConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderX86::VisitLongConstant(HLongConstant* constant) {
@@ -827,6 +889,7 @@
void InstructionCodeGeneratorX86::VisitLongConstant(HLongConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderX86::VisitFloatConstant(HFloatConstant* constant) {
@@ -837,6 +900,7 @@
void InstructionCodeGeneratorX86::VisitFloatConstant(HFloatConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderX86::VisitDoubleConstant(HDoubleConstant* constant) {
@@ -847,6 +911,7 @@
void InstructionCodeGeneratorX86::VisitDoubleConstant(HDoubleConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderX86::VisitReturnVoid(HReturnVoid* ret) {
@@ -854,6 +919,7 @@
}
void InstructionCodeGeneratorX86::VisitReturnVoid(HReturnVoid* ret) {
+ UNUSED(ret);
codegen_->GenerateFrameExit();
__ ret();
}
@@ -1422,6 +1488,7 @@
}
void InstructionCodeGeneratorX86::VisitParameterValue(HParameterValue* instruction) {
+ UNUSED(instruction);
}
void LocationsBuilderX86::VisitNot(HNot* not_) {
@@ -1464,7 +1531,6 @@
}
void InstructionCodeGeneratorX86::VisitCompare(HCompare* compare) {
- Label greater, done;
LocationSummary* locations = compare->GetLocations();
switch (compare->InputAt(0)->GetType()) {
case Primitive::kPrimLong: {
@@ -1516,6 +1582,7 @@
}
void InstructionCodeGeneratorX86::VisitPhi(HPhi* instruction) {
+ UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
@@ -1992,9 +2059,11 @@
void InstructionCodeGeneratorX86::VisitTemporary(HTemporary* temp) {
// Nothing to do, this is driven by the code generator.
+ UNUSED(temp);
}
void LocationsBuilderX86::VisitParallelMove(HParallelMove* instruction) {
+ UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
@@ -2135,20 +2204,37 @@
}
void LocationsBuilderX86::VisitLoadClass(HLoadClass* cls) {
+ LocationSummary::CallKind call_kind = cls->CanCallRuntime()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(cls, LocationSummary::kNoCall);
+ new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
locations->SetOut(Location::RequiresRegister());
}
void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) {
Register out = cls->GetLocations()->Out().As<Register>();
if (cls->IsReferrersClass()) {
+ DCHECK(!cls->CanCallRuntime());
+ DCHECK(!cls->MustGenerateClinitCheck());
codegen_->LoadCurrentMethod(out);
__ movl(out, Address(out, mirror::ArtMethod::DeclaringClassOffset().Int32Value()));
} else {
+ DCHECK(cls->CanCallRuntime());
codegen_->LoadCurrentMethod(out);
__ movl(out, Address(out, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value()));
__ movl(out, Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
+
+ SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86(
+ cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
+ codegen_->AddSlowPath(slow_path);
+ __ testl(out, out);
+ __ j(kEqual, slow_path->GetEntryLabel());
+ if (cls->MustGenerateClinitCheck()) {
+ GenerateClassInitializationCheck(slow_path, out);
+ } else {
+ __ Bind(slow_path->GetExitLabel());
+ }
}
}
@@ -2162,17 +2248,15 @@
}
void InstructionCodeGeneratorX86::VisitClinitCheck(HClinitCheck* check) {
- SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) ClinitCheckSlowPathX86(check);
+ // We assume the class is not null.
+ SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86(
+ check->GetLoadClass(), check, check->GetDexPc(), true);
codegen_->AddSlowPath(slow_path);
+ GenerateClassInitializationCheck(slow_path, check->GetLocations()->InAt(0).As<Register>());
+}
- LocationSummary* locations = check->GetLocations();
- // We remove the class as a live register, we know it's null or unused in the slow path.
- RegisterSet* register_set = locations->GetLiveRegisters();
- register_set->Remove(locations->InAt(0));
-
- Register class_reg = locations->InAt(0).As<Register>();
- __ testl(class_reg, class_reg);
- __ j(kEqual, slow_path->GetEntryLabel());
+void InstructionCodeGeneratorX86::GenerateClassInitializationCheck(
+ SlowPathCodeX86* slow_path, Register class_reg) {
__ cmpl(Address(class_reg, mirror::Class::StatusOffset().Int32Value()),
Immediate(mirror::Class::kStatusInitialized));
__ j(kLess, slow_path->GetEntryLabel());
@@ -2316,5 +2400,24 @@
}
}
+void LocationsBuilderX86::VisitLoadString(HLoadString* load) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) {
+ SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathX86(load);
+ codegen_->AddSlowPath(slow_path);
+
+ Register out = load->GetLocations()->Out().As<Register>();
+ codegen_->LoadCurrentMethod(out);
+ __ movl(out, Address(out, mirror::ArtMethod::DexCacheStringsOffset().Int32Value()));
+ __ movl(out, Address(out, CodeGenerator::GetCacheOffset(load->GetStringIndex())));
+ __ testl(out, out);
+ __ j(kEqual, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
+
} // namespace x86
} // namespace art
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index bcceaad..176a269 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -28,6 +28,7 @@
static constexpr size_t kX86WordSize = 4;
class CodeGeneratorX86;
+class SlowPathCodeX86;
static constexpr Register kParameterCoreRegisters[] = { ECX, EDX, EBX };
static constexpr RegisterPair kParameterCorePairRegisters[] = { ECX_EDX, EDX_EBX };
@@ -126,6 +127,7 @@
// is the block to branch to if the suspend check is not needed, and after
// the suspend call.
void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
+ void GenerateClassInitializationCheck(SlowPathCodeX86* slow_path, Register class_reg);
X86Assembler* const assembler_;
CodeGeneratorX86* const codegen_;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index e8d34e3..40eec9b 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -168,32 +168,86 @@
DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathX86_64);
};
-class ClinitCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
+class LoadClassSlowPathX86_64 : public SlowPathCodeX86_64 {
public:
- explicit ClinitCheckSlowPathX86_64(HClinitCheck* instruction) : instruction_(instruction) {}
+ LoadClassSlowPathX86_64(HLoadClass* cls,
+ HInstruction* at,
+ uint32_t dex_pc,
+ bool do_clinit)
+ : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ DCHECK(at->IsLoadClass() || at->IsClinitCheck());
+ }
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = at_->GetLocations();
CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
- codegen->SaveLiveRegisters(instruction_->GetLocations());
- HLoadClass* cls = instruction_->GetLoadClass();
+ codegen->SaveLiveRegisters(locations);
+
InvokeRuntimeCallingConvention calling_convention;
- __ movl(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(cls->GetTypeIndex()));
+ __ movl(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(cls_->GetTypeIndex()));
x64_codegen->LoadCurrentMethod(CpuRegister(calling_convention.GetRegisterAt(1)));
- __ gs()->call(Address::Absolute(
- QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInitializeStaticStorage), true));
+ __ gs()->call(Address::Absolute((do_clinit_
+ ? QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInitializeStaticStorage)
+ : QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInitializeType)), true));
+ codegen->RecordPcInfo(at_, dex_pc_);
- codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
- x64_codegen->Move(instruction_->GetLocations()->InAt(0), Location::RegisterLocation(RAX));
- codegen->RestoreLiveRegisters(instruction_->GetLocations());
+ // Move the class to the desired location.
+ if (locations->Out().IsValid()) {
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+ x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
+ }
+
+ codegen->RestoreLiveRegisters(locations);
__ jmp(GetExitLabel());
}
private:
- HClinitCheck* const instruction_;
+ // The class this slow path will load.
+ HLoadClass* const cls_;
- DISALLOW_COPY_AND_ASSIGN(ClinitCheckSlowPathX86_64);
+ // The instruction where this slow path is happening.
+ // (Might be the load class or an initialization check).
+ HInstruction* const at_;
+
+ // The dex PC of `at_`.
+ const uint32_t dex_pc_;
+
+ // Whether to initialize the class.
+ const bool do_clinit_;
+
+ DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathX86_64);
+};
+
+class LoadStringSlowPathX86_64 : public SlowPathCodeX86_64 {
+ public:
+ explicit LoadStringSlowPathX86_64(HLoadString* instruction) : instruction_(instruction) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+
+ CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
+ __ Bind(GetEntryLabel());
+ codegen->SaveLiveRegisters(locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ x64_codegen->LoadCurrentMethod(CpuRegister(calling_convention.GetRegisterAt(0)));
+ __ movl(CpuRegister(calling_convention.GetRegisterAt(1)),
+ Immediate(instruction_->GetStringIndex()));
+ __ gs()->call(Address::Absolute(
+ QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pResolveString), true));
+ codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+ x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
+ codegen->RestoreLiveRegisters(locations);
+ __ jmp(GetExitLabel());
+ }
+
+ private:
+ HLoadString* const instruction_;
+
+ DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathX86_64);
};
#undef __
@@ -526,6 +580,7 @@
}
void InstructionCodeGeneratorX86_64::VisitExit(HExit* exit) {
+ UNUSED(exit);
if (kIsDebugBuild) {
__ Comment("Unreachable");
__ int3();
@@ -614,6 +669,7 @@
void InstructionCodeGeneratorX86_64::VisitLoadLocal(HLoadLocal* load) {
// Nothing to do, this is driven by the code generator.
+ UNUSED(load);
}
void LocationsBuilderX86_64::VisitStoreLocal(HStoreLocal* store) {
@@ -641,6 +697,7 @@
}
void InstructionCodeGeneratorX86_64::VisitStoreLocal(HStoreLocal* store) {
+ UNUSED(store);
}
void LocationsBuilderX86_64::VisitCondition(HCondition* comp) {
@@ -763,6 +820,7 @@
void InstructionCodeGeneratorX86_64::VisitIntConstant(HIntConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderX86_64::VisitLongConstant(HLongConstant* constant) {
@@ -773,6 +831,7 @@
void InstructionCodeGeneratorX86_64::VisitLongConstant(HLongConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderX86_64::VisitFloatConstant(HFloatConstant* constant) {
@@ -783,6 +842,7 @@
void InstructionCodeGeneratorX86_64::VisitFloatConstant(HFloatConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderX86_64::VisitDoubleConstant(HDoubleConstant* constant) {
@@ -793,6 +853,7 @@
void InstructionCodeGeneratorX86_64::VisitDoubleConstant(HDoubleConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderX86_64::VisitReturnVoid(HReturnVoid* ret) {
@@ -800,6 +861,7 @@
}
void InstructionCodeGeneratorX86_64::VisitReturnVoid(HReturnVoid* ret) {
+ UNUSED(ret);
codegen_->GenerateFrameExit();
__ ret();
}
@@ -1351,6 +1413,7 @@
void InstructionCodeGeneratorX86_64::VisitParameterValue(HParameterValue* instruction) {
// Nothing to do, the parameter is already at its location.
+ UNUSED(instruction);
}
void LocationsBuilderX86_64::VisitNot(HNot* not_) {
@@ -1393,6 +1456,7 @@
}
void InstructionCodeGeneratorX86_64::VisitPhi(HPhi* instruction) {
+ UNUSED(instruction);
LOG(FATAL) << "Unimplemented";
}
@@ -1872,9 +1936,11 @@
void InstructionCodeGeneratorX86_64::VisitTemporary(HTemporary* temp) {
// Nothing to do, this is driven by the code generator.
+ UNUSED(temp);
}
void LocationsBuilderX86_64::VisitParallelMove(HParallelMove* instruction) {
+ UNUSED(instruction);
LOG(FATAL) << "Unimplemented";
}
@@ -2109,21 +2175,46 @@
__ popq(CpuRegister(reg));
}
+void InstructionCodeGeneratorX86_64::GenerateClassInitializationCheck(
+ SlowPathCodeX86_64* slow_path, CpuRegister class_reg) {
+ __ cmpl(Address(class_reg, mirror::Class::StatusOffset().Int32Value()),
+ Immediate(mirror::Class::kStatusInitialized));
+ __ j(kLess, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+ // No need for memory fence, thanks to the X86_64 memory model.
+}
+
void LocationsBuilderX86_64::VisitLoadClass(HLoadClass* cls) {
+ LocationSummary::CallKind call_kind = cls->CanCallRuntime()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(cls, LocationSummary::kNoCall);
+ new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
locations->SetOut(Location::RequiresRegister());
}
void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) {
CpuRegister out = cls->GetLocations()->Out().As<CpuRegister>();
if (cls->IsReferrersClass()) {
+ DCHECK(!cls->CanCallRuntime());
+ DCHECK(!cls->MustGenerateClinitCheck());
codegen_->LoadCurrentMethod(out);
__ movl(out, Address(out, mirror::ArtMethod::DeclaringClassOffset().Int32Value()));
} else {
+ DCHECK(cls->CanCallRuntime());
codegen_->LoadCurrentMethod(out);
__ movl(out, Address(out, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value()));
__ movl(out, Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
+ SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86_64(
+ cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
+ codegen_->AddSlowPath(slow_path);
+ __ testl(out, out);
+ __ j(kEqual, slow_path->GetEntryLabel());
+ if (cls->MustGenerateClinitCheck()) {
+ GenerateClassInitializationCheck(slow_path, out);
+ } else {
+ __ Bind(slow_path->GetExitLabel());
+ }
}
}
@@ -2137,22 +2228,11 @@
}
void InstructionCodeGeneratorX86_64::VisitClinitCheck(HClinitCheck* check) {
- SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) ClinitCheckSlowPathX86_64(check);
+ // We assume the class is not null.
+ SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86_64(
+ check->GetLoadClass(), check, check->GetDexPc(), true);
codegen_->AddSlowPath(slow_path);
-
- LocationSummary* locations = check->GetLocations();
- // We remove the class as a live register, we know it's null or unused in the slow path.
- RegisterSet* register_set = locations->GetLiveRegisters();
- register_set->Remove(locations->InAt(0));
-
- CpuRegister class_reg = locations->InAt(0).As<CpuRegister>();
- __ testl(class_reg, class_reg);
- __ j(kEqual, slow_path->GetEntryLabel());
- __ cmpl(Address(class_reg, mirror::Class::StatusOffset().Int32Value()),
- Immediate(mirror::Class::kStatusInitialized));
- __ j(kLess, slow_path->GetEntryLabel());
- __ Bind(slow_path->GetExitLabel());
- // No need for memory fence, thanks to the X86_64 memory model.
+ GenerateClassInitializationCheck(slow_path, check->GetLocations()->InAt(0).As<CpuRegister>());
}
void LocationsBuilderX86_64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
@@ -2270,5 +2350,24 @@
}
}
+void LocationsBuilderX86_64::VisitLoadString(HLoadString* load) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) {
+ SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathX86_64(load);
+ codegen_->AddSlowPath(slow_path);
+
+ CpuRegister out = load->GetLocations()->Out().As<CpuRegister>();
+ codegen_->LoadCurrentMethod(CpuRegister(out));
+ __ movl(out, Address(out, mirror::ArtMethod::DexCacheStringsOffset().Int32Value()));
+ __ movl(out, Address(out, CodeGenerator::GetCacheOffset(load->GetStringIndex())));
+ __ testl(out, out);
+ __ j(kEqual, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
+
} // namespace x86_64
} // namespace art
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 32d2702..0de3045 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -65,6 +65,7 @@
};
class CodeGeneratorX86_64;
+class SlowPathCodeX86_64;
class ParallelMoveResolverX86_64 : public ParallelMoveResolver {
public:
@@ -130,6 +131,7 @@
// is the block to branch to if the suspend check is not needed, and after
// the suspend call.
void GenerateSuspendCheck(HSuspendCheck* instruction, HBasicBlock* successor);
+ void GenerateClassInitializationCheck(SlowPathCodeX86_64* slow_path, CpuRegister class_reg);
X86_64Assembler* const assembler_;
CodeGeneratorX86_64* const codegen_;
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index 03951e2..68fcb25 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -373,9 +373,9 @@
PrepareForRegisterAllocation(graph).Run();
ASSERT_FALSE(equal->NeedsMaterialization());
- auto hook_before_codegen = [](HGraph* graph) {
- HBasicBlock* block = graph->GetEntryBlock()->GetSuccessors().Get(0);
- HParallelMove* move = new (graph->GetArena()) HParallelMove(graph->GetArena());
+ auto hook_before_codegen = [](HGraph* graph_in) {
+ HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors().Get(0);
+ HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena());
block->InsertInstructionBefore(move, block->GetLastInstruction());
};
@@ -408,11 +408,7 @@
MUL_TEST(LONG, MulLong);
#endif
-#if defined(__aarch64__)
-TEST(CodegenTest, DISABLED_ReturnMulIntLit8) {
-#else
TEST(CodegenTest, ReturnMulIntLit8) {
-#endif
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 4 << 12 | 0 << 8,
Instruction::MUL_INT_LIT8, 3 << 8 | 0,
@@ -421,11 +417,7 @@
TestCode(data, true, 12);
}
-#if defined(__aarch64__)
-TEST(CodegenTest, DISABLED_ReturnMulIntLit16) {
-#else
TEST(CodegenTest, ReturnMulIntLit16) {
-#endif
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 4 << 12 | 0 << 8,
Instruction::MUL_INT_LIT16, 3,
@@ -471,9 +463,9 @@
HReturn ret(&cmp_lt);
code_block->AddInstruction(&ret);
- auto hook_before_codegen = [](HGraph* graph) {
- HBasicBlock* block = graph->GetEntryBlock()->GetSuccessors().Get(0);
- HParallelMove* move = new (graph->GetArena()) HParallelMove(graph->GetArena());
+ auto hook_before_codegen = [](HGraph* graph_in) {
+ HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors().Get(0);
+ HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena());
block->InsertInstructionBefore(move, block->GetLastInstruction());
};
@@ -541,9 +533,9 @@
HReturn ret_ge(&cst_ge);
if_false_block->AddInstruction(&ret_ge);
- auto hook_before_codegen = [](HGraph* graph) {
- HBasicBlock* block = graph->GetEntryBlock()->GetSuccessors().Get(0);
- HParallelMove* move = new (graph->GetArena()) HParallelMove(graph->GetArena());
+ auto hook_before_codegen = [](HGraph* graph_in) {
+ HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors().Get(0);
+ HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena());
block->InsertInstructionBefore(move, block->GetLastInstruction());
};
diff --git a/compiler/optimizing/constant_folding.cc b/compiler/optimizing/constant_folding.cc
index 10a7e46..fca9933 100644
--- a/compiler/optimizing/constant_folding.cc
+++ b/compiler/optimizing/constant_folding.cc
@@ -28,9 +28,9 @@
// Traverse this block's instructions in (forward) order and
// replace the ones that can be statically evaluated by a
// compile-time counterpart.
- for (HInstructionIterator it(block->GetInstructions());
- !it.Done(); it.Advance()) {
- HInstruction* inst = it.Current();
+ for (HInstructionIterator inst_it(block->GetInstructions());
+ !inst_it.Done(); inst_it.Advance()) {
+ HInstruction* inst = inst_it.Current();
if (inst->IsBinaryOperation()) {
// Constant folding: replace `op(a, b)' with a constant at
// compile time if `a' and `b' are both constants.
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index 027b3d4..25168b5 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -54,8 +54,9 @@
SideEffects effects = SideEffects::None();
// Update `effects` with the side effects of all instructions in this block.
- for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
- HInstruction* instruction = it.Current();
+ for (HInstructionIterator inst_it(block->GetInstructions()); !inst_it.Done();
+ inst_it.Advance()) {
+ HInstruction* instruction = inst_it.Current();
effects = effects.Union(instruction->GetSideEffects());
if (effects.HasAllSideEffects()) {
break;
diff --git a/compiler/optimizing/gvn.h b/compiler/optimizing/gvn.h
index a98d714..8d2c774 100644
--- a/compiler/optimizing/gvn.h
+++ b/compiler/optimizing/gvn.h
@@ -25,7 +25,7 @@
* A node in the collision list of a ValueSet. Encodes the instruction,
* the hash code, and the next node in the collision list.
*/
-class ValueSetNode : public ArenaObject {
+class ValueSetNode : public ArenaObject<kArenaAllocMisc> {
public:
ValueSetNode(HInstruction* instruction, size_t hash_code, ValueSetNode* next)
: instruction_(instruction), hash_code_(hash_code), next_(next) {}
@@ -52,7 +52,7 @@
* if there is one in the set. In GVN, we would say those instructions have the
* same "number".
*/
-class ValueSet : public ArenaObject {
+class ValueSet : public ArenaObject<kArenaAllocMisc> {
public:
explicit ValueSet(ArenaAllocator* allocator)
: allocator_(allocator), number_of_entries_(0), collisions_(nullptr) {
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index 94aded6..bed688b 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -27,6 +27,9 @@
class HConstant;
class HInstruction;
+class Location;
+
+std::ostream& operator<<(std::ostream& os, const Location& location);
/**
* A Location is an abstraction over the potential location
@@ -71,16 +74,16 @@
Location() : value_(kInvalid) {
// Verify that non-constant location kinds do not interfere with kConstant.
- COMPILE_ASSERT((kInvalid & kLocationConstantMask) != kConstant, TagError);
- COMPILE_ASSERT((kUnallocated & kLocationConstantMask) != kConstant, TagError);
- COMPILE_ASSERT((kStackSlot & kLocationConstantMask) != kConstant, TagError);
- COMPILE_ASSERT((kDoubleStackSlot & kLocationConstantMask) != kConstant, TagError);
- COMPILE_ASSERT((kRegister & kLocationConstantMask) != kConstant, TagError);
- COMPILE_ASSERT((kQuickParameter & kLocationConstantMask) != kConstant, TagError);
- COMPILE_ASSERT((kFpuRegister & kLocationConstantMask) != kConstant, TagError);
- COMPILE_ASSERT((kRegisterPair & kLocationConstantMask) != kConstant, TagError);
- COMPILE_ASSERT((kFpuRegisterPair & kLocationConstantMask) != kConstant, TagError);
- COMPILE_ASSERT((kConstant & kLocationConstantMask) == kConstant, TagError);
+ static_assert((kInvalid & kLocationConstantMask) != kConstant, "TagError");
+ static_assert((kUnallocated & kLocationConstantMask) != kConstant, "TagError");
+ static_assert((kStackSlot & kLocationConstantMask) != kConstant, "TagError");
+ static_assert((kDoubleStackSlot & kLocationConstantMask) != kConstant, "TagError");
+ static_assert((kRegister & kLocationConstantMask) != kConstant, "TagError");
+ static_assert((kQuickParameter & kLocationConstantMask) != kConstant, "TagError");
+ static_assert((kFpuRegister & kLocationConstantMask) != kConstant, "TagError");
+ static_assert((kRegisterPair & kLocationConstantMask) != kConstant, "TagError");
+ static_assert((kFpuRegisterPair & kLocationConstantMask) != kConstant, "TagError");
+ static_assert((kConstant & kLocationConstantMask) == kConstant, "TagError");
DCHECK(!IsValid());
}
@@ -228,13 +231,18 @@
return GetPayload() - kStackIndexBias + word_size;
}
- static Location QuickParameter(uint32_t parameter_index) {
- return Location(kQuickParameter, parameter_index);
+ static Location QuickParameter(uint16_t register_index, uint16_t stack_index) {
+ return Location(kQuickParameter, register_index << 16 | stack_index);
}
- uint32_t GetQuickParameterIndex() const {
+ uint32_t GetQuickParameterRegisterIndex() const {
DCHECK(IsQuickParameter());
- return GetPayload();
+ return GetPayload() >> 16;
+ }
+
+ uint32_t GetQuickParameterStackIndex() const {
+ DCHECK(IsQuickParameter());
+ return GetPayload() & 0xFFFF;
}
bool IsQuickParameter() const {
@@ -346,6 +354,8 @@
// way that none of them can be interpreted as a kConstant tag.
uintptr_t value_;
};
+std::ostream& operator<<(std::ostream& os, const Location::Kind& rhs);
+std::ostream& operator<<(std::ostream& os, const Location::Policy& rhs);
class RegisterSet : public ValueObject {
public:
@@ -364,7 +374,7 @@
if (loc.IsRegister()) {
core_registers_ &= ~(1 << loc.reg());
} else {
- DCHECK(loc.IsFpuRegister());
+ DCHECK(loc.IsFpuRegister()) << loc;
floating_point_registers_ &= ~(1 << loc.reg());
}
}
@@ -396,7 +406,7 @@
* The intent is to have the code for generating the instruction independent of
* register allocation. A register allocator just has to provide a LocationSummary.
*/
-class LocationSummary : public ArenaObject {
+class LocationSummary : public ArenaObject<kArenaAllocMisc> {
public:
enum CallKind {
kNoCall,
@@ -521,8 +531,6 @@
DISALLOW_COPY_AND_ASSIGN(LocationSummary);
};
-std::ostream& operator<<(std::ostream& os, const Location& location);
-
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_LOCATIONS_H_
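The Location::QuickParameter change above packs two 16-bit indices into a single payload: the register index in the upper half and the stack index in the lower half, which is what GetQuickParameterRegisterIndex and GetQuickParameterStackIndex then recover. A small self-contained sketch of that encoding (function names are illustrative, not ART's):

// Sketch of packing a register index and a stack index into one 32-bit
// payload, mirroring the encoding used by Location::QuickParameter.
#include <cassert>
#include <cstdint>

uint32_t PackQuickParameter(uint16_t register_index, uint16_t stack_index) {
  return (static_cast<uint32_t>(register_index) << 16) | stack_index;
}

uint16_t RegisterIndexOf(uint32_t payload) {
  return static_cast<uint16_t>(payload >> 16);
}

uint16_t StackIndexOf(uint32_t payload) {
  return static_cast<uint16_t>(payload & 0xFFFF);
}

int main() {
  uint32_t payload = PackQuickParameter(/*register_index=*/1, /*stack_index=*/3);
  assert(RegisterIndexOf(payload) == 1);
  assert(StackIndexOf(payload) == 3);
  return 0;
}

On x86 the two indices coincide because floating point values travel in core registers, so the call site passes the same parameter index twice, while the ARM calling-convention code above passes a separately tracked stack index.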
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index d624ad5..8cb2ef6 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -647,4 +647,16 @@
return true;
}
+std::ostream& operator<<(std::ostream& os, const HInstruction::InstructionKind& rhs) {
+#define DECLARE_CASE(type, super) case HInstruction::k##type: os << #type; break;
+ switch (rhs) {
+ FOR_EACH_INSTRUCTION(DECLARE_CASE)
+ default:
+ os << "Unknown instruction kind " << static_cast<int>(rhs);
+ break;
+ }
+#undef DECLARE_CASE
+ return os;
+}
+
} // namespace art
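The new operator<< for HInstruction::InstructionKind leans on the FOR_EACH_INSTRUCTION X-macro so that every instruction kind gets a case printing its own name, and the printed list can never drift out of sync with the instruction list. A standalone sketch of the same pattern with a toy instruction list (the macro and enum here are invented for illustration):

// X-macro sketch: one list macro drives both the enum definition and the
// name printing, so adding an entry updates both places at once.
#include <iostream>

#define FOR_EACH_TOY_INSTRUCTION(M) \
  M(Add)                            \
  M(Mul)                            \
  M(Return)

enum class ToyKind {
#define DECLARE_KIND(type) k##type,
  FOR_EACH_TOY_INSTRUCTION(DECLARE_KIND)
#undef DECLARE_KIND
};

std::ostream& operator<<(std::ostream& os, ToyKind kind) {
  switch (kind) {
#define DECLARE_CASE(type) case ToyKind::k##type: os << #type; break;
    FOR_EACH_TOY_INSTRUCTION(DECLARE_CASE)
#undef DECLARE_CASE
    default: os << "Unknown kind"; break;
  }
  return os;
}

int main() {
  std::cout << ToyKind::kMul << "\n";  // Prints "Mul".
  return 0;
}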
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 86c36b8..79638b3 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -79,12 +79,14 @@
};
// Control-flow graph of a method. Contains a list of basic blocks.
-class HGraph : public ArenaObject {
+class HGraph : public ArenaObject<kArenaAllocMisc> {
public:
explicit HGraph(ArenaAllocator* arena)
: arena_(arena),
blocks_(arena, kDefaultNumberOfBlocks),
reverse_post_order_(arena, kDefaultNumberOfBlocks),
+ entry_block_(nullptr),
+ exit_block_(nullptr),
maximum_number_of_out_vregs_(0),
number_of_vregs_(0),
number_of_in_vregs_(0),
@@ -199,7 +201,7 @@
DISALLOW_COPY_AND_ASSIGN(HGraph);
};
-class HLoopInformation : public ArenaObject {
+class HLoopInformation : public ArenaObject<kArenaAllocMisc> {
public:
HLoopInformation(HBasicBlock* header, HGraph* graph)
: header_(header),
@@ -278,7 +280,7 @@
// as a double linked list. Each block knows its predecessors and
// successors.
-class HBasicBlock : public ArenaObject {
+class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
public:
explicit HBasicBlock(HGraph* graph, uint32_t dex_pc = kNoDexPc)
: graph_(graph),
@@ -489,10 +491,11 @@
M(IntConstant, Constant) \
M(InvokeStatic, Invoke) \
M(InvokeVirtual, Invoke) \
- M(LoadClass, Instruction) \
M(LessThan, Condition) \
M(LessThanOrEqual, Condition) \
+ M(LoadClass, Instruction) \
M(LoadLocal, Instruction) \
+ M(LoadString, Instruction) \
M(Local, Instruction) \
M(LongConstant, Constant) \
M(Mul, BinaryOperation) \
@@ -536,7 +539,7 @@
virtual void Accept(HGraphVisitor* visitor)
template <typename T>
-class HUseListNode : public ArenaObject {
+class HUseListNode : public ArenaObject<kArenaAllocMisc> {
public:
HUseListNode(T* user, size_t index, HUseListNode* tail)
: user_(user), index_(index), tail_(tail) {}
@@ -618,7 +621,7 @@
size_t flags_;
};
-class HInstruction : public ArenaObject {
+class HInstruction : public ArenaObject<kArenaAllocMisc> {
public:
explicit HInstruction(SideEffects side_effects)
: previous_(nullptr),
@@ -737,12 +740,18 @@
virtual bool CanBeMoved() const { return false; }
// Returns whether the two instructions are of the same kind.
- virtual bool InstructionTypeEquals(HInstruction* other) const { return false; }
+ virtual bool InstructionTypeEquals(HInstruction* other) const {
+ UNUSED(other);
+ return false;
+ }
// Returns whether any data encoded in the two instructions is equal.
// This method does not look at the inputs. Both instructions must be
// of the same type, otherwise the method has undefined behavior.
- virtual bool InstructionDataEquals(HInstruction* other) const { return false; }
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ UNUSED(other);
+ return false;
+ }
// Returns whether two instructions are equal, that is:
// 1) They have the same type and contain the same data,
@@ -807,6 +816,7 @@
DISALLOW_COPY_AND_ASSIGN(HInstruction);
};
+std::ostream& operator<<(std::ostream& os, const HInstruction::InstructionKind& rhs);
template<typename T>
class HUseIterator : public ValueObject {
@@ -832,7 +842,7 @@
};
// A HEnvironment object contains the values of virtual registers at a given location.
-class HEnvironment : public ArenaObject {
+class HEnvironment : public ArenaObject<kArenaAllocMisc> {
public:
HEnvironment(ArenaAllocator* arena, size_t number_of_vregs) : vregs_(arena, number_of_vregs) {
vregs_.SetSize(number_of_vregs);
@@ -964,14 +974,14 @@
public:
intptr_t length() const { return 0; }
const T& operator[](intptr_t i) const {
+ UNUSED(i);
LOG(FATAL) << "Unreachable";
- static T sentinel = 0;
- return sentinel;
+ UNREACHABLE();
}
T& operator[](intptr_t i) {
+ UNUSED(i);
LOG(FATAL) << "Unreachable";
- static T sentinel = 0;
- return sentinel;
+ UNREACHABLE();
}
};
@@ -1109,7 +1119,10 @@
Primitive::Type GetResultType() const { return GetType(); }
virtual bool CanBeMoved() const { return true; }
- virtual bool InstructionDataEquals(HInstruction* other) const { return true; }
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ UNUSED(other);
+ return true;
+ }
// Try to statically evaluate `operation` and return a HConstant
// containing the result of this evaluation. If `operation` cannot
@@ -1142,7 +1155,10 @@
virtual bool IsCommutative() { return false; }
virtual bool CanBeMoved() const { return true; }
- virtual bool InstructionDataEquals(HInstruction* other) const { return true; }
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ UNUSED(other);
+ return true;
+ }
// Try to statically evaluate `operation` and return a HConstant
// containing the result of this evaluation. If `operation` cannot
@@ -1731,7 +1747,10 @@
: HUnaryOperation(result_type, input) {}
virtual bool CanBeMoved() const { return true; }
- virtual bool InstructionDataEquals(HInstruction* other) const { return true; }
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ UNUSED(other);
+ return true;
+ }
virtual int32_t Evaluate(int32_t x) const OVERRIDE { return ~x; }
virtual int64_t Evaluate(int64_t x) const OVERRIDE { return ~x; }
@@ -1791,7 +1810,10 @@
}
virtual bool CanBeMoved() const { return true; }
- virtual bool InstructionDataEquals(HInstruction* other) const { return true; }
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ UNUSED(other);
+ return true;
+ }
virtual bool NeedsEnvironment() const { return true; }
@@ -1883,7 +1905,10 @@
}
virtual bool CanBeMoved() const { return true; }
- virtual bool InstructionDataEquals(HInstruction* other) const { return true; }
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ UNUSED(other);
+ return true;
+ }
void SetType(Primitive::Type type) { type_ = type; }
DECLARE_INSTRUCTION(ArrayGet);
@@ -1947,7 +1972,10 @@
}
virtual bool CanBeMoved() const { return true; }
- virtual bool InstructionDataEquals(HInstruction* other) const { return true; }
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ UNUSED(other);
+ return true;
+ }
DECLARE_INSTRUCTION(ArrayLength);
@@ -1965,7 +1993,10 @@
}
virtual bool CanBeMoved() const { return true; }
- virtual bool InstructionDataEquals(HInstruction* other) const { return true; }
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ UNUSED(other);
+ return true;
+ }
virtual bool NeedsEnvironment() const { return true; }
@@ -2021,8 +2052,6 @@
DISALLOW_COPY_AND_ASSIGN(HSuspendCheck);
};
-// TODO: Make this class handle the case the load is null (dex cache
-// is null).
/**
* Instruction to load a Class object.
*/
@@ -2030,13 +2059,14 @@
public:
HLoadClass(uint16_t type_index,
bool is_referrers_class,
- bool is_initialized,
uint32_t dex_pc)
: HExpression(Primitive::kPrimNot, SideEffects::None()),
type_index_(type_index),
is_referrers_class_(is_referrers_class),
- is_initialized_(is_initialized),
- dex_pc_(dex_pc) {}
+ dex_pc_(dex_pc),
+ generate_clinit_check_(false) {}
+
+ bool CanBeMoved() const OVERRIDE { return true; }
bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
return other->AsLoadClass()->type_index_ == type_index_;
@@ -2046,24 +2076,69 @@
uint32_t GetDexPc() const { return dex_pc_; }
uint16_t GetTypeIndex() const { return type_index_; }
+ bool IsReferrersClass() const { return is_referrers_class_; }
- bool NeedsInitialization() const {
- return !is_initialized_ && !is_referrers_class_;
+ bool NeedsEnvironment() const OVERRIDE {
+ // Will call runtime and load the class if the class is not loaded yet.
+ // TODO: finer grain decision.
+ return !is_referrers_class_;
}
- bool IsReferrersClass() const { return is_referrers_class_; }
+ bool MustGenerateClinitCheck() const {
+ return generate_clinit_check_;
+ }
+
+ void SetMustGenerateClinitCheck() {
+ generate_clinit_check_ = true;
+ }
+
+ bool CanCallRuntime() const {
+ return MustGenerateClinitCheck() || !is_referrers_class_;
+ }
DECLARE_INSTRUCTION(LoadClass);
private:
const uint16_t type_index_;
const bool is_referrers_class_;
- const bool is_initialized_;
const uint32_t dex_pc_;
+ // Whether this instruction must generate the initialization check.
+ // Used for code generation.
+ bool generate_clinit_check_;
DISALLOW_COPY_AND_ASSIGN(HLoadClass);
};
+class HLoadString : public HExpression<0> {
+ public:
+ HLoadString(uint32_t string_index, uint32_t dex_pc)
+ : HExpression(Primitive::kPrimNot, SideEffects::None()),
+ string_index_(string_index),
+ dex_pc_(dex_pc) {}
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+
+ bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
+ return other->AsLoadString()->string_index_ == string_index_;
+ }
+
+ size_t ComputeHashCode() const OVERRIDE { return string_index_; }
+
+ uint32_t GetDexPc() const { return dex_pc_; }
+ uint32_t GetStringIndex() const { return string_index_; }
+
+ // TODO: Can we deopt or debug when we resolve a string?
+ bool NeedsEnvironment() const OVERRIDE { return false; }
+
+ DECLARE_INSTRUCTION(LoadString);
+
+ private:
+ const uint32_t string_index_;
+ const uint32_t dex_pc_;
+
+ DISALLOW_COPY_AND_ASSIGN(HLoadString);
+};
+
// TODO: Pass this check to HInvokeStatic nodes.
/**
* Performs an initialization check on its Class object input.
@@ -2076,6 +2151,12 @@
SetRawInputAt(0, constant);
}
+ bool CanBeMoved() const OVERRIDE { return true; }
+ bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
+ UNUSED(other);
+ return true;
+ }
+
bool NeedsEnvironment() const OVERRIDE {
// May call runtime to initialize the class.
return true;
@@ -2147,7 +2228,7 @@
DISALLOW_COPY_AND_ASSIGN(HStaticFieldSet);
};
-class MoveOperands : public ArenaObject {
+class MoveOperands : public ArenaObject<kArenaAllocMisc> {
public:
MoveOperands(Location source, Location destination, HInstruction* instruction)
: source_(source), destination_(destination), instruction_(instruction) {}
@@ -2248,7 +2329,7 @@
explicit HGraphVisitor(HGraph* graph) : graph_(graph) {}
virtual ~HGraphVisitor() {}
- virtual void VisitInstruction(HInstruction* instruction) {}
+ virtual void VisitInstruction(HInstruction* instruction) { UNUSED(instruction); }
virtual void VisitBasicBlock(HBasicBlock* block);
// Visit the graph following basic block insertion order.
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 5350dcb..08b74c7 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -213,6 +213,7 @@
uint32_t method_idx,
jobject class_loader,
const DexFile& dex_file) const {
+ UNUSED(invoke_type);
total_compiled_methods_++;
InstructionSet instruction_set = GetCompilerDriver()->GetInstructionSet();
// Always use the thumb2 assembler: some runtime functionality (like implicit stack
@@ -226,6 +227,10 @@
return nullptr;
}
+ if (Compiler::IsPathologicalCase(*code_item, method_idx, dex_file)) {
+ return nullptr;
+ }
+
DexCompilationUnit dex_compilation_unit(
nullptr, class_loader, art::Runtime::Current()->GetClassLinker(), dex_file, code_item,
class_def_idx, method_idx, access_flags,
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index 5b693dd..aae7f9b 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -46,7 +46,7 @@
size_t number_of_ranges,
ArenaAllocator* allocator,
int reg = -1) {
- LiveInterval* interval = new (allocator) LiveInterval(allocator, Primitive::kPrimInt);
+ LiveInterval* interval = LiveInterval::MakeInterval(allocator, Primitive::kPrimInt);
for (size_t i = number_of_ranges; i > 0; --i) {
interval->AddRange(ranges[i - 1][0], ranges[i - 1][1]);
}
diff --git a/compiler/optimizing/parallel_move_resolver.cc b/compiler/optimizing/parallel_move_resolver.cc
index c71d93e..1e93ece 100644
--- a/compiler/optimizing/parallel_move_resolver.cc
+++ b/compiler/optimizing/parallel_move_resolver.cc
@@ -130,13 +130,13 @@
// this move's source or destination needs to have their source
// changed to reflect the state of affairs after the swap.
Location source = move->GetSource();
- Location destination = move->GetDestination();
+ Location swap_destination = move->GetDestination();
move->Eliminate();
for (size_t i = 0; i < moves_.Size(); ++i) {
const MoveOperands& other_move = *moves_.Get(i);
if (other_move.Blocks(source)) {
- moves_.Get(i)->SetSource(destination);
- } else if (other_move.Blocks(destination)) {
+ moves_.Get(i)->SetSource(swap_destination);
+ } else if (other_move.Blocks(swap_destination)) {
moves_.Get(i)->SetSource(source);
}
}
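// A toy model (not ART code) of the fix-up loop above: after a swap is emitted
// to break a move cycle, every remaining pending move that read from one of
// the two swapped locations must now read from the other. Locations are plain
// ints here purely for illustration.
#include <cassert>
#include <vector>

struct ToyMove {
  int source;
  int destination;
};

void FixupAfterSwap(std::vector<ToyMove>* moves, int source, int swap_destination) {
  for (ToyMove& other : *moves) {
    if (other.source == source) {
      other.source = swap_destination;  // the value it wanted now lives there
    } else if (other.source == swap_destination) {
      other.source = source;
    }
  }
}

int main() {
  std::vector<ToyMove> pending = {{1, 3}, {2, 4}};
  FixupAfterSwap(&pending, /*source=*/1, /*swap_destination=*/2);
  assert(pending[0].source == 2);
  assert(pending[1].source == 1);
  return 0;
}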
diff --git a/compiler/optimizing/parallel_move_test.cc b/compiler/optimizing/parallel_move_test.cc
index 2bdcc61..62629bc 100644
--- a/compiler/optimizing/parallel_move_test.cc
+++ b/compiler/optimizing/parallel_move_test.cc
@@ -50,8 +50,8 @@
<< ")";
}
- virtual void SpillScratch(int reg) {}
- virtual void RestoreScratch(int reg) {}
+ virtual void SpillScratch(int reg ATTRIBUTE_UNUSED) {}
+ virtual void RestoreScratch(int reg ATTRIBUTE_UNUSED) {}
std::string GetMessage() const {
return message_.str();
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index 2387141..c4db840 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -23,8 +23,9 @@
for (HReversePostOrderIterator it(*GetGraph()); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
// No need to visit the phis.
- for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
- it.Current()->Accept(this);
+ for (HInstructionIterator inst_it(block->GetInstructions()); !inst_it.Done();
+ inst_it.Advance()) {
+ inst_it.Current()->Accept(this);
}
}
}
@@ -38,7 +39,14 @@
}
void PrepareForRegisterAllocation::VisitClinitCheck(HClinitCheck* check) {
- check->ReplaceWith(check->InputAt(0));
+ HLoadClass* cls = check->GetLoadClass();
+ check->ReplaceWith(cls);
+ if (check->GetPrevious() == cls) {
+ // Pass the initialization duty to the `HLoadClass` instruction,
+ // and remove the instruction from the graph.
+ cls->SetMustGenerateClinitCheck();
+ check->GetBlock()->RemoveInstruction(check);
+ }
}
void PrepareForRegisterAllocation::VisitCondition(HCondition* condition) {
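// A self-contained toy model (not ART code) of the folding performed by
// VisitClinitCheck above: when the class load directly precedes its clinit
// check, the check is absorbed into the load and removed. `ToyLoadClass` and
// `ToyClinitCheck` are illustrative stand-ins for the real HInstruction types.
#include <cassert>

struct ToyInsn {
  ToyInsn* previous = nullptr;  // instruction emitted immediately before this one
};

struct ToyLoadClass : ToyInsn {
  bool generate_clinit_check = false;
};

struct ToyClinitCheck : ToyInsn {
  ToyLoadClass* load_class = nullptr;  // input 0 of the check
  bool removed = false;
};

void FoldClinitCheck(ToyClinitCheck* check) {
  ToyLoadClass* cls = check->load_class;
  // Uses of the check are redirected to the class load (ReplaceWith in ART);
  // the check itself can only go away if nothing executes between the two.
  if (check->previous == cls) {
    cls->generate_clinit_check = true;  // the load now also initializes the class
    check->removed = true;              // stands in for RemoveInstruction()
  }
}

int main() {
  ToyLoadClass cls;
  ToyClinitCheck check;
  check.previous = &cls;
  check.load_class = &cls;
  FoldClinitCheck(&check);
  assert(cls.generate_clinit_check && check.removed);
  return 0;
}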
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index f95c4a4..2a9c885 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -126,11 +126,12 @@
// is the one with the lowest start position.
for (HLinearPostOrderIterator it(liveness_); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
- for (HBackwardInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
- ProcessInstruction(it.Current());
+ for (HBackwardInstructionIterator back_it(block->GetInstructions()); !back_it.Done();
+ back_it.Advance()) {
+ ProcessInstruction(back_it.Current());
}
- for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
- ProcessInstruction(it.Current());
+ for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
+ ProcessInstruction(inst_it.Current());
}
}
@@ -141,6 +142,10 @@
for (size_t i = 0, e = physical_core_register_intervals_.Size(); i < e; ++i) {
LiveInterval* fixed = physical_core_register_intervals_.Get(i);
if (fixed != nullptr) {
+ // Fixed interval is added to inactive_ instead of unhandled_.
+ // It's also the only type of inactive interval whose start position
+ // can be after the current interval during linear scan.
+ // Fixed interval is never split and never moves to unhandled_.
inactive_.Add(fixed);
}
}
@@ -160,6 +165,10 @@
for (size_t i = 0, e = physical_fp_register_intervals_.Size(); i < e; ++i) {
LiveInterval* fixed = physical_fp_register_intervals_.Get(i);
if (fixed != nullptr) {
+ // Fixed interval is added to inactive_ instead of unhandled_.
+ // It's also the only type of inactive interval whose start position
+ // can be after the current interval during linear scan.
+ // Fixed interval is never split and never moves to unhandled_.
inactive_.Add(fixed);
}
}
@@ -253,9 +262,6 @@
current->SetFrom(position + 1);
current->SetRegister(output.reg());
BlockRegister(output, position, position + 1);
- } else if (!locations->OutputOverlapsWithInputs()) {
- // Shift the interval's start by one to not interfere with the inputs.
- current->SetFrom(position + 1);
} else if (output.IsStackSlot() || output.IsDoubleStackSlot()) {
current->SetSpillSlot(output.GetStackIndex());
}
@@ -266,15 +272,17 @@
size_t first_register_use = current->FirstRegisterUse();
if (first_register_use != kNoLifetime) {
LiveInterval* split = Split(current, first_register_use - 1);
- // Don't add direclty to `unhandled`, it needs to be sorted and the start
+ // Don't add directly to `unhandled`, it needs to be sorted and the start
// of this new interval might be after intervals already in the list.
AddSorted(&unhandled, split);
} else {
// Nothing to do, we won't allocate a register for this value.
}
} else {
- DCHECK(unhandled.IsEmpty() || current->StartsBeforeOrAt(unhandled.Peek()));
- unhandled.Add(current);
+ // Don't add directly to `unhandled`, temp or safepoint intervals
+ // for this instruction may have been added, and those can be
+ // processed first.
+ AddSorted(&unhandled, current);
}
}
@@ -432,6 +440,27 @@
stream << std::endl;
}
+void RegisterAllocator::DumpAllIntervals(std::ostream& stream) const {
+ stream << "inactive: " << std::endl;
+ for (size_t i = 0; i < inactive_.Size(); i ++) {
+ DumpInterval(stream, inactive_.Get(i));
+ }
+ stream << "active: " << std::endl;
+ for (size_t i = 0; i < active_.Size(); i ++) {
+ DumpInterval(stream, active_.Get(i));
+ }
+ stream << "unhandled: " << std::endl;
+ auto unhandled = (unhandled_ != nullptr) ?
+ unhandled_ : &unhandled_core_intervals_;
+ for (size_t i = 0; i < unhandled->Size(); i ++) {
+ DumpInterval(stream, unhandled->Get(i));
+ }
+ stream << "handled: " << std::endl;
+ for (size_t i = 0; i < handled_.Size(); i ++) {
+ DumpInterval(stream, handled_.Get(i));
+ }
+}
+
// By the book implementation of a linear scan register allocator.
void RegisterAllocator::LinearScan() {
while (!unhandled_->IsEmpty()) {
@@ -441,6 +470,10 @@
DCHECK(unhandled_->IsEmpty() || unhandled_->Peek()->GetStart() >= current->GetStart());
size_t position = current->GetStart();
+ // Remember the inactive_ size here since the ones moved to inactive_ from
+ // active_ below shouldn't need to be re-checked.
+ size_t inactive_intervals_to_handle = inactive_.Size();
+
// (2) Remove currently active intervals that are dead at this position.
// Move active intervals that have a lifetime hole at this position
// to inactive.
@@ -459,15 +492,18 @@
// (3) Remove currently inactive intervals that are dead at this position.
// Move inactive intervals that cover this position to active.
- for (size_t i = 0; i < inactive_.Size(); ++i) {
+ for (size_t i = 0; i < inactive_intervals_to_handle; ++i) {
LiveInterval* interval = inactive_.Get(i);
+ DCHECK(interval->GetStart() < position || interval->IsFixed());
if (interval->IsDeadAt(position)) {
inactive_.Delete(interval);
--i;
+ --inactive_intervals_to_handle;
handled_.Add(interval);
} else if (interval->Covers(position)) {
inactive_.Delete(interval);
--i;
+ --inactive_intervals_to_handle;
active_.Add(interval);
}
}
@@ -506,20 +542,6 @@
free_until[i] = kMaxLifetimePosition;
}
- // For each inactive interval, set its register to be free until
- // the next intersection with `current`.
- // Thanks to SSA, this should only be needed for intervals
- // that are the result of a split.
- for (size_t i = 0, e = inactive_.Size(); i < e; ++i) {
- LiveInterval* inactive = inactive_.Get(i);
- DCHECK(inactive->HasRegister());
- size_t next_intersection = inactive->FirstIntersectionWith(current);
- if (next_intersection != kNoLifetime) {
- free_until[inactive->GetRegister()] =
- std::min(free_until[inactive->GetRegister()], next_intersection);
- }
- }
-
// For each active interval, set its register to not free.
for (size_t i = 0, e = active_.Size(); i < e; ++i) {
LiveInterval* interval = active_.Get(i);
@@ -527,6 +549,33 @@
free_until[interval->GetRegister()] = 0;
}
+ // For each inactive interval, set its register to be free until
+ // the next intersection with `current`.
+ for (size_t i = 0, e = inactive_.Size(); i < e; ++i) {
+ LiveInterval* inactive = inactive_.Get(i);
+ // Temp/Slow-path-safepoint interval has no holes.
+ DCHECK(!inactive->IsTemp() && !inactive->IsSlowPathSafepoint());
+ if (!current->IsSplit() && !inactive->IsFixed()) {
+ // Neither current nor inactive are fixed.
+ // Thanks to SSA, a non-split interval starting in a hole of an
+ // inactive interval should never intersect with that inactive interval.
+ // Only if it's not fixed though, because fixed intervals don't come from SSA.
+ DCHECK_EQ(inactive->FirstIntersectionWith(current), kNoLifetime);
+ continue;
+ }
+
+ DCHECK(inactive->HasRegister());
+ if (free_until[inactive->GetRegister()] == 0) {
+ // Already used by some active interval. No need to intersect.
+ continue;
+ }
+ size_t next_intersection = inactive->FirstIntersectionWith(current);
+ if (next_intersection != kNoLifetime) {
+ free_until[inactive->GetRegister()] =
+ std::min(free_until[inactive->GetRegister()], next_intersection);
+ }
+ }
+
int reg = -1;
if (current->HasRegister()) {
// Some instructions have a fixed register output.
@@ -605,10 +654,18 @@
// For each inactive interval, find the next use of its register after the
// start of current.
- // Thanks to SSA, this should only be needed for intervals
- // that are the result of a split.
for (size_t i = 0, e = inactive_.Size(); i < e; ++i) {
LiveInterval* inactive = inactive_.Get(i);
+ // Temp/Slow-path-safepoint interval has no holes.
+ DCHECK(!inactive->IsTemp() && !inactive->IsSlowPathSafepoint());
+ if (!current->IsSplit() && !inactive->IsFixed()) {
+ // Neither current nor inactive are fixed.
+ // Thanks to SSA, a non-split interval starting in a hole of an
+ // inactive interval should never intersect with that inactive interval.
+ // Only if it's not fixed though, because fixed intervals don't come from SSA.
+ DCHECK_EQ(inactive->FirstIntersectionWith(current), kNoLifetime);
+ continue;
+ }
DCHECK(inactive->HasRegister());
size_t next_intersection = inactive->FirstIntersectionWith(current);
if (next_intersection != kNoLifetime) {
@@ -660,20 +717,29 @@
}
}
- for (size_t i = 0; i < inactive_.Size(); ++i) {
+ for (size_t i = 0, e = inactive_.Size(); i < e; ++i) {
LiveInterval* inactive = inactive_.Get(i);
if (inactive->GetRegister() == reg) {
+ if (!current->IsSplit() && !inactive->IsFixed()) {
+ // Neither current nor inactive are fixed.
+ // Thanks to SSA, a non-split interval starting in a hole of an
+ // inactive interval should never intersect with that inactive interval.
+ // Only if it's not fixed though, because fixed intervals don't come from SSA.
+ DCHECK_EQ(inactive->FirstIntersectionWith(current), kNoLifetime);
+ continue;
+ }
size_t next_intersection = inactive->FirstIntersectionWith(current);
if (next_intersection != kNoLifetime) {
if (inactive->IsFixed()) {
LiveInterval* split = Split(current, next_intersection);
AddSorted(unhandled_, split);
} else {
- LiveInterval* split = Split(inactive, current->GetStart());
+ LiveInterval* split = Split(inactive, next_intersection);
inactive_.DeleteAt(i);
+ --i;
+ --e;
handled_.Add(inactive);
AddSorted(unhandled_, split);
- --i;
}
}
}
@@ -812,7 +878,7 @@
HInstruction* at = liveness_.GetInstructionFromPosition(position / 2);
if (at == nullptr) {
- // Block boundary, don't no anything the connection of split siblings will handle it.
+ // Block boundary, don't do anything; the connection of split siblings will handle it.
return;
}
HParallelMove* move;
@@ -973,7 +1039,14 @@
HInstruction* safepoint = safepoints_.Get(i);
size_t position = safepoint->GetLifetimePosition();
LocationSummary* locations = safepoint->GetLocations();
- if (!current->Covers(position)) continue;
+ if (!current->Covers(position)) {
+ continue;
+ }
+ if (interval->GetStart() == position) {
+ // The safepoint is for this instruction, so the location of the instruction
+ // does not need to be saved.
+ continue;
+ }
if ((current->GetType() == Primitive::kPrimNot) && current->GetParent()->HasSpillSlot()) {
locations->SetStackBit(current->GetParent()->GetSpillSlot() / kVRegSize);
@@ -1016,7 +1089,7 @@
}
size_t from_position = from->GetLifetimeEnd() - 1;
- // When an instructions dies at entry of another, and the latter is the beginning
+ // When an instruction dies at entry of another, and the latter is the beginning
// of a block, the register allocator ensures the former has a register
// at block->GetLifetimeStart() + 1. Since this is at a block boundary, it must
be handled in this method.
@@ -1129,8 +1202,8 @@
// Resolve phi inputs. Order does not matter.
for (HLinearOrderIterator it(liveness_); !it.Done(); it.Advance()) {
HBasicBlock* current = it.Current();
- for (HInstructionIterator it(current->GetPhis()); !it.Done(); it.Advance()) {
- HInstruction* phi = it.Current();
+ for (HInstructionIterator inst_it(current->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
+ HInstruction* phi = inst_it.Current();
for (size_t i = 0, e = current->GetPredecessors().Size(); i < e; ++i) {
HBasicBlock* predecessor = current->GetPredecessors().Get(i);
DCHECK_EQ(predecessor->GetSuccessors().Size(), 1u);
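// A standalone toy model (not ART code) of the SSA property the allocator now
// relies on above: a non-split interval that starts inside a lifetime hole of
// a non-fixed inactive interval can never intersect that interval, so the
// intersection test may be skipped for such pairs. All types here are
// illustrative stand-ins for LiveInterval.
#include <cassert>
#include <cstddef>
#include <utility>
#include <vector>

struct ToyInterval {
  std::vector<std::pair<size_t, size_t>> ranges;  // disjoint [start, end) ranges
  bool is_fixed = false;
  bool is_split = false;
};

bool Intersect(const ToyInterval& a, const ToyInterval& b) {
  for (const auto& ra : a.ranges) {
    for (const auto& rb : b.ranges) {
      if (ra.first < rb.second && rb.first < ra.second) return true;
    }
  }
  return false;
}

// Mirrors the `!current->IsSplit() && !inactive->IsFixed()` fast path: only
// split or fixed intervals force an explicit FirstIntersectionWith() check.
bool MustTestIntersection(const ToyInterval& current, const ToyInterval& inactive) {
  return current.is_split || inactive.is_fixed;
}

int main() {
  ToyInterval inactive;
  inactive.ranges = {{0, 10}, {20, 30}};  // lifetime hole over [10, 20)
  ToyInterval current;
  current.ranges = {{12, 18}};            // defined and dead inside the hole
  assert(!MustTestIntersection(current, inactive));
  assert(!Intersect(current, inactive));  // what SSA guarantees for this pair
  return 0;
}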
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
index b881539..976ee39 100644
--- a/compiler/optimizing/register_allocator.h
+++ b/compiler/optimizing/register_allocator.h
@@ -126,6 +126,7 @@
void ProcessInstruction(HInstruction* instruction);
bool ValidateInternal(bool log_fatal_on_failure) const;
void DumpInterval(std::ostream& stream, LiveInterval* interval) const;
+ void DumpAllIntervals(std::ostream& stream) const;
ArenaAllocator* const allocator_;
CodeGenerator* const codegen_;
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 2d84a9d..6845dea 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -414,21 +414,24 @@
// Add an artificial range to cover the temps that will be put in the unhandled list.
LiveInterval* unhandled = graph->GetEntryBlock()->GetFirstInstruction()->GetLiveInterval();
unhandled->AddLoopRange(0, 60);
+ // For SSA value intervals, only an interval resulting from a split may intersect
+ // with inactive intervals.
+ unhandled = register_allocator.Split(unhandled, 5);
// Add three temps holding the same register, and starting at different positions.
// Put the one that should be picked in the middle of the inactive list to ensure
// we do not depend on an order.
- LiveInterval* interval = LiveInterval::MakeTempInterval(&allocator, Primitive::kPrimInt);
+ LiveInterval* interval = LiveInterval::MakeInterval(&allocator, Primitive::kPrimInt);
interval->SetRegister(0);
interval->AddRange(40, 50);
register_allocator.inactive_.Add(interval);
- interval = LiveInterval::MakeTempInterval(&allocator, Primitive::kPrimInt);
+ interval = LiveInterval::MakeInterval(&allocator, Primitive::kPrimInt);
interval->SetRegister(0);
interval->AddRange(20, 30);
register_allocator.inactive_.Add(interval);
- interval = LiveInterval::MakeTempInterval(&allocator, Primitive::kPrimInt);
+ interval = LiveInterval::MakeInterval(&allocator, Primitive::kPrimInt);
interval->SetRegister(0);
interval->AddRange(60, 70);
register_allocator.inactive_.Add(interval);
@@ -438,7 +441,7 @@
register_allocator.processing_core_registers_ = true;
register_allocator.unhandled_ = &register_allocator.unhandled_core_intervals_;
- register_allocator.TryAllocateFreeReg(unhandled);
+ ASSERT_TRUE(register_allocator.TryAllocateFreeReg(unhandled));
// Check that we have split the interval.
ASSERT_EQ(1u, register_allocator.unhandled_->Size());
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index a0cc8a9..e83c528 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -109,8 +109,8 @@
HPhi* phi = new (GetGraph()->GetArena()) HPhi(
GetGraph()->GetArena(), local, block->GetPredecessors().Size(), Primitive::kPrimVoid);
for (size_t i = 0; i < block->GetPredecessors().Size(); i++) {
- HInstruction* value = ValueOfLocal(block->GetPredecessors().Get(i), local);
- phi->SetRawInputAt(i, value);
+ HInstruction* pred_value = ValueOfLocal(block->GetPredecessors().Get(i), local);
+ phi->SetRawInputAt(i, pred_value);
}
block->AddPhi(phi);
value = phi;
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index 1e34670..0085b27 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -107,15 +107,15 @@
HBasicBlock* block = it.Current();
block->SetLifetimeStart(lifetime_position);
- for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
- HInstruction* current = it.Current();
+ for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
+ HInstruction* current = inst_it.Current();
current->Accept(location_builder);
LocationSummary* locations = current->GetLocations();
if (locations != nullptr && locations->Out().IsValid()) {
instructions_from_ssa_index_.Add(current);
current->SetSsaIndex(ssa_index++);
current->SetLiveInterval(
- new (graph_.GetArena()) LiveInterval(graph_.GetArena(), current->GetType(), current));
+ LiveInterval::MakeInterval(graph_.GetArena(), current->GetType(), current));
}
current->SetLifetimePosition(lifetime_position);
}
@@ -124,15 +124,16 @@
// Add a null marker to notify we are starting a block.
instructions_from_lifetime_position_.Add(nullptr);
- for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
- HInstruction* current = it.Current();
+ for (HInstructionIterator inst_it(block->GetInstructions()); !inst_it.Done();
+ inst_it.Advance()) {
+ HInstruction* current = inst_it.Current();
current->Accept(codegen_->GetLocationBuilder());
LocationSummary* locations = current->GetLocations();
if (locations != nullptr && locations->Out().IsValid()) {
instructions_from_ssa_index_.Add(current);
current->SetSsaIndex(ssa_index++);
current->SetLiveInterval(
- new (graph_.GetArena()) LiveInterval(graph_.GetArena(), current->GetType(), current));
+ LiveInterval::MakeInterval(graph_.GetArena(), current->GetType(), current));
}
instructions_from_lifetime_position_.Add(current);
current->SetLifetimePosition(lifetime_position);
@@ -178,8 +179,8 @@
HBasicBlock* successor = block->GetSuccessors().Get(i);
live_in->Union(GetLiveInSet(*successor));
size_t phi_input_index = successor->GetPredecessorIndexOf(block);
- for (HInstructionIterator it(successor->GetPhis()); !it.Done(); it.Advance()) {
- HInstruction* phi = it.Current();
+ for (HInstructionIterator inst_it(successor->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
+ HInstruction* phi = inst_it.Current();
HInstruction* input = phi->InputAt(phi_input_index);
input->GetLiveInterval()->AddPhiUse(phi, phi_input_index, block);
// A phi input whose last user is the phi dies at the end of the predecessor block,
@@ -195,8 +196,9 @@
current->GetLiveInterval()->AddRange(block->GetLifetimeStart(), block->GetLifetimeEnd());
}
- for (HBackwardInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
- HInstruction* current = it.Current();
+ for (HBackwardInstructionIterator back_it(block->GetInstructions()); !back_it.Done();
+ back_it.Advance()) {
+ HInstruction* current = back_it.Current();
if (current->HasSsaIndex()) {
// Kill the instruction and shorten its interval.
kill->SetBit(current->GetSsaIndex());
@@ -230,8 +232,8 @@
}
// Kill phis defined in this block.
- for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
- HInstruction* current = it.Current();
+ for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
+ HInstruction* current = inst_it.Current();
if (current->HasSsaIndex()) {
kill->SetBit(current->GetSsaIndex());
live_in->ClearBit(current->GetSsaIndex());
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 7dda4f6..ca08d5b 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -25,7 +25,7 @@
static constexpr int kNoRegister = -1;
-class BlockInfo : public ArenaObject {
+class BlockInfo : public ArenaObject<kArenaAllocMisc> {
public:
BlockInfo(ArenaAllocator* allocator, const HBasicBlock& block, size_t number_of_ssa_values)
: block_(block),
@@ -53,7 +53,7 @@
* A live range contains the start and end of a range where an instruction or a temporary
* is live.
*/
-class LiveRange : public ArenaObject {
+class LiveRange FINAL : public ArenaObject<kArenaAllocMisc> {
public:
LiveRange(size_t start, size_t end, LiveRange* next) : start_(start), end_(end), next_(next) {
DCHECK_LT(start, end);
@@ -64,16 +64,16 @@
size_t GetEnd() const { return end_; }
LiveRange* GetNext() const { return next_; }
- bool IntersectsWith(const LiveRange& other) {
+ bool IntersectsWith(const LiveRange& other) const {
return (start_ >= other.start_ && start_ < other.end_)
|| (other.start_ >= start_ && other.start_ < end_);
}
- bool IsBefore(const LiveRange& other) {
+ bool IsBefore(const LiveRange& other) const {
return end_ <= other.start_;
}
- void Dump(std::ostream& stream) {
+ void Dump(std::ostream& stream) const {
stream << "[" << start_ << ", " << end_ << ")";
}
@@ -90,7 +90,7 @@
/**
* A use position represents a live interval use at a given position.
*/
-class UsePosition : public ArenaObject {
+class UsePosition : public ArenaObject<kArenaAllocMisc> {
public:
UsePosition(HInstruction* user,
size_t input_index,
@@ -137,28 +137,13 @@
* An interval is a list of disjoint live ranges where an instruction is live.
* Each instruction that has uses gets an interval.
*/
-class LiveInterval : public ArenaObject {
+class LiveInterval : public ArenaObject<kArenaAllocMisc> {
public:
- LiveInterval(ArenaAllocator* allocator,
- Primitive::Type type,
- HInstruction* defined_by = nullptr,
- bool is_fixed = false,
- int reg = kNoRegister,
- bool is_temp = false,
- bool is_slow_path_safepoint = false)
- : allocator_(allocator),
- first_range_(nullptr),
- last_range_(nullptr),
- first_use_(nullptr),
- type_(type),
- next_sibling_(nullptr),
- parent_(this),
- register_(reg),
- spill_slot_(kNoSpillSlot),
- is_fixed_(is_fixed),
- is_temp_(is_temp),
- is_slow_path_safepoint_(is_slow_path_safepoint),
- defined_by_(defined_by) {}
+ static LiveInterval* MakeInterval(ArenaAllocator* allocator,
+ Primitive::Type type,
+ HInstruction* instruction = nullptr) {
+ return new (allocator) LiveInterval(allocator, type, instruction);
+ }
static LiveInterval* MakeSlowPathInterval(ArenaAllocator* allocator, HInstruction* instruction) {
return new (allocator) LiveInterval(
@@ -174,7 +159,10 @@
}
bool IsFixed() const { return is_fixed_; }
+ bool IsTemp() const { return is_temp_; }
bool IsSlowPathSafepoint() const { return is_slow_path_safepoint_; }
+ // This interval is the result of a split.
+ bool IsSplit() const { return parent_ != this; }
void AddUse(HInstruction* instruction, size_t input_index, bool is_environment) {
// Set the use within the instruction.
@@ -489,6 +477,7 @@
} while ((use = use->GetNext()) != nullptr);
}
stream << "}";
+ stream << " is_fixed: " << is_fixed_ << ", is_split: " << IsSplit();
}
LiveInterval* GetNextSibling() const { return next_sibling_; }
@@ -520,12 +509,31 @@
// Finds the interval that covers `position`.
const LiveInterval& GetIntervalAt(size_t position) const;
- bool IsTemp() const { return is_temp_; }
-
// Returns whether `other` and `this` share the same kind of register.
bool SameRegisterKind(Location other) const;
private:
+ LiveInterval(ArenaAllocator* allocator,
+ Primitive::Type type,
+ HInstruction* defined_by = nullptr,
+ bool is_fixed = false,
+ int reg = kNoRegister,
+ bool is_temp = false,
+ bool is_slow_path_safepoint = false)
+ : allocator_(allocator),
+ first_range_(nullptr),
+ last_range_(nullptr),
+ first_use_(nullptr),
+ type_(type),
+ next_sibling_(nullptr),
+ parent_(this),
+ register_(reg),
+ spill_slot_(kNoSpillSlot),
+ is_fixed_(is_fixed),
+ is_temp_(is_temp),
+ is_slow_path_safepoint_(is_slow_path_safepoint),
+ defined_by_(defined_by) {}
+
ArenaAllocator* const allocator_;
// Ranges of this interval. We need a quick access to the last range to test
diff --git a/compiler/optimizing/ssa_phi_elimination.cc b/compiler/optimizing/ssa_phi_elimination.cc
index 4eda0f3..56979e1 100644
--- a/compiler/optimizing/ssa_phi_elimination.cc
+++ b/compiler/optimizing/ssa_phi_elimination.cc
@@ -22,10 +22,10 @@
// Add to the worklist phis referenced by non-phi instructions.
for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
- for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
- HPhi* phi = it.Current()->AsPhi();
- for (HUseIterator<HInstruction> it(phi->GetUses()); !it.Done(); it.Advance()) {
- HUseListNode<HInstruction>* current = it.Current();
+ for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
+ HPhi* phi = inst_it.Current()->AsPhi();
+ for (HUseIterator<HInstruction> use_it(phi->GetUses()); !use_it.Done(); use_it.Advance()) {
+ HUseListNode<HInstruction>* current = use_it.Current();
HInstruction* user = current->GetUser();
if (!user->IsPhi()) {
worklist_.Add(phi);
@@ -61,8 +61,9 @@
next = current->GetNext();
if (current->AsPhi()->IsDead()) {
if (current->HasUses()) {
- for (HUseIterator<HInstruction> it(current->GetUses()); !it.Done(); it.Advance()) {
- HUseListNode<HInstruction>* user_node = it.Current();
+ for (HUseIterator<HInstruction> use_it(current->GetUses()); !use_it.Done();
+ use_it.Advance()) {
+ HUseListNode<HInstruction>* user_node = use_it.Current();
HInstruction* user = user_node->GetUser();
DCHECK(user->IsLoopHeaderPhi());
DCHECK(user->AsPhi()->IsDead());
@@ -72,8 +73,9 @@
}
}
if (current->HasEnvironmentUses()) {
- for (HUseIterator<HEnvironment> it(current->GetEnvUses()); !it.Done(); it.Advance()) {
- HUseListNode<HEnvironment>* user_node = it.Current();
+ for (HUseIterator<HEnvironment> use_it(current->GetEnvUses()); !use_it.Done();
+ use_it.Advance()) {
+ HUseListNode<HEnvironment>* user_node = use_it.Current();
HEnvironment* user = user_node->GetUser();
user->SetRawEnvAt(user_node->GetIndex(), nullptr);
current->RemoveEnvironmentUser(user, user_node->GetIndex());
@@ -90,8 +92,8 @@
// Add all phis in the worklist.
for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
- for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
- worklist_.Add(it.Current()->AsPhi());
+ for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
+ worklist_.Add(inst_it.Current()->AsPhi());
}
}
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 5f74c33..9cfa71c 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -167,33 +167,33 @@
}
// Set the register map.
- MemoryRegion region = dex_register_maps_region.Subregion(
+ MemoryRegion register_region = dex_register_maps_region.Subregion(
next_dex_register_map_offset,
DexRegisterMap::kFixedSize + entry.num_dex_registers * DexRegisterMap::SingleEntrySize());
- next_dex_register_map_offset += region.size();
- DexRegisterMap dex_register_map(region);
- stack_map.SetDexRegisterMapOffset(region.start() - memory_start);
+ next_dex_register_map_offset += register_region.size();
+ DexRegisterMap dex_register_map(register_region);
+ stack_map.SetDexRegisterMapOffset(register_region.start() - memory_start);
- for (size_t i = 0; i < entry.num_dex_registers; ++i) {
+ for (size_t j = 0; j < entry.num_dex_registers; ++j) {
DexRegisterEntry register_entry =
- dex_register_maps_.Get(i + entry.dex_register_maps_start_index);
- dex_register_map.SetRegisterInfo(i, register_entry.kind, register_entry.value);
+ dex_register_maps_.Get(j + entry.dex_register_maps_start_index);
+ dex_register_map.SetRegisterInfo(j, register_entry.kind, register_entry.value);
}
// Set the inlining info.
if (entry.inlining_depth != 0) {
- MemoryRegion region = inline_infos_region.Subregion(
+ MemoryRegion inline_region = inline_infos_region.Subregion(
next_inline_info_offset,
InlineInfo::kFixedSize + entry.inlining_depth * InlineInfo::SingleEntrySize());
- next_inline_info_offset += region.size();
- InlineInfo inline_info(region);
+ next_inline_info_offset += inline_region.size();
+ InlineInfo inline_info(inline_region);
- stack_map.SetInlineDescriptorOffset(region.start() - memory_start);
+ stack_map.SetInlineDescriptorOffset(inline_region.start() - memory_start);
inline_info.SetDepth(entry.inlining_depth);
- for (size_t i = 0; i < entry.inlining_depth; ++i) {
- InlineInfoEntry inline_entry = inline_infos_.Get(i + entry.inline_infos_start_index);
- inline_info.SetMethodReferenceIndexAtDepth(i, inline_entry.method_index);
+ for (size_t j = 0; j < entry.inlining_depth; ++j) {
+ InlineInfoEntry inline_entry = inline_infos_.Get(j + entry.inline_infos_start_index);
+ inline_info.SetMethodReferenceIndexAtDepth(j, inline_entry.method_index);
}
} else {
stack_map.SetInlineDescriptorOffset(InlineInfo::kNoInlineInfo);
diff --git a/compiler/output_stream.cc b/compiler/output_stream.cc
new file mode 100644
index 0000000..a8b64ca
--- /dev/null
+++ b/compiler/output_stream.cc
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "output_stream.h"
+
+namespace art {
+
+std::ostream& operator<<(std::ostream& os, const Whence& rhs) {
+ switch (rhs) {
+ case kSeekSet: os << "SEEK_SET"; break;
+ case kSeekCurrent: os << "SEEK_CUR"; break;
+ case kSeekEnd: os << "SEEK_END"; break;
+ default: UNREACHABLE();
+ }
+ return os;
+}
+
+} // namespace art
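// A brief usage sketch, assuming the header and implementation added above are
// on the include path: the overload lets CHECK/LOG-style messages print the
// symbolic seek mode instead of a raw integer.
#include <iostream>
#include "output_stream.h"  // declares art::Whence and the operator<< above

int main() {
  std::cout << art::kSeekCurrent << std::endl;  // prints "SEEK_CUR"
  return 0;
}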
diff --git a/compiler/output_stream.h b/compiler/output_stream.h
index 97ccc2c..4d30b83 100644
--- a/compiler/output_stream.h
+++ b/compiler/output_stream.h
@@ -17,9 +17,7 @@
#ifndef ART_COMPILER_OUTPUT_STREAM_H_
#define ART_COMPILER_OUTPUT_STREAM_H_
-#include <stdint.h>
-#include <sys/types.h>
-
+#include <ostream>
#include <string>
#include "base/macros.h"
@@ -31,6 +29,7 @@
kSeekCurrent = SEEK_CUR,
kSeekEnd = SEEK_END,
};
+std::ostream& operator<<(std::ostream& os, const Whence& rhs);
class OutputStream {
public:
diff --git a/compiler/utils/arena_allocator.cc b/compiler/utils/arena_allocator.cc
index 0c93f0a..004af98 100644
--- a/compiler/utils/arena_allocator.cc
+++ b/compiler/utils/arena_allocator.cc
@@ -114,7 +114,7 @@
<< num_allocations << ", avg size: " << bytes_allocated / num_allocations << "\n";
}
os << "===== Allocation by kind\n";
- COMPILE_ASSERT(arraysize(kAllocNames) == kNumArenaAllocKinds, check_arraysize_kAllocNames);
+ static_assert(arraysize(kAllocNames) == kNumArenaAllocKinds, "arraysize of kAllocNames");
for (int i = 0; i < kNumArenaAllocKinds; i++) {
os << kAllocNames[i] << std::setw(10) << alloc_stats_[i] << "\n";
}
diff --git a/compiler/utils/arena_allocator.h b/compiler/utils/arena_allocator.h
index b2f5ca9..6d21399 100644
--- a/compiler/utils/arena_allocator.h
+++ b/compiler/utils/arena_allocator.h
@@ -82,7 +82,7 @@
ArenaAllocatorStatsImpl& operator = (const ArenaAllocatorStatsImpl& other) = delete;
void Copy(const ArenaAllocatorStatsImpl& other) { UNUSED(other); }
- void RecordAlloc(size_t bytes, ArenaAllocKind kind) { UNUSED(bytes); UNUSED(kind); }
+ void RecordAlloc(size_t bytes, ArenaAllocKind kind) { UNUSED(bytes, kind); }
size_t NumAllocations() const { return 0u; }
size_t BytesAllocated() const { return 0u; }
void Dump(std::ostream& os, const Arena* first, ssize_t lost_bytes_adjustment) const {
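// The stub above now forwards several names through one UNUSED(...) call,
// which presupposes a variadic helper. A minimal sketch of one possible shape
// (an assumption; the real definition lives in art's base/macros.h and may
// differ):
#include <cstddef>

template <typename... Args>
inline void UnusedSketch(const Args&... /* args */) {}  // accepts and ignores anything

// A stats-disabled stub could then read:
//   void RecordAlloc(size_t bytes, ArenaAllocKind kind) { UnusedSketch(bytes, kind); }
int main() {
  size_t bytes = 16u;
  int kind = 0;
  UnusedSketch(bytes, kind);  // no-op, but the parameters count as used
  return 0;
}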
diff --git a/compiler/utils/arena_bit_vector.cc b/compiler/utils/arena_bit_vector.cc
index de35f3d..f17e5a9 100644
--- a/compiler/utils/arena_bit_vector.cc
+++ b/compiler/utils/arena_bit_vector.cc
@@ -16,12 +16,12 @@
#include "arena_allocator.h"
#include "arena_bit_vector.h"
-#include "base/allocator.h"
namespace art {
template <typename ArenaAlloc>
-class ArenaBitVectorAllocator FINAL : public Allocator {
+class ArenaBitVectorAllocator FINAL : public Allocator,
+ public ArenaObject<kArenaAllocGrowableBitMap> {
public:
explicit ArenaBitVectorAllocator(ArenaAlloc* arena) : arena_(arena) {}
~ArenaBitVectorAllocator() {}
@@ -32,11 +32,6 @@
virtual void Free(void*) {} // Nop.
- static void* operator new(size_t size, ArenaAlloc* arena) {
- return arena->Alloc(sizeof(ArenaBitVectorAllocator), kArenaAllocGrowableBitMap);
- }
- static void operator delete(void* p) {} // Nop.
-
private:
ArenaAlloc* const arena_;
DISALLOW_COPY_AND_ASSIGN(ArenaBitVectorAllocator);
diff --git a/compiler/utils/arena_bit_vector.h b/compiler/utils/arena_bit_vector.h
index c92658f..34f1ca9 100644
--- a/compiler/utils/arena_bit_vector.h
+++ b/compiler/utils/arena_bit_vector.h
@@ -17,12 +17,14 @@
#ifndef ART_COMPILER_UTILS_ARENA_BIT_VECTOR_H_
#define ART_COMPILER_UTILS_ARENA_BIT_VECTOR_H_
+#include "arena_object.h"
#include "base/bit_vector.h"
-#include "utils/arena_allocator.h"
-#include "utils/scoped_arena_allocator.h"
namespace art {
+class ArenaAllocator;
+class ScopedArenaAllocator;
+
// Type of growable bitmap for memory tuning.
enum OatBitMapKind {
kBitMapMisc = 0,
@@ -50,7 +52,7 @@
/*
* A BitVector implementation that uses Arena allocation.
*/
-class ArenaBitVector : public BitVector {
+class ArenaBitVector : public BitVector, public ArenaObject<kArenaAllocGrowableBitMap> {
public:
ArenaBitVector(ArenaAllocator* arena, uint32_t start_bits, bool expandable,
OatBitMapKind kind = kBitMapMisc);
@@ -58,16 +60,10 @@
OatBitMapKind kind = kBitMapMisc);
~ArenaBitVector() {}
- static void* operator new(size_t size, ArenaAllocator* arena) {
- return arena->Alloc(sizeof(ArenaBitVector), kArenaAllocGrowableBitMap);
- }
- static void* operator new(size_t size, ScopedArenaAllocator* arena) {
- return arena->Alloc(sizeof(ArenaBitVector), kArenaAllocGrowableBitMap);
- }
- static void operator delete(void* p) {} // Nop.
-
private:
const OatBitMapKind kind_; // for memory use tuning. TODO: currently unused.
+
+ DISALLOW_COPY_AND_ASSIGN(ArenaBitVector);
};
diff --git a/compiler/utils/arena_containers.h b/compiler/utils/arena_containers.h
index c48b0c8..8252591 100644
--- a/compiler/utils/arena_containers.h
+++ b/compiler/utils/arena_containers.h
@@ -66,7 +66,7 @@
class ArenaAllocatorAdapterKindImpl<false> {
public:
// Not tracking allocations, ignore the supplied kind and arbitrarily provide kArenaAllocSTL.
- explicit ArenaAllocatorAdapterKindImpl(ArenaAllocKind kind) { }
+ explicit ArenaAllocatorAdapterKindImpl(ArenaAllocKind kind) { UNUSED(kind); }
ArenaAllocatorAdapterKindImpl& operator=(const ArenaAllocatorAdapterKindImpl& other) = default;
ArenaAllocKind Kind() { return kArenaAllocSTL; }
};
@@ -159,11 +159,13 @@
const_pointer address(const_reference x) const { return &x; }
pointer allocate(size_type n, ArenaAllocatorAdapter<void>::pointer hint = nullptr) {
+ UNUSED(hint);
DCHECK_LE(n, max_size());
return reinterpret_cast<T*>(arena_allocator_->Alloc(n * sizeof(T),
ArenaAllocatorAdapterKind::Kind()));
}
void deallocate(pointer p, size_type n) {
+ UNUSED(p, n);
}
void construct(pointer p, const_reference val) {
diff --git a/compiler/utils/arena_object.h b/compiler/utils/arena_object.h
index 8f6965e..d64c419 100644
--- a/compiler/utils/arena_object.h
+++ b/compiler/utils/arena_object.h
@@ -19,14 +19,21 @@
#include "arena_allocator.h"
#include "base/logging.h"
+#include "scoped_arena_allocator.h"
namespace art {
+// Parent for arena allocated objects giving appropriate new and delete operators.
+template<enum ArenaAllocKind kAllocKind>
class ArenaObject {
public:
// Allocate a new ArenaObject of 'size' bytes in the Arena.
void* operator new(size_t size, ArenaAllocator* allocator) {
- return allocator->Alloc(size, kArenaAllocMisc);
+ return allocator->Alloc(size, kAllocKind);
+ }
+
+ static void* operator new(size_t size, ScopedArenaAllocator* arena) {
+ return arena->Alloc(size, kAllocKind);
}
void operator delete(void*, size_t) {
@@ -35,6 +42,26 @@
}
};
+
+// Parent for arena allocated objects that get deleted, gives appropriate new and delete operators.
+// Currently this is used by the quick compiler for debug reference counting arena allocations.
+template<enum ArenaAllocKind kAllocKind>
+class DeletableArenaObject {
+ public:
+ // Allocate a new ArenaObject of 'size' bytes in the Arena.
+ void* operator new(size_t size, ArenaAllocator* allocator) {
+ return allocator->Alloc(size, kAllocKind);
+ }
+
+ static void* operator new(size_t size, ScopedArenaAllocator* arena) {
+ return arena->Alloc(size, kAllocKind);
+ }
+
+ void operator delete(void*, size_t) {
+ // Nop.
+ }
+};
+
} // namespace art
#endif // ART_COMPILER_UTILS_ARENA_OBJECT_H_
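// A self-contained toy (not ART code) showing the pattern the templated
// ArenaObject above enables: the allocation kind is baked into the base class,
// so call sites can write `new (allocator) Node(...)` without repeating the
// kind. `ToyArena` is an illustrative stand-in for ArenaAllocator.
#include <cstddef>
#include <cstdlib>

enum ToyAllocKind { kToyAllocMisc, kToyAllocGrowableBitMap };

struct ToyArena {
  // A real arena would bump-allocate and record stats per kind.
  void* Alloc(size_t size, ToyAllocKind /* kind */) { return std::malloc(size); }
};

template <ToyAllocKind kKind>
class ToyArenaObject {
 public:
  void* operator new(size_t size, ToyArena* arena) { return arena->Alloc(size, kKind); }
  void operator delete(void*, size_t) {}  // nop: arena memory is freed wholesale
};

class ToyNode : public ToyArenaObject<kToyAllocMisc> {
 public:
  explicit ToyNode(int value) : value_(value) {}
  int value_;
};

int main() {
  ToyArena arena;
  ToyNode* node = new (&arena) ToyNode(42);  // tagged kToyAllocMisc implicitly
  return node->value_ == 42 ? 0 : 1;
}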
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index e9788f9..591d461 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -301,11 +301,11 @@
CHECK(IsAbsoluteUint(10, offset)); // In the range -1020 to +1020.
CHECK_ALIGNED(offset, 2); // Multiple of 4.
CHECK((am_ == Offset) || (am_ == NegOffset));
- uint32_t vencoding = (encoding & (0xf << kRnShift)) | (offset >> 2);
+ uint32_t vencoding_value = (encoding & (0xf << kRnShift)) | (offset >> 2);
if (am_ == Offset) {
- vencoding |= 1 << 23;
+ vencoding_value |= 1 << 23;
}
- return vencoding;
+ return vencoding_value;
}
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index c1ed6a2..dca2ab7 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -20,6 +20,7 @@
#include <vector>
#include "base/logging.h"
+#include "base/value_object.h"
#include "constants_arm.h"
#include "utils/arm/managed_register_arm.h"
#include "utils/assembler.h"
@@ -179,8 +180,12 @@
DB_W = (8|0|1) << 21, // decrement before with writeback to base
IB_W = (8|4|1) << 21 // increment before with writeback to base
};
+inline std::ostream& operator<<(std::ostream& os, const BlockAddressMode& rhs) {
+ os << static_cast<int>(rhs);
+ return os;
+}
-class Address {
+class Address : public ValueObject {
public:
// Memory operand addressing mode (in ARM encoding form. For others we need
// to adjust)
@@ -260,13 +265,17 @@
}
private:
- Register rn_;
- Register rm_;
- int32_t offset_; // Used as shift amount for register offset.
- Mode am_;
- bool is_immed_offset_;
- Shift shift_;
+ const Register rn_;
+ const Register rm_;
+ const int32_t offset_; // Used as shift amount for register offset.
+ const Mode am_;
+ const bool is_immed_offset_;
+ const Shift shift_;
};
+inline std::ostream& operator<<(std::ostream& os, const Address::Mode& rhs) {
+ os << static_cast<int>(rhs);
+ return os;
+}
// Instruction encoding bits.
enum {
@@ -344,10 +353,6 @@
extern const char* kRegisterNames[];
extern const char* kConditionNames[];
-extern std::ostream& operator<<(std::ostream& os, const Register& rhs);
-extern std::ostream& operator<<(std::ostream& os, const SRegister& rhs);
-extern std::ostream& operator<<(std::ostream& os, const DRegister& rhs);
-extern std::ostream& operator<<(std::ostream& os, const Condition& rhs);
// This is an abstract ARM assembler. Subclasses provide assemblers for the individual
// instruction sets (ARM32, Thumb2, etc.)
@@ -448,8 +453,10 @@
virtual void bkpt(uint16_t imm16) = 0;
virtual void svc(uint32_t imm24) = 0;
- virtual void it(Condition firstcond, ItState i1 = kItOmitted,
- ItState i2 = kItOmitted, ItState i3 = kItOmitted) {
+ virtual void it(Condition firstcond ATTRIBUTE_UNUSED,
+ ItState i1 ATTRIBUTE_UNUSED = kItOmitted,
+ ItState i2 ATTRIBUTE_UNUSED = kItOmitted,
+ ItState i3 ATTRIBUTE_UNUSED = kItOmitted) {
// Ignored if not supported.
}
@@ -537,14 +544,9 @@
Condition cond = AL) = 0;
virtual void AddConstantSetFlags(Register rd, Register rn, int32_t value,
Condition cond = AL) = 0;
- virtual void AddConstantWithCarry(Register rd, Register rn, int32_t value,
- Condition cond = AL) = 0;
// Load and Store. May clobber IP.
virtual void LoadImmediate(Register rd, int32_t value, Condition cond = AL) = 0;
- virtual void LoadSImmediate(SRegister sd, float value, Condition cond = AL) = 0;
- virtual void LoadDImmediate(DRegister dd, double value,
- Register scratch, Condition cond = AL) = 0;
virtual void MarkExceptionHandler(Label* label) = 0;
virtual void LoadFromOffset(LoadOperandType type,
Register reg,
diff --git a/compiler/utils/arm/assembler_arm32.cc b/compiler/utils/arm/assembler_arm32.cc
index d262b6a..c8a57b1 100644
--- a/compiler/utils/arm/assembler_arm32.cc
+++ b/compiler/utils/arm/assembler_arm32.cc
@@ -1303,7 +1303,6 @@
}
}
-
void Arm32Assembler::LoadImmediate(Register rd, int32_t value, Condition cond) {
ShifterOperand shifter_op;
if (ShifterOperand::CanHoldArm(value, &shifter_op)) {
@@ -1483,12 +1482,12 @@
}
-void Arm32Assembler::cbz(Register rn, Label* target) {
+void Arm32Assembler::cbz(Register rn ATTRIBUTE_UNUSED, Label* target ATTRIBUTE_UNUSED) {
LOG(FATAL) << "cbz is not supported on ARM32";
}
-void Arm32Assembler::cbnz(Register rn, Label* target) {
+void Arm32Assembler::cbnz(Register rn ATTRIBUTE_UNUSED, Label* target ATTRIBUTE_UNUSED) {
LOG(FATAL) << "cbnz is not supported on ARM32";
}
diff --git a/compiler/utils/arm/assembler_arm32.h b/compiler/utils/arm/assembler_arm32.h
index cfc300b..dbabb99 100644
--- a/compiler/utils/arm/assembler_arm32.h
+++ b/compiler/utils/arm/assembler_arm32.h
@@ -238,14 +238,9 @@
Condition cond = AL) OVERRIDE;
void AddConstantSetFlags(Register rd, Register rn, int32_t value,
Condition cond = AL) OVERRIDE;
- void AddConstantWithCarry(Register rd, Register rn, int32_t value,
- Condition cond = AL) {}
// Load and Store. May clobber IP.
void LoadImmediate(Register rd, int32_t value, Condition cond = AL) OVERRIDE;
- void LoadSImmediate(SRegister sd, float value, Condition cond = AL) {}
- void LoadDImmediate(DRegister dd, double value,
- Register scratch, Condition cond = AL) {}
void MarkExceptionHandler(Label* label) OVERRIDE;
void LoadFromOffset(LoadOperandType type,
Register reg,
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 633f55b..053e843 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -152,6 +152,8 @@
void Thumb2Assembler::mul(Register rd, Register rn, Register rm, Condition cond) {
+ CheckCondition(cond);
+
if (rd == rm && !IsHighRegister(rd) && !IsHighRegister(rn) && !force_32bit_) {
// 16 bit.
int16_t encoding = B14 | B9 | B8 | B6 |
@@ -176,6 +178,8 @@
void Thumb2Assembler::mla(Register rd, Register rn, Register rm, Register ra,
Condition cond) {
+ CheckCondition(cond);
+
uint32_t op1 = 0U /* 0b000 */;
uint32_t op2 = 0U /* 0b00 */;
int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 |
@@ -192,6 +196,8 @@
void Thumb2Assembler::mls(Register rd, Register rn, Register rm, Register ra,
Condition cond) {
+ CheckCondition(cond);
+
uint32_t op1 = 0U /* 0b000 */;
uint32_t op2 = 01 /* 0b01 */;
int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 |
@@ -208,6 +214,8 @@
void Thumb2Assembler::umull(Register rd_lo, Register rd_hi, Register rn,
Register rm, Condition cond) {
+ CheckCondition(cond);
+
uint32_t op1 = 2U /* 0b010; */;
uint32_t op2 = 0U /* 0b0000 */;
int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 | B23 |
@@ -223,6 +231,8 @@
void Thumb2Assembler::sdiv(Register rd, Register rn, Register rm, Condition cond) {
+ CheckCondition(cond);
+
uint32_t op1 = 1U /* 0b001 */;
uint32_t op2 = 15U /* 0b1111 */;
int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 | B23 | B20 |
@@ -238,6 +248,8 @@
void Thumb2Assembler::udiv(Register rd, Register rn, Register rm, Condition cond) {
+ CheckCondition(cond);
+
uint32_t op1 = 1U /* 0b001 */;
uint32_t op2 = 15U /* 0b1111 */;
int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 | B23 | B21 | B20 |
@@ -293,6 +305,7 @@
void Thumb2Assembler::ldrd(Register rd, const Address& ad, Condition cond) {
+ CheckCondition(cond);
CHECK_EQ(rd % 2, 0);
// This is different from other loads. The encoding is like ARM.
int32_t encoding = B31 | B30 | B29 | B27 | B22 | B20 |
@@ -304,6 +317,7 @@
void Thumb2Assembler::strd(Register rd, const Address& ad, Condition cond) {
+ CheckCondition(cond);
CHECK_EQ(rd % 2, 0);
// This is different from other loads. The encoding is like ARM.
int32_t encoding = B31 | B30 | B29 | B27 | B22 |
@@ -609,9 +623,9 @@
}
-bool Thumb2Assembler::Is32BitDataProcessing(Condition cond,
+bool Thumb2Assembler::Is32BitDataProcessing(Condition cond ATTRIBUTE_UNUSED,
Opcode opcode,
- int set_cc,
+ bool set_cc ATTRIBUTE_UNUSED,
Register rn,
Register rd,
const ShifterOperand& so) {
@@ -727,9 +741,9 @@
}
-void Thumb2Assembler::Emit32BitDataProcessing(Condition cond,
+void Thumb2Assembler::Emit32BitDataProcessing(Condition cond ATTRIBUTE_UNUSED,
Opcode opcode,
- int set_cc,
+ bool set_cc,
Register rn,
Register rd,
const ShifterOperand& so) {
@@ -789,7 +803,7 @@
}
encoding = B31 | B30 | B29 | B28 |
thumb_opcode << 21 |
- set_cc << 20 |
+ (set_cc ? 1 : 0) << 20 |
rn << 16 |
rd << 8 |
imm;
@@ -798,7 +812,7 @@
// Register (possibly shifted)
encoding = B31 | B30 | B29 | B27 | B25 |
thumb_opcode << 21 |
- set_cc << 20 |
+ (set_cc ? 1 : 0) << 20 |
rn << 16 |
rd << 8 |
so.encodingThumb();
@@ -809,7 +823,7 @@
void Thumb2Assembler::Emit16BitDataProcessing(Condition cond,
Opcode opcode,
- int set_cc,
+ bool set_cc,
Register rn,
Register rd,
const ShifterOperand& so) {
@@ -936,9 +950,9 @@
// ADD and SUB are complex enough to warrant their own emitter.
-void Thumb2Assembler::Emit16BitAddSub(Condition cond,
+void Thumb2Assembler::Emit16BitAddSub(Condition cond ATTRIBUTE_UNUSED,
Opcode opcode,
- int set_cc,
+ bool set_cc ATTRIBUTE_UNUSED,
Register rn,
Register rd,
const ShifterOperand& so) {
@@ -1075,7 +1089,7 @@
void Thumb2Assembler::EmitDataProcessing(Condition cond,
Opcode opcode,
- int set_cc,
+ bool set_cc,
Register rn,
Register rd,
const ShifterOperand& so) {
@@ -1405,7 +1419,7 @@
void Thumb2Assembler::EmitMultiMemOp(Condition cond,
- BlockAddressMode am,
+ BlockAddressMode bam,
bool load,
Register base,
RegList regs) {
@@ -1417,7 +1431,7 @@
must_be_32bit = true;
}
- uint32_t w_bit = am == IA_W || am == DB_W || am == DA_W || am == IB_W;
+ bool w_bit = bam == IA_W || bam == DB_W || bam == DA_W || bam == IB_W;
// 16 bit always uses writeback.
if (!w_bit) {
must_be_32bit = true;
@@ -1425,7 +1439,7 @@
if (must_be_32bit) {
uint32_t op = 0;
- switch (am) {
+ switch (bam) {
case IA:
case IA_W:
op = 1U /* 0b01 */;
@@ -1438,7 +1452,7 @@
case IB:
case DA_W:
case IB_W:
- LOG(FATAL) << "LDM/STM mode not supported on thumb: " << am;
+ LOG(FATAL) << "LDM/STM mode not supported on thumb: " << bam;
}
if (load) {
// Cannot have SP in the list.
@@ -2107,8 +2121,8 @@
branch->ResetSize(Branch::k16Bit);
// Now add a compare instruction in the place the branch was.
- int16_t cmp = B13 | B11 | static_cast<int16_t>(branch->GetRegister()) << 8;
- buffer_.Store<int16_t>(branch_location, cmp);
+ buffer_.Store<int16_t>(branch_location,
+ B13 | B11 | static_cast<int16_t>(branch->GetRegister()) << 8);
// Since we have made a hole in the code we need to reload the
// current pc.
@@ -2354,7 +2368,6 @@
}
}
-
void Thumb2Assembler::LoadImmediate(Register rd, int32_t value, Condition cond) {
ShifterOperand shifter_op;
if (ShifterOperand::CanHoldThumb(rd, R0, MOV, value, &shifter_op)) {
@@ -2378,7 +2391,7 @@
int32_t offset,
Condition cond) {
if (!Address::CanHoldLoadOffsetThumb(type, offset)) {
- CHECK(base != IP);
+ CHECK_NE(base, IP);
LoadImmediate(IP, offset, cond);
add(IP, IP, ShifterOperand(base), cond);
base = IP;
@@ -2454,12 +2467,26 @@
Register base,
int32_t offset,
Condition cond) {
+ Register tmp_reg = kNoRegister;
if (!Address::CanHoldStoreOffsetThumb(type, offset)) {
- CHECK(reg != IP);
- CHECK(base != IP);
- LoadImmediate(IP, offset, cond);
- add(IP, IP, ShifterOperand(base), cond);
- base = IP;
+ CHECK_NE(base, IP);
+ if (reg != IP) {
+ tmp_reg = IP;
+ } else {
+ // Be careful not to use IP twice (for `reg` and to build the
+ // Address object used by the store instruction(s) below).
+ // Instead, save R5 on the stack (or R6 if R5 is not available),
+ // use it as secondary temporary register, and restore it after
+ // the store instruction has been emitted.
+ tmp_reg = base != R5 ? R5 : R6;
+ Push(tmp_reg);
+ if (base == SP) {
+ offset += kRegisterSize;
+ }
+ }
+ LoadImmediate(tmp_reg, offset, cond);
+ add(tmp_reg, tmp_reg, ShifterOperand(base), cond);
+ base = tmp_reg;
offset = 0;
}
CHECK(Address::CanHoldStoreOffsetThumb(type, offset));
@@ -2480,6 +2507,10 @@
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
}
+ if (tmp_reg != kNoRegister && tmp_reg != IP) {
+ DCHECK(tmp_reg == R5 || tmp_reg == R6);
+ Pop(tmp_reg);
+ }
}
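// A standalone model (not assembler code) of the temp-register choice added to
// StoreToOffset above: IP is preferred as the scratch register, but when the
// value being stored already lives in IP, R5 (or R6 if the base is R5) is
// spilled around the store instead. The real code additionally adjusts the
// offset when the base is SP, because the push moves SP.
#include <cassert>

enum ToyReg { kR5, kR6, kSP, kIP, kNone };

struct TempChoice {
  ToyReg tmp;
  bool needs_push_pop;  // true when a register other than IP must be saved/restored
};

TempChoice ChooseStoreTemp(ToyReg value_reg, ToyReg base_reg) {
  assert(base_reg != kIP);      // mirrors CHECK_NE(base, IP)
  if (value_reg != kIP) {
    return {kIP, false};        // IP is a scratch register, nothing to save
  }
  ToyReg tmp = (base_reg != kR5) ? kR5 : kR6;  // never pick the base register
  return {tmp, true};           // caller must Push/Pop tmp around the store
}

int main() {
  assert(ChooseStoreTemp(kR6, kR5).tmp == kIP);
  assert(ChooseStoreTemp(kIP, kR5).tmp == kR6);  // base is R5, fall back to R6
  assert(ChooseStoreTemp(kIP, kSP).needs_push_pop);
  return 0;
}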
diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h
index b26173f..9ccdef7 100644
--- a/compiler/utils/arm/assembler_thumb2.h
+++ b/compiler/utils/arm/assembler_thumb2.h
@@ -269,14 +269,9 @@
Condition cond = AL) OVERRIDE;
void AddConstantSetFlags(Register rd, Register rn, int32_t value,
Condition cond = AL) OVERRIDE;
- void AddConstantWithCarry(Register rd, Register rn, int32_t value,
- Condition cond = AL) {}
// Load and Store. May clobber IP.
void LoadImmediate(Register rd, int32_t value, Condition cond = AL) OVERRIDE;
- void LoadSImmediate(SRegister sd, float value, Condition cond = AL) {}
- void LoadDImmediate(DRegister dd, double value,
- Register scratch, Condition cond = AL) {}
void MarkExceptionHandler(Label* label) OVERRIDE;
void LoadFromOffset(LoadOperandType type,
Register reg,
@@ -324,40 +319,40 @@
private:
// Emit a single 32 or 16 bit data processing instruction.
void EmitDataProcessing(Condition cond,
- Opcode opcode,
- int set_cc,
- Register rn,
- Register rd,
- const ShifterOperand& so);
+ Opcode opcode,
+ bool set_cc,
+ Register rn,
+ Register rd,
+ const ShifterOperand& so);
// Must the instruction be 32 bits or can it possibly be encoded
// in 16 bits?
bool Is32BitDataProcessing(Condition cond,
- Opcode opcode,
- int set_cc,
- Register rn,
- Register rd,
- const ShifterOperand& so);
+ Opcode opcode,
+ bool set_cc,
+ Register rn,
+ Register rd,
+ const ShifterOperand& so);
// Emit a 32 bit data processing instruction.
void Emit32BitDataProcessing(Condition cond,
- Opcode opcode,
- int set_cc,
- Register rn,
- Register rd,
- const ShifterOperand& so);
+ Opcode opcode,
+ bool set_cc,
+ Register rn,
+ Register rd,
+ const ShifterOperand& so);
// Emit a 16 bit data processing instruction.
void Emit16BitDataProcessing(Condition cond,
- Opcode opcode,
- int set_cc,
- Register rn,
- Register rd,
- const ShifterOperand& so);
+ Opcode opcode,
+ bool set_cc,
+ Register rn,
+ Register rd,
+ const ShifterOperand& so);
void Emit16BitAddSub(Condition cond,
Opcode opcode,
- int set_cc,
+ bool set_cc,
Register rn,
Register rd,
const ShifterOperand& so);
@@ -365,12 +360,12 @@
uint16_t EmitCompareAndBranch(Register rn, uint16_t prev, bool n);
void EmitLoadStore(Condition cond,
- bool load,
- bool byte,
- bool half,
- bool is_signed,
- Register rd,
- const Address& ad);
+ bool load,
+ bool byte,
+ bool half,
+ bool is_signed,
+ Register rd,
+ const Address& ad);
void EmitMemOpAddressMode3(Condition cond,
int32_t mode,
diff --git a/compiler/utils/arm/constants_arm.h b/compiler/utils/arm/constants_arm.h
index 702e03a..1513296 100644
--- a/compiler/utils/arm/constants_arm.h
+++ b/compiler/utils/arm/constants_arm.h
@@ -38,15 +38,6 @@
// Constants for specific fields are defined in their respective named enums.
// General constants are in an anonymous enum in class Instr.
-
-// We support both VFPv3-D16 and VFPv3-D32 profiles, but currently only one at
-// a time, so that compile time optimizations can be applied.
-// Warning: VFPv3-D32 is untested.
-#define VFPv3_D16
-#if defined(VFPv3_D16) == defined(VFPv3_D32)
-#error "Exactly one of VFPv3_D16 or VFPv3_D32 can be defined at a time."
-#endif
-
// 4 bits option for the dmb instruction.
// Order and values follows those of the ARM Architecture Reference Manual.
enum DmbOptions {
@@ -66,26 +57,23 @@
};
// Values for double-precision floating point registers.
-enum DRegister {
- D0 = 0,
- D1 = 1,
- D2 = 2,
- D3 = 3,
- D4 = 4,
- D5 = 5,
- D6 = 6,
- D7 = 7,
- D8 = 8,
- D9 = 9,
+enum DRegister { // private marker to prevent generate-operator-out.py from processing.
+ D0 = 0,
+ D1 = 1,
+ D2 = 2,
+ D3 = 3,
+ D4 = 4,
+ D5 = 5,
+ D6 = 6,
+ D7 = 7,
+ D8 = 8,
+ D9 = 9,
D10 = 10,
D11 = 11,
D12 = 12,
D13 = 13,
D14 = 14,
D15 = 15,
-#ifdef VFPv3_D16
- kNumberOfDRegisters = 16,
-#else
D16 = 16,
D17 = 17,
D18 = 18,
@@ -103,7 +91,6 @@
D30 = 30,
D31 = 31,
kNumberOfDRegisters = 32,
-#endif
kNumberOfOverlappingDRegisters = 16,
kNoDRegister = -1,
};
@@ -111,18 +98,18 @@
// Values for the condition field as defined in section A3.2.
-enum Condition {
+enum Condition { // private marker to prevent generate-operator-out.py from processing.
kNoCondition = -1,
- EQ = 0, // equal
- NE = 1, // not equal
- CS = 2, // carry set/unsigned higher or same
- CC = 3, // carry clear/unsigned lower
- MI = 4, // minus/negative
- PL = 5, // plus/positive or zero
- VS = 6, // overflow
- VC = 7, // no overflow
- HI = 8, // unsigned higher
- LS = 9, // unsigned lower or same
+ EQ = 0, // equal
+ NE = 1, // not equal
+ CS = 2, // carry set/unsigned higher or same
+ CC = 3, // carry clear/unsigned lower
+ MI = 4, // minus/negative
+ PL = 5, // plus/positive or zero
+ VS = 6, // overflow
+ VC = 7, // no overflow
+ HI = 8, // unsigned higher
+ LS = 9, // unsigned lower or same
GE = 10, // signed greater than or equal
LT = 11, // signed less than
GT = 12, // signed greater than
@@ -138,16 +125,16 @@
// as defined in section A3.4
enum Opcode {
kNoOperand = -1,
- AND = 0, // Logical AND
- EOR = 1, // Logical Exclusive OR
- SUB = 2, // Subtract
- RSB = 3, // Reverse Subtract
- ADD = 4, // Add
- ADC = 5, // Add with Carry
- SBC = 6, // Subtract with Carry
- RSC = 7, // Reverse Subtract with Carry
- TST = 8, // Test
- TEQ = 9, // Test Equivalence
+ AND = 0, // Logical AND
+ EOR = 1, // Logical Exclusive OR
+ SUB = 2, // Subtract
+ RSB = 3, // Reverse Subtract
+ ADD = 4, // Add
+ ADC = 5, // Add with Carry
+ SBC = 6, // Subtract with Carry
+ RSC = 7, // Reverse Subtract with Carry
+ TST = 8, // Test
+ TEQ = 9, // Test Equivalence
CMP = 10, // Compare
CMN = 11, // Compare Negated
ORR = 12, // Logical (inclusive) OR
@@ -156,7 +143,7 @@
MVN = 15, // Move Not
kMaxOperand = 16
};
-
+std::ostream& operator<<(std::ostream& os, const Opcode& rhs);
// Shifter types for Data-processing operands as defined in section A5.1.2.
enum Shift {
@@ -168,11 +155,11 @@
RRX = 4, // Rotate right with extend.
kMaxShift
};
-
+std::ostream& operator<<(std::ostream& os, const Shift& rhs);
// Constants used for the decoding or encoding of the individual fields of
// instructions. Based on the "Figure 3-1 ARM instruction set summary".
-enum InstructionFields {
+enum InstructionFields { // private marker to keep generate-operator-out.py from processing this enum.
kConditionShift = 28,
kConditionBits = 4,
kTypeShift = 25,
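The "private marker" comments and the operator<< declarations added above tie into ART's generate-operator-out.py script, which scans headers and emits a stream operator for each enum unless the enum is marked private. A rough sketch of the kind of definition the script produces for an enum such as Opcode follows; the generated body is an assumption for illustration, since the real code is emitted by the script rather than written by hand:

  std::ostream& operator<<(std::ostream& os, const Opcode& rhs) {
    switch (rhs) {
      case AND: os << "AND"; break;
      case EOR: os << "EOR"; break;
      case SUB: os << "SUB"; break;
      // ... one case per enumerator ...
      default: os << "Opcode[" << static_cast<int>(rhs) << "]"; break;
    }
    return os;
  }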
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 1af7374..02011b8 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -474,7 +474,7 @@
UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant";
}
-void Arm64Assembler::MemoryBarrier(ManagedRegister m_scratch) {
+void Arm64Assembler::MemoryBarrier(ManagedRegister m_scratch ATTRIBUTE_UNUSED) {
// TODO: Should we check that m_scratch is IP? - see arm.
#if ANDROID_SMP != 0
___ Dmb(vixl::InnerShareable, vixl::BarrierAll);
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index 1b1d121..a69be25 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -27,8 +27,13 @@
#include "utils/assembler.h"
#include "offsets.h"
#include "utils.h"
+
+// TODO: make vixl clean wrt -Wshadow.
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
#include "a64/macro-assembler-a64.h"
#include "a64/disasm-a64.h"
+#pragma GCC diagnostic pop
namespace art {
namespace arm64 {
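Wrapping the vixl includes in a diagnostic push/ignored/pop sequence confines the -Wshadow suppression to the third-party headers, so the newly enabled warning still applies to ART's own code. A minimal sketch of the pattern, assuming a GCC- or Clang-compatible compiler and a hypothetical header name:

  #pragma GCC diagnostic push                 // save the current warning state
  #pragma GCC diagnostic ignored "-Wshadow"   // silence -Wshadow only for the includes below
  #include "third_party/noisy_header.h"       // hypothetical header that trips -Wshadow
  #pragma GCC diagnostic pop                  // restore full warnings for the code that follows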
diff --git a/compiler/utils/array_ref.h b/compiler/utils/array_ref.h
index e6b4a6a..1a7f2e8 100644
--- a/compiler/utils/array_ref.h
+++ b/compiler/utils/array_ref.h
@@ -68,18 +68,13 @@
template <typename U, size_t size>
constexpr ArrayRef(U (&array)[size],
- typename std::enable_if<std::is_same<T, const U>::value, tag>::type t = tag())
+ typename std::enable_if<std::is_same<T, const U>::value, tag>::type
+ t ATTRIBUTE_UNUSED = tag())
: array_(array), size_(size) {
}
- constexpr ArrayRef(T* array, size_t size)
- : array_(array), size_(size) {
- }
-
- template <typename U>
- constexpr ArrayRef(U* array, size_t size,
- typename std::enable_if<std::is_same<T, const U>::value, tag>::type t = tag())
- : array_(array), size_(size) {
+ constexpr ArrayRef(T* array_in, size_t size_in)
+ : array_(array_in), size_(size_in) {
}
template <typename Alloc>
@@ -89,7 +84,8 @@
template <typename U, typename Alloc>
ArrayRef(const std::vector<U, Alloc>& v,
- typename std::enable_if<std::is_same<T, const U>::value, tag>::tag t = tag())
+ typename std::enable_if<std::is_same<T, const U>::value, tag>::tag
+ t ATTRIBUTE_UNUSED = tag())
: array_(v.data()), size_(v.size()) {
}
diff --git a/compiler/utils/assembler.cc b/compiler/utils/assembler.cc
index 8a1289d..6834512 100644
--- a/compiler/utils/assembler.cc
+++ b/compiler/utils/assembler.cc
@@ -125,77 +125,91 @@
}
}
-void Assembler::StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm,
- ManagedRegister scratch) {
+void Assembler::StoreImmediateToThread32(ThreadOffset<4> dest ATTRIBUTE_UNUSED,
+ uint32_t imm ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::StoreImmediateToThread64(ThreadOffset<8> dest, uint32_t imm,
- ManagedRegister scratch) {
+void Assembler::StoreImmediateToThread64(ThreadOffset<8> dest ATTRIBUTE_UNUSED,
+ uint32_t imm ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::StoreStackOffsetToThread32(ThreadOffset<4> thr_offs,
- FrameOffset fr_offs,
- ManagedRegister scratch) {
+void Assembler::StoreStackOffsetToThread32(ThreadOffset<4> thr_offs ATTRIBUTE_UNUSED,
+ FrameOffset fr_offs ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::StoreStackOffsetToThread64(ThreadOffset<8> thr_offs,
- FrameOffset fr_offs,
- ManagedRegister scratch) {
+void Assembler::StoreStackOffsetToThread64(ThreadOffset<8> thr_offs ATTRIBUTE_UNUSED,
+ FrameOffset fr_offs ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::StoreStackPointerToThread32(ThreadOffset<4> thr_offs) {
+void Assembler::StoreStackPointerToThread32(ThreadOffset<4> thr_offs ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::StoreStackPointerToThread64(ThreadOffset<8> thr_offs) {
+void Assembler::StoreStackPointerToThread64(ThreadOffset<8> thr_offs ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::LoadFromThread32(ManagedRegister dest, ThreadOffset<4> src, size_t size) {
+void Assembler::LoadFromThread32(ManagedRegister dest ATTRIBUTE_UNUSED,
+ ThreadOffset<4> src ATTRIBUTE_UNUSED,
+ size_t size ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::LoadFromThread64(ManagedRegister dest, ThreadOffset<8> src, size_t size) {
+void Assembler::LoadFromThread64(ManagedRegister dest ATTRIBUTE_UNUSED,
+ ThreadOffset<8> src ATTRIBUTE_UNUSED,
+ size_t size ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset<4> offs) {
+void Assembler::LoadRawPtrFromThread32(ManagedRegister dest ATTRIBUTE_UNUSED,
+ ThreadOffset<4> offs ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset<8> offs) {
+void Assembler::LoadRawPtrFromThread64(ManagedRegister dest ATTRIBUTE_UNUSED,
+ ThreadOffset<8> offs ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset<4> thr_offs,
- ManagedRegister scratch) {
+void Assembler::CopyRawPtrFromThread32(FrameOffset fr_offs ATTRIBUTE_UNUSED,
+ ThreadOffset<4> thr_offs ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset<8> thr_offs,
- ManagedRegister scratch) {
+void Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs ATTRIBUTE_UNUSED,
+ ThreadOffset<8> thr_offs ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::CopyRawPtrToThread32(ThreadOffset<4> thr_offs, FrameOffset fr_offs,
- ManagedRegister scratch) {
+void Assembler::CopyRawPtrToThread32(ThreadOffset<4> thr_offs ATTRIBUTE_UNUSED,
+ FrameOffset fr_offs ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::CopyRawPtrToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs,
- ManagedRegister scratch) {
+void Assembler::CopyRawPtrToThread64(ThreadOffset<8> thr_offs ATTRIBUTE_UNUSED,
+ FrameOffset fr_offs ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::CallFromThread32(ThreadOffset<4> offset, ManagedRegister scratch) {
+void Assembler::CallFromThread32(ThreadOffset<4> offset ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::CallFromThread64(ThreadOffset<8> offset, ManagedRegister scratch) {
+void Assembler::CallFromThread64(ThreadOffset<8> offset ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
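With -Wno-unused-parameter dropped from the build flags, parameters that these deliberately unimplemented overrides never read are annotated with ATTRIBUTE_UNUSED rather than left to warn. The macro name comes from this change; its expansion below is an assumption shown for illustration:

  // Assumed definition (ART keeps an equivalent in a shared macros header):
  #define ATTRIBUTE_UNUSED __attribute__((__unused__))

  // Usage: the parameter stays part of the interface but is intentionally ignored.
  void NotifyResize(size_t new_size ATTRIBUTE_UNUSED) {  // hypothetical function
    UNIMPLEMENTED(FATAL);
  }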
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 2b0c94c..ad7e98d 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -56,9 +56,9 @@
class ExternalLabel {
public:
- ExternalLabel(const char* name, uintptr_t address)
- : name_(name), address_(address) {
- DCHECK(name != nullptr);
+ ExternalLabel(const char* name_in, uintptr_t address_in)
+ : name_(name_in), address_(address_in) {
+ DCHECK(name_in != nullptr);
}
const char* name() const { return name_; }
@@ -140,10 +140,10 @@
int position_;
AssemblerFixup* previous() const { return previous_; }
- void set_previous(AssemblerFixup* previous) { previous_ = previous; }
+ void set_previous(AssemblerFixup* previous_in) { previous_ = previous_in; }
int position() const { return position_; }
- void set_position(int position) { position_ = position; }
+ void set_position(int position_in) { position_ = position_in; }
friend class AssemblerBuffer;
};
@@ -366,7 +366,7 @@
}
// TODO: Implement with disassembler.
- virtual void Comment(const char* format, ...) { }
+ virtual void Comment(const char* format, ...) { UNUSED(format); }
// Emit code that will create an activation on the stack
virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg,
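The name_in/address_in/position_in renames address GCC's -Wshadow, which on the GCC versions targeted here also reports a parameter that shares its name with a class member, including accessors such as name() or position(). A hypothetical class showing the warning and the fix applied throughout this change:

  class Label {                                         // illustrative only
    const char* name_;
   public:
    explicit Label(const char* name) : name_(name) {}   // -Wshadow: 'name' shadows Label::name()
    const char* name() const { return name_; }
  };
  // Renaming the parameter, e.g. to name_in, keeps the public interface unchanged
  // and silences the warning.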
diff --git a/compiler/utils/growable_array.h b/compiler/utils/growable_array.h
index 61e420c..fde65e7 100644
--- a/compiler/utils/growable_array.h
+++ b/compiler/utils/growable_array.h
@@ -19,26 +19,20 @@
#include <stdint.h>
#include <stddef.h>
-#include "arena_allocator.h"
+
+#include "arena_object.h"
namespace art {
-// Type of growable list for memory tuning.
-enum OatListKind {
- kGrowableArrayMisc = 0,
- kGNumListKinds
-};
-
// Deprecated
// TODO: Replace all uses with ArenaVector<T>.
template<typename T>
-class GrowableArray {
+class GrowableArray : public ArenaObject<kArenaAllocGrowableArray> {
public:
- GrowableArray(ArenaAllocator* arena, size_t init_length, OatListKind kind = kGrowableArrayMisc)
+ GrowableArray(ArenaAllocator* arena, size_t init_length)
: arena_(arena),
num_allocated_(init_length),
- num_used_(0),
- kind_(kind) {
+ num_used_(0) {
elem_list_ = static_cast<T*>(arena_->Alloc(sizeof(T) * init_length,
kArenaAllocGrowableArray));
}
@@ -152,16 +146,10 @@
T* GetRawStorage() const { return elem_list_; }
- static void* operator new(size_t size, ArenaAllocator* arena) {
- return arena->Alloc(sizeof(GrowableArray<T>), kArenaAllocGrowableArray);
- }
- static void operator delete(void* p) {} // Nop.
-
private:
ArenaAllocator* const arena_;
size_t num_allocated_;
size_t num_used_;
- OatListKind kind_;
T* elem_list_;
};
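Deriving GrowableArray from ArenaObject<kArenaAllocGrowableArray> replaces the hand-rolled placement operator new/delete pair deleted above; the base class centralizes the arena-allocation boilerplate. A simplified sketch of the idiom, not the actual arena_object.h contents:

  template <ArenaAllocKind kAllocKind>
  class ArenaObject {
   public:
    static void* operator new(size_t size, ArenaAllocator* arena) {
      return arena->Alloc(size, kAllocKind);       // bump-pointer allocation from the arena
    }
    static void operator delete(void*, size_t) {}  // no-op: arena memory is released wholesale
  };

  // Allocation sites then read:
  //   GrowableArray<int>* list = new (arena) GrowableArray<int>(arena, 16);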
diff --git a/compiler/utils/scoped_arena_containers.h b/compiler/utils/scoped_arena_containers.h
index 0de7403..df93b27 100644
--- a/compiler/utils/scoped_arena_containers.h
+++ b/compiler/utils/scoped_arena_containers.h
@@ -140,12 +140,15 @@
const_pointer address(const_reference x) const { return &x; }
pointer allocate(size_type n, ScopedArenaAllocatorAdapter<void>::pointer hint = nullptr) {
+ UNUSED(hint);
DCHECK_LE(n, max_size());
DebugStackIndirectTopRef::CheckTop();
return reinterpret_cast<T*>(arena_stack_->Alloc(n * sizeof(T),
ArenaAllocatorAdapterKind::Kind()));
}
void deallocate(pointer p, size_type n) {
+ UNUSED(p);
+ UNUSED(n);
DebugStackIndirectTopRef::CheckTop();
}
diff --git a/compiler/utils/stack_checks.h b/compiler/utils/stack_checks.h
index ce01077..e762f7d 100644
--- a/compiler/utils/stack_checks.h
+++ b/compiler/utils/stack_checks.h
@@ -35,6 +35,7 @@
//
// A frame is considered large when it's above kLargeFrameSize.
static inline bool FrameNeedsStackCheck(size_t size, InstructionSet isa) {
+ UNUSED(isa);
return size >= kLargeFrameSize;
}
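UNUSED(...) is the statement-level companion to ATTRIBUTE_UNUSED, convenient where only some code paths ignore an argument or where annotating the declaration is awkward. A common definition is shown as an assumption; the usage mirrors the FrameNeedsStackCheck change above:

  // Assumed definition; the actual ART macro may differ in detail:
  #define UNUSED(x) ((void)(x))

  static inline bool FrameNeedsStackCheck(size_t size, InstructionSet isa) {
    UNUSED(isa);                     // isa is kept only for interface symmetry across backends
    return size >= kLargeFrameSize;
  }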
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index b5bf31b..de4e6de 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -31,7 +31,7 @@
class Immediate : public ValueObject {
public:
- explicit Immediate(int32_t value) : value_(value) {}
+ explicit Immediate(int32_t value_in) : value_(value_in) {}
int32_t value() const { return value_; }
@@ -90,16 +90,16 @@
// Operand can be sub classed (e.g: Address).
Operand() : length_(0) { }
- void SetModRM(int mod, Register rm) {
- CHECK_EQ(mod & ~3, 0);
- encoding_[0] = (mod << 6) | rm;
+ void SetModRM(int mod_in, Register rm_in) {
+ CHECK_EQ(mod_in & ~3, 0);
+ encoding_[0] = (mod_in << 6) | rm_in;
length_ = 1;
}
- void SetSIB(ScaleFactor scale, Register index, Register base) {
+ void SetSIB(ScaleFactor scale_in, Register index_in, Register base_in) {
CHECK_EQ(length_, 1);
- CHECK_EQ(scale & ~3, 0);
- encoding_[1] = (scale << 6) | (index << 3) | base;
+ CHECK_EQ(scale_in & ~3, 0);
+ encoding_[1] = (scale_in << 6) | (index_in << 3) | base_in;
length_ = 2;
}
@@ -122,10 +122,10 @@
explicit Operand(Register reg) { SetModRM(3, reg); }
// Get the operand encoding byte at the given index.
- uint8_t encoding_at(int index) const {
- CHECK_GE(index, 0);
- CHECK_LT(index, length_);
- return encoding_[index];
+ uint8_t encoding_at(int index_in) const {
+ CHECK_GE(index_in, 0);
+ CHECK_LT(index_in, length_);
+ return encoding_[index_in];
}
friend class X86Assembler;
@@ -134,57 +134,57 @@
class Address : public Operand {
public:
- Address(Register base, int32_t disp) {
- Init(base, disp);
+ Address(Register base_in, int32_t disp) {
+ Init(base_in, disp);
}
- Address(Register base, Offset disp) {
- Init(base, disp.Int32Value());
+ Address(Register base_in, Offset disp) {
+ Init(base_in, disp.Int32Value());
}
- Address(Register base, FrameOffset disp) {
- CHECK_EQ(base, ESP);
+ Address(Register base_in, FrameOffset disp) {
+ CHECK_EQ(base_in, ESP);
Init(ESP, disp.Int32Value());
}
- Address(Register base, MemberOffset disp) {
- Init(base, disp.Int32Value());
+ Address(Register base_in, MemberOffset disp) {
+ Init(base_in, disp.Int32Value());
}
- void Init(Register base, int32_t disp) {
- if (disp == 0 && base != EBP) {
- SetModRM(0, base);
- if (base == ESP) SetSIB(TIMES_1, ESP, base);
+ void Init(Register base_in, int32_t disp) {
+ if (disp == 0 && base_in != EBP) {
+ SetModRM(0, base_in);
+ if (base_in == ESP) SetSIB(TIMES_1, ESP, base_in);
} else if (disp >= -128 && disp <= 127) {
- SetModRM(1, base);
- if (base == ESP) SetSIB(TIMES_1, ESP, base);
+ SetModRM(1, base_in);
+ if (base_in == ESP) SetSIB(TIMES_1, ESP, base_in);
SetDisp8(disp);
} else {
- SetModRM(2, base);
- if (base == ESP) SetSIB(TIMES_1, ESP, base);
+ SetModRM(2, base_in);
+ if (base_in == ESP) SetSIB(TIMES_1, ESP, base_in);
SetDisp32(disp);
}
}
- Address(Register index, ScaleFactor scale, int32_t disp) {
- CHECK_NE(index, ESP); // Illegal addressing mode.
+ Address(Register index_in, ScaleFactor scale_in, int32_t disp) {
+ CHECK_NE(index_in, ESP); // Illegal addressing mode.
SetModRM(0, ESP);
- SetSIB(scale, index, EBP);
+ SetSIB(scale_in, index_in, EBP);
SetDisp32(disp);
}
- Address(Register base, Register index, ScaleFactor scale, int32_t disp) {
- CHECK_NE(index, ESP); // Illegal addressing mode.
- if (disp == 0 && base != EBP) {
+ Address(Register base_in, Register index_in, ScaleFactor scale_in, int32_t disp) {
+ CHECK_NE(index_in, ESP); // Illegal addressing mode.
+ if (disp == 0 && base_in != EBP) {
SetModRM(0, ESP);
- SetSIB(scale, index, base);
+ SetSIB(scale_in, index_in, base_in);
} else if (disp >= -128 && disp <= 127) {
SetModRM(1, ESP);
- SetSIB(scale, index, base);
+ SetSIB(scale_in, index_in, base_in);
SetDisp8(disp);
} else {
SetModRM(2, ESP);
- SetSIB(scale, index, base);
+ SetSIB(scale_in, index_in, base_in);
SetDisp32(disp);
}
}
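The Address constructors above pick the shortest IA-32 encoding from the displacement: mod=0 with no displacement byte when disp is zero (except for an EBP base, which has no disp-free form), mod=1 with an 8-bit displacement when it fits in a signed byte, and mod=2 with a 32-bit displacement otherwise; an ESP base always forces a SIB byte. Illustrative encodings under those rules, with the reg bits shown as zero since they are filled in later when the instruction itself is emitted:

  //   Address(EAX, 0)     -> ModRM 0x00                       ; [eax]
  //   Address(EAX, 8)     -> ModRM 0x40, disp8 = 8            ; [eax + 8]
  //   Address(EAX, 4096)  -> ModRM 0x80, disp32 = 4096        ; [eax + 4096]
  //   Address(ESP, 8)     -> ModRM 0x44, SIB 0x24, disp8 = 8  ; ESP base requires a SIB byte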
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 2de3ce5..5b16f08 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -38,7 +38,7 @@
// conversion rules in expressions regarding negation, especially size_t on 32b.
class Immediate : public ValueObject {
public:
- explicit Immediate(int64_t value) : value_(value) {}
+ explicit Immediate(int64_t value_in) : value_(value_in) {}
int64_t value() const { return value_; }
@@ -105,26 +105,26 @@
// Operand can be sub classed (e.g: Address).
Operand() : rex_(0), length_(0) { }
- void SetModRM(uint8_t mod, CpuRegister rm) {
- CHECK_EQ(mod & ~3, 0);
- if (rm.NeedsRex()) {
+ void SetModRM(uint8_t mod_in, CpuRegister rm_in) {
+ CHECK_EQ(mod_in & ~3, 0);
+ if (rm_in.NeedsRex()) {
rex_ |= 0x41; // REX.000B
}
- encoding_[0] = (mod << 6) | rm.LowBits();
+ encoding_[0] = (mod_in << 6) | rm_in.LowBits();
length_ = 1;
}
- void SetSIB(ScaleFactor scale, CpuRegister index, CpuRegister base) {
+ void SetSIB(ScaleFactor scale_in, CpuRegister index_in, CpuRegister base_in) {
CHECK_EQ(length_, 1);
- CHECK_EQ(scale & ~3, 0);
- if (base.NeedsRex()) {
+ CHECK_EQ(scale_in & ~3, 0);
+ if (base_in.NeedsRex()) {
rex_ |= 0x41; // REX.000B
}
- if (index.NeedsRex()) {
+ if (index_in.NeedsRex()) {
rex_ |= 0x42; // REX.00X0
}
- encoding_[1] = (scale << 6) | (static_cast<uint8_t>(index.LowBits()) << 3) |
- static_cast<uint8_t>(base.LowBits());
+ encoding_[1] = (scale_in << 6) | (static_cast<uint8_t>(index_in.LowBits()) << 3) |
+ static_cast<uint8_t>(base_in.LowBits());
length_ = 2;
}
@@ -148,10 +148,10 @@
explicit Operand(CpuRegister reg) : rex_(0), length_(0) { SetModRM(3, reg); }
// Get the operand encoding byte at the given index.
- uint8_t encoding_at(int index) const {
- CHECK_GE(index, 0);
- CHECK_LT(index, length_);
- return encoding_[index];
+ uint8_t encoding_at(int index_in) const {
+ CHECK_GE(index_in, 0);
+ CHECK_LT(index_in, length_);
+ return encoding_[index_in];
}
friend class X86_64Assembler;
@@ -160,64 +160,64 @@
class Address : public Operand {
public:
- Address(CpuRegister base, int32_t disp) {
- Init(base, disp);
+ Address(CpuRegister base_in, int32_t disp) {
+ Init(base_in, disp);
}
- Address(CpuRegister base, Offset disp) {
- Init(base, disp.Int32Value());
+ Address(CpuRegister base_in, Offset disp) {
+ Init(base_in, disp.Int32Value());
}
- Address(CpuRegister base, FrameOffset disp) {
- CHECK_EQ(base.AsRegister(), RSP);
+ Address(CpuRegister base_in, FrameOffset disp) {
+ CHECK_EQ(base_in.AsRegister(), RSP);
Init(CpuRegister(RSP), disp.Int32Value());
}
- Address(CpuRegister base, MemberOffset disp) {
- Init(base, disp.Int32Value());
+ Address(CpuRegister base_in, MemberOffset disp) {
+ Init(base_in, disp.Int32Value());
}
- void Init(CpuRegister base, int32_t disp) {
- if (disp == 0 && base.AsRegister() != RBP) {
- SetModRM(0, base);
- if (base.AsRegister() == RSP) {
- SetSIB(TIMES_1, CpuRegister(RSP), base);
+ void Init(CpuRegister base_in, int32_t disp) {
+ if (disp == 0 && base_in.AsRegister() != RBP) {
+ SetModRM(0, base_in);
+ if (base_in.AsRegister() == RSP) {
+ SetSIB(TIMES_1, CpuRegister(RSP), base_in);
}
} else if (disp >= -128 && disp <= 127) {
- SetModRM(1, base);
- if (base.AsRegister() == RSP) {
- SetSIB(TIMES_1, CpuRegister(RSP), base);
+ SetModRM(1, base_in);
+ if (base_in.AsRegister() == RSP) {
+ SetSIB(TIMES_1, CpuRegister(RSP), base_in);
}
SetDisp8(disp);
} else {
- SetModRM(2, base);
- if (base.AsRegister() == RSP) {
- SetSIB(TIMES_1, CpuRegister(RSP), base);
+ SetModRM(2, base_in);
+ if (base_in.AsRegister() == RSP) {
+ SetSIB(TIMES_1, CpuRegister(RSP), base_in);
}
SetDisp32(disp);
}
}
- Address(CpuRegister index, ScaleFactor scale, int32_t disp) {
- CHECK_NE(index.AsRegister(), RSP); // Illegal addressing mode.
+ Address(CpuRegister index_in, ScaleFactor scale_in, int32_t disp) {
+ CHECK_NE(index_in.AsRegister(), RSP); // Illegal addressing mode.
SetModRM(0, CpuRegister(RSP));
- SetSIB(scale, index, CpuRegister(RBP));
+ SetSIB(scale_in, index_in, CpuRegister(RBP));
SetDisp32(disp);
}
- Address(CpuRegister base, CpuRegister index, ScaleFactor scale, int32_t disp) {
- CHECK_NE(index.AsRegister(), RSP); // Illegal addressing mode.
- if (disp == 0 && base.AsRegister() != RBP) {
+ Address(CpuRegister base_in, CpuRegister index_in, ScaleFactor scale_in, int32_t disp) {
+ CHECK_NE(index_in.AsRegister(), RSP); // Illegal addressing mode.
+ if (disp == 0 && base_in.AsRegister() != RBP) {
SetModRM(0, CpuRegister(RSP));
- SetSIB(scale, index, base);
+ SetSIB(scale_in, index_in, base_in);
} else if (disp >= -128 && disp <= 127) {
SetModRM(1, CpuRegister(RSP));
- SetSIB(scale, index, base);
+ SetSIB(scale_in, index_in, base_in);
SetDisp8(disp);
} else {
SetModRM(2, CpuRegister(RSP));
- SetSIB(scale, index, base);
+ SetSIB(scale_in, index_in, base_in);
SetDisp32(disp);
}
}
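The 64-bit Operand additionally accumulates a REX prefix: registers R8-R15 need an extension bit, so SetModRM ORs in REX.B (0x41) when the rm/base register requires it and SetSIB ORs in REX.X (0x42) for such an index, while only the low three bits of each register reach the ModRM/SIB bytes through LowBits(). A few illustrative cases under the standard AMD64 0100WRXB layout:

  //   Address(CpuRegister(R8), 0)                      -> rex_ = 0x41 (REX.B), rm = R8 & 7
  //   Address(CpuRegister(RAX), CpuRegister(R9),
  //           TIMES_4, 16)                             -> rex_ = 0x42 (REX.X), index = R9 & 7
  //   base and index both taken from R8..R15           -> rex_ = 0x43 (REX.X | REX.B)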
diff --git a/dex2oat/Android.mk b/dex2oat/Android.mk
index 2ef826b..4f39c42 100644
--- a/dex2oat/Android.mk
+++ b/dex2oat/Android.mk
@@ -38,8 +38,8 @@
# We always build dex2oat and dependencies, even if the host build is otherwise disabled, since they are used to cross compile for the target.
ifeq ($(ART_BUILD_HOST_NDEBUG),true)
- $(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libart-compiler,art/compiler,host,ndebug))
+ $(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libart-compiler libziparchive-host,art/compiler,host,ndebug))
endif
ifeq ($(ART_BUILD_HOST_DEBUG),true)
- $(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libartd-compiler,art/compiler,host,debug))
+ $(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libartd-compiler libziparchive-host,art/compiler,host,debug))
endif
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 4f04e72..7770588 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -411,7 +411,6 @@
const std::string& android_root,
bool is_host,
File* oat_file,
- const std::string& oat_location,
TimingLogger* timings,
SafeMap<std::string, std::string>* key_value_store) {
CHECK(key_value_store != nullptr);
@@ -535,7 +534,7 @@
for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
if (!runtime->HasCalleeSaveMethod(type)) {
- runtime->SetCalleeSaveMethod(runtime->CreateCalleeSaveMethod(type), type);
+ runtime->SetCalleeSaveMethod(runtime->CreateCalleeSaveMethod(), type);
}
}
runtime->GetClassLinker()->FixupDexCaches(runtime->GetResolutionMethod());
@@ -1066,8 +1065,8 @@
} else if (option == "--print-all-passes") {
PassDriverMEOpts::SetPrintAllPasses();
} else if (option.starts_with("--dump-cfg-passes=")) {
- std::string dump_passes = option.substr(strlen("--dump-cfg-passes=")).data();
- PassDriverMEOpts::SetDumpPassList(dump_passes);
+ std::string dump_passes_string = option.substr(strlen("--dump-cfg-passes=")).data();
+ PassDriverMEOpts::SetDumpPassList(dump_passes_string);
} else if (option == "--print-pass-options") {
print_pass_options = true;
} else if (option.starts_with("--pass-options=")) {
@@ -1338,7 +1337,6 @@
// If --image-classes was specified, calculate the full list of classes to include in the image
std::unique_ptr<std::set<std::string>> image_classes(nullptr);
if (image_classes_filename != nullptr) {
- std::string error_msg;
if (image_classes_zip_filename != nullptr) {
image_classes.reset(dex2oat->ReadImageClassesFromZip(image_classes_zip_filename,
image_classes_filename,
@@ -1361,7 +1359,6 @@
} else {
if (dex_filenames.empty()) {
ATRACE_BEGIN("Opening zip archive from file descriptor");
- std::string error_msg;
std::unique_ptr<ZipArchive> zip_archive(ZipArchive::OpenFromFd(zip_fd, zip_location.c_str(),
&error_msg));
if (zip_archive.get() == nullptr) {
@@ -1462,7 +1459,6 @@
android_root,
is_host,
oat_file.get(),
- oat_location,
&timings,
key_value_store.get())) {
LOG(ERROR) << "Failed to create oat file: " << oat_location;
diff --git a/disassembler/Android.mk b/disassembler/Android.mk
index eb3b024..f2dd1ee 100644
--- a/disassembler/Android.mk
+++ b/disassembler/Android.mk
@@ -84,11 +84,11 @@
LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk
LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
include external/libcxx/libcxx.mk
+ # For disassembler_arm64.
+ LOCAL_SHARED_LIBRARIES += libvixl
ifeq ($$(art_target_or_host),target)
- LOCAL_SHARED_LIBRARIES += libcutils libvixl
include $(BUILD_SHARED_LIBRARY)
else # host
- LOCAL_STATIC_LIBRARIES += libcutils libvixl
include $(BUILD_HOST_SHARED_LIBRARY)
endif
endef
diff --git a/disassembler/disassembler_arm.cc b/disassembler/disassembler_arm.cc
index ee652b3..9243b1a 100644
--- a/disassembler/disassembler_arm.cc
+++ b/disassembler/disassembler_arm.cc
@@ -125,8 +125,10 @@
};
struct ArmRegister {
- explicit ArmRegister(uint32_t r) : r(r) { CHECK_LE(r, 15U); }
- ArmRegister(uint32_t instruction, uint32_t at_bit) : r((instruction >> at_bit) & 0xf) { CHECK_LE(r, 15U); }
+ explicit ArmRegister(uint32_t r_in) : r(r_in) { CHECK_LE(r_in, 15U); }
+ ArmRegister(uint32_t instruction, uint32_t at_bit) : r((instruction >> at_bit) & 0xf) {
+ CHECK_LE(r, 15U);
+ }
uint32_t r;
};
std::ostream& operator<<(std::ostream& os, const ArmRegister& r) {
@@ -390,7 +392,7 @@
return (bit_a << 31) | ((1 << 30) - (bit_b << 25)) | (slice << 19);
}
-uint64_t VFPExpand64(uint32_t imm8) {
+static uint64_t VFPExpand64(uint32_t imm8) {
CHECK_EQ(imm8 & 0xffu, imm8);
uint64_t bit_a = (imm8 >> 7) & 1;
uint64_t bit_b = (imm8 >> 6) & 1;
@@ -398,45 +400,6 @@
return (bit_a << 31) | ((UINT64_C(1) << 62) - (bit_b << 54)) | (slice << 48);
}
-uint64_t AdvSIMDExpand(uint32_t op, uint32_t cmode, uint32_t imm8) {
- CHECK_EQ(op & 1, op);
- CHECK_EQ(cmode & 0xf, cmode);
- CHECK_EQ(imm8 & 0xff, imm8);
- int32_t cmode321 = cmode >> 1;
- if (imm8 == 0 && cmode321 != 0 && cmode321 != 4 && cmode321 != 7) {
- return INT64_C(0x00000000deadbeef); // UNPREDICTABLE
- }
- uint64_t imm = imm8;
- switch (cmode321) {
- case 3: imm <<= 8; FALLTHROUGH_INTENDED;
- case 2: imm <<= 8; FALLTHROUGH_INTENDED;
- case 1: imm <<= 8; FALLTHROUGH_INTENDED;
- case 0: return static_cast<int64_t>((imm << 32) | imm);
- case 5: imm <<= 8; FALLTHROUGH_INTENDED;
- case 4: return static_cast<int64_t>((imm << 48) | (imm << 32) | (imm << 16) | imm);
- case 6:
- imm = ((imm + 1u) << ((cmode & 1) != 0 ? 16 : 8)) - 1u; // Add 8 or 16 ones.
- return static_cast<int64_t>((imm << 32) | imm);
- default:
- CHECK_EQ(cmode321, 7);
- if ((cmode & 1) == 0 && op == 0) {
- imm = (imm << 8) | imm;
- return static_cast<int64_t>((imm << 48) | (imm << 32) | (imm << 16) | imm);
- } else if ((cmode & 1) == 0 && op != 0) {
- for (int i = 1; i != 8; ++i) {
- imm |= ((imm >> i) & UINT64_C(1)) << (i * 8);
- }
- imm = imm & ~UINT64_C(0xfe);
- return static_cast<int64_t>((imm << 8) - imm);
- } else if ((cmode & 1) != 0 && op == 0) {
- imm = static_cast<uint32_t>(VFPExpand32(imm8));
- return static_cast<int64_t>((imm << 32) | imm);
- } else {
- return INT64_C(0xdeadbeef00000000); // UNDEFINED
- }
- }
-}
-
size_t DisassemblerArm::DumpThumb32(std::ostream& os, const uint8_t* instr_ptr) {
uint32_t instr = (ReadU16(instr_ptr) << 16) | ReadU16(instr_ptr + 2);
// |111|1 1|1000000|0000|1111110000000000|
@@ -1359,8 +1322,6 @@
}
} else {
// STR Rt, [Rn, Rm, LSL #imm2] - 111 11 000 010 0 nnnn tttt 000000iimmmm
- ArmRegister Rn(instr, 16);
- ArmRegister Rt(instr, 12);
ArmRegister Rm(instr, 0);
uint32_t imm2 = (instr >> 4) & 3;
opcode << "str.w";
diff --git a/disassembler/disassembler_arm64.h b/disassembler/disassembler_arm64.h
index ad20c70..e56fe4f 100644
--- a/disassembler/disassembler_arm64.h
+++ b/disassembler/disassembler_arm64.h
@@ -19,8 +19,11 @@
#include "disassembler.h"
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
#include "a64/decoder-a64.h"
#include "a64/disasm-a64.h"
+#pragma GCC diagnostic pop
namespace art {
namespace arm64 {
diff --git a/oatdump/Android.mk b/oatdump/Android.mk
index 25d10bd..a8f120f 100644
--- a/oatdump/Android.mk
+++ b/oatdump/Android.mk
@@ -25,14 +25,14 @@
$(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),libcutils libart-disassembler libart-compiler,art/disassembler art/compiler,target,ndebug))
endif
ifeq ($(ART_BUILD_TARGET_DEBUG),true)
- $(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),libcutils libartd-disassembler libart-compiler,art/disassembler art/compiler,target,debug))
+ $(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),libcutils libartd-disassembler libartd-compiler,art/disassembler art/compiler,target,debug))
endif
ifeq ($(ART_BUILD_HOST_NDEBUG),true)
$(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),libart-disassembler libart-compiler,art/disassembler art/compiler,host,ndebug))
endif
ifeq ($(ART_BUILD_HOST_DEBUG),true)
- $(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),libartd-disassembler libart-compiler,art/disassembler art/compiler,host,debug))
+ $(eval $(call build-art-executable,oatdump,$(OATDUMP_SRC_FILES),libartd-disassembler libartd-compiler,art/disassembler art/compiler,host,debug))
endif
########################################################################
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index f565277..dca048f 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -263,10 +263,13 @@
method_access_flags);
}
- void RegisterForDedup(const DexFile::ClassDef& class_def, uint32_t class_method_index,
- const OatFile::OatMethod& oat_method, const DexFile& dex_file,
- uint32_t dex_method_idx, const DexFile::CodeItem* code_item,
- uint32_t method_access_flags) {
+ void RegisterForDedup(const DexFile::ClassDef& class_def ATTRIBUTE_UNUSED,
+ uint32_t class_method_index ATTRIBUTE_UNUSED,
+ const OatFile::OatMethod& oat_method,
+ const DexFile& dex_file ATTRIBUTE_UNUSED,
+ uint32_t dex_method_idx ATTRIBUTE_UNUSED,
+ const DexFile::CodeItem* code_item ATTRIBUTE_UNUSED,
+ uint32_t method_access_flags ATTRIBUTE_UNUSED) {
state_[oat_method.GetCodeOffset()]++;
}
@@ -294,10 +297,13 @@
return DedupState::kDeduplicatedFirst;
}
- void AddSymbol(const DexFile::ClassDef& class_def, uint32_t class_method_index,
- const OatFile::OatMethod& oat_method, const DexFile& dex_file,
- uint32_t dex_method_idx, const DexFile::CodeItem* code_item,
- uint32_t method_access_flags) {
+ void AddSymbol(const DexFile::ClassDef& class_def ATTRIBUTE_UNUSED,
+ uint32_t class_method_index ATTRIBUTE_UNUSED,
+ const OatFile::OatMethod& oat_method,
+ const DexFile& dex_file,
+ uint32_t dex_method_idx,
+ const DexFile::CodeItem* code_item ATTRIBUTE_UNUSED,
+ uint32_t method_access_flags ATTRIBUTE_UNUSED) {
DedupState dedup = IsDuplicated(oat_method.GetCodeOffset());
if (dedup != DedupState::kDeduplicatedOther) {
std::string pretty_name = PrettyMethod(dex_method_idx, dex_file, true);
@@ -316,7 +322,7 @@
}
// Set oat data offset. Required by ElfBuilder/CodeOutput.
- void SetCodeOffset(size_t offset) {
+ void SetCodeOffset(size_t offset ATTRIBUTE_UNUSED) {
// Nothing to do.
}
@@ -1290,26 +1296,26 @@
std::ostream indent2_os(&indent2_filter);
mirror::ObjectArray<mirror::Object>* image_root_object_array
= image_root_object->AsObjectArray<mirror::Object>();
- for (int i = 0; i < image_root_object_array->GetLength(); i++) {
- mirror::Object* value = image_root_object_array->Get(i);
+ for (int j = 0; j < image_root_object_array->GetLength(); j++) {
+ mirror::Object* value = image_root_object_array->Get(j);
size_t run = 0;
- for (int32_t j = i + 1; j < image_root_object_array->GetLength(); j++) {
- if (value == image_root_object_array->Get(j)) {
+ for (int32_t k = j + 1; k < image_root_object_array->GetLength(); k++) {
+ if (value == image_root_object_array->Get(k)) {
run++;
} else {
break;
}
}
if (run == 0) {
- indent2_os << StringPrintf("%d: ", i);
+ indent2_os << StringPrintf("%d: ", j);
} else {
- indent2_os << StringPrintf("%d to %zd: ", i, i + run);
- i = i + run;
+ indent2_os << StringPrintf("%d to %zd: ", j, j + run);
+ j = j + run;
}
if (value != nullptr) {
PrettyObjectValue(indent2_os, value->GetClass(), value);
} else {
- indent2_os << i << ": null\n";
+ indent2_os << j << ": null\n";
}
}
}
@@ -1741,20 +1747,20 @@
dex_instruction_bytes(0) {}
struct SizeAndCount {
- SizeAndCount(size_t bytes, size_t count) : bytes(bytes), count(count) {}
+ SizeAndCount(size_t bytes_in, size_t count_in) : bytes(bytes_in), count(count_in) {}
size_t bytes;
size_t count;
};
typedef SafeMap<std::string, SizeAndCount> SizeAndCountTable;
SizeAndCountTable sizes_and_counts;
- void Update(const char* descriptor, size_t object_bytes) {
+ void Update(const char* descriptor, size_t object_bytes_in) {
SizeAndCountTable::iterator it = sizes_and_counts.find(descriptor);
if (it != sizes_and_counts.end()) {
- it->second.bytes += object_bytes;
+ it->second.bytes += object_bytes_in;
it->second.count += 1;
} else {
- sizes_and_counts.Put(descriptor, SizeAndCount(object_bytes, 1));
+ sizes_and_counts.Put(descriptor, SizeAndCount(object_bytes_in, 1));
}
}
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 629330b..75160ca 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -466,14 +466,15 @@
}
void PatchOat::PatchVisitor::operator() (mirror::Object* obj, MemberOffset off,
- bool is_static_unused) const {
+ bool is_static_unused ATTRIBUTE_UNUSED) const {
mirror::Object* referent = obj->GetFieldObject<mirror::Object, kVerifyNone>(off);
DCHECK(patcher_->InHeap(referent)) << "Referent is not in the heap.";
mirror::Object* moved_object = patcher_->RelocatedAddressOf(referent);
copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(off, moved_object);
}
-void PatchOat::PatchVisitor::operator() (mirror::Class* cls, mirror::Reference* ref) const {
+void PatchOat::PatchVisitor::operator() (mirror::Class* cls ATTRIBUTE_UNUSED,
+ mirror::Reference* ref) const {
MemberOffset off = mirror::Reference::ReferentOffset();
mirror::Object* referent = ref->GetReferent();
DCHECK(patcher_->InHeap(referent)) << "Referent is not in the heap.";
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 6f6dcbc..4505b8e 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -31,7 +31,6 @@
base/stringprintf.cc \
base/timing_logger.cc \
base/unix_file/fd_file.cc \
- base/unix_file/mapped_file.cc \
base/unix_file/null_file.cc \
base/unix_file/random_access_file_utils.cc \
base/unix_file/string_file.cc \
@@ -296,12 +295,16 @@
arch/x86_64/registers_x86_64.h \
base/allocator.h \
base/mutex.h \
+ debugger.h \
dex_file.h \
dex_instruction.h \
+ gc/allocator/rosalloc.h \
gc/collector/gc_type.h \
+ gc/allocator_type.h \
gc/collector_type.h \
gc/space/space.h \
gc/heap.h \
+ instrumentation.h \
indirect_reference_table.h \
instruction_set.h \
invoke_type.h \
@@ -311,7 +314,10 @@
mirror/class.h \
oat.h \
object_callbacks.h \
+ profiler_options.h \
quick/inline_method_analyser.h \
+ runtime.h \
+ stack.h \
thread.h \
thread_state.h \
verifier/method_verifier.h
@@ -457,15 +463,21 @@
LOCAL_C_INCLUDES += $$(ART_C_INCLUDES)
LOCAL_C_INCLUDES += art/sigchainlib
- LOCAL_SHARED_LIBRARIES += liblog libnativehelper libnativebridge
+ LOCAL_SHARED_LIBRARIES := libnativehelper libnativebridge libsigchain
include external/libcxx/libcxx.mk
LOCAL_SHARED_LIBRARIES += libbacktrace_libc++
ifeq ($$(art_target_or_host),target)
- LOCAL_SHARED_LIBRARIES += libcutils libdl libutils libsigchain
+ LOCAL_SHARED_LIBRARIES += libdl
+  # ZipArchive support; the order matters here to get all symbols.
LOCAL_STATIC_LIBRARIES := libziparchive libz
+ # For android::FileMap used by libziparchive.
+ LOCAL_SHARED_LIBRARIES += libutils
+ # For liblog, atrace, properties, ashmem, set_sched_policy and socket_peer_is_trusted.
+ LOCAL_SHARED_LIBRARIES += libcutils
else # host
- LOCAL_STATIC_LIBRARIES += libcutils libziparchive-host libz libutils
- LOCAL_SHARED_LIBRARIES += libsigchain
+ LOCAL_SHARED_LIBRARIES += libziparchive-host
+ # For ashmem_create_region.
+ LOCAL_STATIC_LIBRARIES += libcutils
endif
ifeq ($$(ART_USE_PORTABLE_COMPILER),true)
include $$(LLVM_GEN_INTRINSICS_MK)
diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc
index 42bf8fb..cac500c 100644
--- a/runtime/arch/arch_test.cc
+++ b/runtime/arch/arch_test.cc
@@ -32,7 +32,7 @@
t->TransitionFromSuspendedToRunnable(); // So we can create callee-save methods.
r->SetInstructionSet(isa);
- mirror::ArtMethod* save_method = r->CreateCalleeSaveMethod(type);
+ mirror::ArtMethod* save_method = r->CreateCalleeSaveMethod();
r->SetCalleeSaveMethod(save_method, type);
QuickMethodFrameInfo frame_info = save_method->GetQuickFrameInfo();
EXPECT_EQ(frame_info.FrameSizeInBytes(), save_size) << "Expected and real size differs for "
diff --git a/runtime/arch/arm/context_arm.cc b/runtime/arch/arm/context_arm.cc
index fd9c626..9e8d282 100644
--- a/runtime/arch/arm/context_arm.cc
+++ b/runtime/arch/arm/context_arm.cc
@@ -17,10 +17,8 @@
#include "context_arm.h"
#include "mirror/art_method-inl.h"
-#include "mirror/object-inl.h"
#include "quick/quick_method_frame_info.h"
-#include "stack.h"
-#include "thread.h"
+#include "utils.h"
namespace art {
namespace arm {
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index 24e9b1d..85a0dd2 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -18,6 +18,7 @@
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/portable/portable_entrypoints.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
+#include "entrypoints/quick/quick_default_externs.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/entrypoint_utils.h"
#include "entrypoints/math_entrypoints.h"
@@ -26,56 +27,10 @@
namespace art {
-// Portable entrypoints.
-extern "C" void art_portable_resolution_trampoline(mirror::ArtMethod*);
-extern "C" void art_portable_to_interpreter_bridge(mirror::ArtMethod*);
-
// Cast entrypoints.
extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass,
const mirror::Class* ref_class);
-extern "C" void art_quick_check_cast(void*, void*);
-// DexCache entrypoints.
-extern "C" void* art_quick_initialize_static_storage(uint32_t, void*);
-extern "C" void* art_quick_initialize_type(uint32_t, void*);
-extern "C" void* art_quick_initialize_type_and_verify_access(uint32_t, void*);
-extern "C" void* art_quick_resolve_string(void*, uint32_t);
-
-// Field entrypoints.
-extern "C" int art_quick_set8_instance(uint32_t, void*, int8_t);
-extern "C" int art_quick_set8_static(uint32_t, int8_t);
-extern "C" int art_quick_set16_instance(uint32_t, void*, int16_t);
-extern "C" int art_quick_set16_static(uint32_t, int16_t);
-extern "C" int art_quick_set32_instance(uint32_t, void*, int32_t);
-extern "C" int art_quick_set32_static(uint32_t, int32_t);
-extern "C" int art_quick_set64_instance(uint32_t, void*, int64_t);
-extern "C" int art_quick_set64_static(uint32_t, int64_t);
-extern "C" int art_quick_set_obj_instance(uint32_t, void*, void*);
-extern "C" int art_quick_set_obj_static(uint32_t, void*);
-extern "C" int8_t art_quick_get_byte_instance(uint32_t, void*);
-extern "C" uint8_t art_quick_get_boolean_instance(uint32_t, void*);
-extern "C" int8_t art_quick_get_byte_static(uint32_t);
-extern "C" uint8_t art_quick_get_boolean_static(uint32_t);
-extern "C" int16_t art_quick_get_short_instance(uint32_t, void*);
-extern "C" uint16_t art_quick_get_char_instance(uint32_t, void*);
-extern "C" int16_t art_quick_get_short_static(uint32_t);
-extern "C" uint16_t art_quick_get_char_static(uint32_t);
-extern "C" int32_t art_quick_get32_instance(uint32_t, void*);
-extern "C" int32_t art_quick_get32_static(uint32_t);
-extern "C" int64_t art_quick_get64_instance(uint32_t, void*);
-extern "C" int64_t art_quick_get64_static(uint32_t);
-extern "C" void* art_quick_get_obj_instance(uint32_t, void*);
-extern "C" void* art_quick_get_obj_static(uint32_t);
-
-// Array entrypoints.
-extern "C" void art_quick_aput_obj_with_null_and_bound_check(void*, uint32_t, void*);
-extern "C" void art_quick_aput_obj_with_bound_check(void*, uint32_t, void*);
-extern "C" void art_quick_aput_obj(void*, uint32_t, void*);
-extern "C" void art_quick_handle_fill_data(void*, void*);
-
-// Lock entrypoints.
-extern "C" void art_quick_lock_object(void*);
-extern "C" void art_quick_unlock_object(void*);
// Used by soft float.
// Single-precision FP arithmetics.
@@ -84,8 +39,6 @@
extern "C" double fmod(double a, double b); // REM_DOUBLE[_2ADDR]
// Used by hard float.
-extern "C" int64_t art_quick_f2l(float f); // FLOAT_TO_LONG
-extern "C" int64_t art_quick_d2l(double d); // DOUBLE_TO_LONG
extern "C" float art_quick_fmodf(float a, float b); // REM_FLOAT[_2ADDR]
extern "C" double art_quick_fmod(double a, double b); // REM_DOUBLE[_2ADDR]
@@ -94,38 +47,6 @@
// Long long arithmetics - REM_LONG[_2ADDR] and DIV_LONG[_2ADDR]
extern "C" int64_t __aeabi_ldivmod(int64_t, int64_t);
-extern "C" int64_t art_quick_mul_long(int64_t, int64_t);
-extern "C" uint64_t art_quick_shl_long(uint64_t, uint32_t);
-extern "C" uint64_t art_quick_shr_long(uint64_t, uint32_t);
-extern "C" uint64_t art_quick_ushr_long(uint64_t, uint32_t);
-
-// Intrinsic entrypoints.
-extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t);
-extern "C" int32_t art_quick_string_compareto(void*, void*);
-
-// Invoke entrypoints.
-extern "C" void art_quick_imt_conflict_trampoline(mirror::ArtMethod*);
-extern "C" void art_quick_resolution_trampoline(mirror::ArtMethod*);
-extern "C" void art_quick_to_interpreter_bridge(mirror::ArtMethod*);
-extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*);
-
-// Thread entrypoints.
-extern "C" void art_quick_test_suspend();
-
-// Throw entrypoints.
-extern "C" void art_quick_deliver_exception(void*);
-extern "C" void art_quick_throw_array_bounds(int32_t index, int32_t limit);
-extern "C" void art_quick_throw_div_zero();
-extern "C" void art_quick_throw_no_such_method(int32_t method_idx);
-extern "C" void art_quick_throw_null_pointer_exception();
-extern "C" void art_quick_throw_stack_overflow(void*);
-
-// Generic JNI downcall
-extern "C" void art_quick_generic_jni_trampoline(mirror::ArtMethod*);
void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
PortableEntryPoints* ppoints, QuickEntryPoints* qpoints) {
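Each architecture's entrypoints_init file used to repeat the same block of extern "C" declarations; they now come from the shared entrypoints/quick/quick_default_externs.h include added above, leaving only the genuinely architecture-specific externs (such as __aeabi_ldivmod here) in place. A sketch of what that shared header plausibly carries, using declarations copied verbatim from the blocks removed in this file; the exact contents and grouping of the real header are an assumption:

  // entrypoints/quick/quick_default_externs.h (sketch):
  extern "C" void art_quick_check_cast(void*, void*);
  extern "C" void art_quick_lock_object(void*);
  extern "C" void art_quick_unlock_object(void*);
  extern "C" void art_quick_test_suspend();
  extern "C" void art_quick_deliver_exception(void*);
  extern "C" void art_quick_resolution_trampoline(mirror::ArtMethod*);
  extern "C" void art_quick_to_interpreter_bridge(mirror::ArtMethod*);
  // ... plus the remaining field, array, invoke and throw entrypoints removed above ...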
diff --git a/runtime/arch/arm/fault_handler_arm.cc b/runtime/arch/arm/fault_handler_arm.cc
index 564fcba..480190a 100644
--- a/runtime/arch/arm/fault_handler_arm.cc
+++ b/runtime/arch/arm/fault_handler_arm.cc
@@ -47,7 +47,8 @@
return instr_size;
}
-void FaultManager::HandleNestedSignal(int sig, siginfo_t* info, void* context) {
+void FaultManager::HandleNestedSignal(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
+ void* context) {
// Note that in this handler we set up the registers and return to
// longjmp directly rather than going through an assembly language stub. The
// reason for this is that longjmp is (currently) in ARM mode and that would
@@ -64,7 +65,7 @@
VLOG(signals) << "longjmp address: " << reinterpret_cast<void*>(sc->arm_pc);
}
-void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
+void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED, void* context,
mirror::ArtMethod** out_method,
uintptr_t* out_return_pc, uintptr_t* out_sp) {
struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
@@ -100,7 +101,8 @@
*out_return_pc = (sc->arm_pc + instr_size) | 1;
}
-bool NullPointerHandler::Action(int sig, siginfo_t* info, void* context) {
+bool NullPointerHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
+ void* context) {
// The code that looks for the catch location needs to know the value of the
// ARM PC at the point of call. For Null checks we insert a GC map that is immediately after
// the load/store instruction that might cause the fault. However the mapping table has
@@ -127,7 +129,8 @@
// The offset from r9 is Thread::ThreadSuspendTriggerOffset().
// To check for a suspend check, we examine the instructions that caused
// the fault (at PC-4 and PC).
-bool SuspensionHandler::Action(int sig, siginfo_t* info, void* context) {
+bool SuspensionHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
+ void* context) {
// These are the instructions to check for. The first one is the ldr r0,[r9,#xxx]
// where xxx is the offset of the suspend trigger.
uint32_t checkinst1 = 0xf8d90000 + Thread::ThreadSuspendTriggerOffset<4>().Int32Value();
@@ -196,7 +199,8 @@
// If we determine this is a stack overflow we need to move the stack pointer
// to the overflow region below the protected region.
-bool StackOverflowHandler::Action(int sig, siginfo_t* info, void* context) {
+bool StackOverflowHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
+ void* context) {
struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
VLOG(signals) << "stack overflow handler with sp at " << std::hex << &uc;
diff --git a/runtime/arch/arm64/context_arm64.cc b/runtime/arch/arm64/context_arm64.cc
index 6aacda4..0a31480 100644
--- a/runtime/arch/arm64/context_arm64.cc
+++ b/runtime/arch/arm64/context_arm64.cc
@@ -19,11 +19,8 @@
#include "context_arm64.h"
#include "mirror/art_method-inl.h"
-#include "mirror/object-inl.h"
#include "quick/quick_method_frame_info.h"
-#include "stack.h"
-#include "thread.h"
-
+#include "utils.h"
namespace art {
namespace arm64 {
diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc
index 871e1d1..2d26c03 100644
--- a/runtime/arch/arm64/entrypoints_init_arm64.cc
+++ b/runtime/arch/arm64/entrypoints_init_arm64.cc
@@ -17,6 +17,8 @@
#include "entrypoints/interpreter/interpreter_entrypoints.h"
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/portable/portable_entrypoints.h"
+#include "entrypoints/quick/quick_alloc_entrypoints.h"
+#include "entrypoints/quick/quick_default_externs.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/entrypoint_utils.h"
#include "entrypoints/math_entrypoints.h"
@@ -25,95 +27,16 @@
namespace art {
-// Portable entrypoints.
-extern "C" void art_portable_resolution_trampoline(mirror::ArtMethod*);
-extern "C" void art_portable_to_interpreter_bridge(mirror::ArtMethod*);
-
// Cast entrypoints.
extern "C" uint32_t art_quick_assignable_from_code(const mirror::Class* klass,
const mirror::Class* ref_class);
-extern "C" void art_quick_check_cast(void*, void*);
-
-// DexCache entrypoints.
-extern "C" void* art_quick_initialize_static_storage(uint32_t, void*);
-extern "C" void* art_quick_initialize_type(uint32_t, void*);
-extern "C" void* art_quick_initialize_type_and_verify_access(uint32_t, void*);
-extern "C" void* art_quick_resolve_string(void*, uint32_t);
-
-// Field entrypoints.
-extern "C" int art_quick_set8_instance(uint32_t, void*, int8_t);
-extern "C" int art_quick_set8_static(uint32_t, int8_t);
-extern "C" int art_quick_set16_instance(uint32_t, void*, int16_t);
-extern "C" int art_quick_set16_static(uint32_t, int16_t);
-extern "C" int art_quick_set32_instance(uint32_t, void*, int32_t);
-extern "C" int art_quick_set32_static(uint32_t, int32_t);
-extern "C" int art_quick_set64_instance(uint32_t, void*, int64_t);
-extern "C" int art_quick_set64_static(uint32_t, int64_t);
-extern "C" int art_quick_set_obj_instance(uint32_t, void*, void*);
-extern "C" int art_quick_set_obj_static(uint32_t, void*);
-extern "C" uint8_t art_quick_get_boolean_instance(uint32_t, void*);
-extern "C" int8_t art_quick_get_byte_instance(uint32_t, void*);
-extern "C" uint8_t art_quick_get_boolean_static(uint32_t);
-extern "C" int8_t art_quick_get_byte_static(uint32_t);
-extern "C" uint16_t art_quick_get_char_instance(uint32_t, void*);
-extern "C" int16_t art_quick_get_short_instance(uint32_t, void*);
-extern "C" uint16_t art_quick_get_char_static(uint32_t);
-extern "C" int16_t art_quick_get_short_static(uint32_t);
-extern "C" int32_t art_quick_get32_instance(uint32_t, void*);
-extern "C" int32_t art_quick_get32_static(uint32_t);
-extern "C" int64_t art_quick_get64_instance(uint32_t, void*);
-extern "C" int64_t art_quick_get64_static(uint32_t);
-extern "C" void* art_quick_get_obj_instance(uint32_t, void*);
-extern "C" void* art_quick_get_obj_static(uint32_t);
-
-// Array entrypoints.
-extern "C" void art_quick_aput_obj_with_null_and_bound_check(void*, uint32_t, void*);
-extern "C" void art_quick_aput_obj_with_bound_check(void*, uint32_t, void*);
-extern "C" void art_quick_aput_obj(void*, uint32_t, void*);
-extern "C" void art_quick_handle_fill_data(void*, void*);
-
-// Lock entrypoints.
-extern "C" void art_quick_lock_object(void*);
-extern "C" void art_quick_unlock_object(void*);
// Single-precision FP arithmetics.
extern "C" float art_quick_fmodf(float a, float b); // REM_FLOAT[_2ADDR]
// Double-precision FP arithmetics.
-extern "C" double art_quick_fmod(double a, double b); // REM_DOUBLE[_2ADDR]
+extern "C" double art_quick_fmod(double a, double b); // REM_DOUBLE[_2ADDR]
-// Memcpy
-extern "C" void* art_quick_memcpy(void* __restrict, const void* __restrict, size_t);
-
-// Intrinsic entrypoints.
-extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t);
-extern "C" int32_t art_quick_string_compareto(void*, void*);
-
-// Invoke entrypoints.
-extern "C" void art_quick_imt_conflict_trampoline(mirror::ArtMethod*);
-extern "C" void art_quick_resolution_trampoline(mirror::ArtMethod*);
-extern "C" void art_quick_to_interpreter_bridge(mirror::ArtMethod*);
-extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*);
-
-// Thread entrypoints.
-extern "C" void art_quick_test_suspend();
-
-// Throw entrypoints.
-extern "C" void art_quick_deliver_exception(void*);
-extern "C" void art_quick_throw_array_bounds(int32_t index, int32_t limit);
-extern "C" void art_quick_throw_div_zero();
-extern "C" void art_quick_throw_no_such_method(int32_t method_idx);
-extern "C" void art_quick_throw_null_pointer_exception();
-extern "C" void art_quick_throw_stack_overflow(void*);
-
-extern void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints);
-
-// Generic JNI downcall
-extern "C" void art_quick_generic_jni_trampoline(mirror::ArtMethod*);
void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
PortableEntryPoints* ppoints, QuickEntryPoints* qpoints) {
diff --git a/runtime/arch/arm64/fault_handler_arm64.cc b/runtime/arch/arm64/fault_handler_arm64.cc
index 687d232..c914d85 100644
--- a/runtime/arch/arm64/fault_handler_arm64.cc
+++ b/runtime/arch/arm64/fault_handler_arm64.cc
@@ -37,7 +37,8 @@
namespace art {
-void FaultManager::HandleNestedSignal(int sig, siginfo_t* info, void* context) {
+void FaultManager::HandleNestedSignal(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
+ void* context) {
// To match the case used in ARM we return directly to the longjmp function
// rather than through a trivial assembly language stub.
@@ -51,7 +52,7 @@
sc->pc = reinterpret_cast<uintptr_t>(longjmp);
}
-void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
+void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED, void* context,
mirror::ArtMethod** out_method,
uintptr_t* out_return_pc, uintptr_t* out_sp) {
struct ucontext *uc = reinterpret_cast<struct ucontext *>(context);
@@ -82,7 +83,8 @@
*out_return_pc = sc->pc + 4;
}
-bool NullPointerHandler::Action(int sig, siginfo_t* info, void* context) {
+bool NullPointerHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
+ void* context) {
// The code that looks for the catch location needs to know the value of the
// PC at the point of call. For Null checks we insert a GC map that is immediately after
// the load/store instruction that might cause the fault.
@@ -105,7 +107,8 @@
// The offset from r18 is Thread::ThreadSuspendTriggerOffset().
// To check for a suspend check, we examine the instructions that caused
// the fault (at PC-4 and PC).
-bool SuspensionHandler::Action(int sig, siginfo_t* info, void* context) {
+bool SuspensionHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
+ void* context) {
// These are the instructions to check for. The first one is the ldr x0,[r18,#xxx]
// where xxx is the offset of the suspend trigger.
uint32_t checkinst1 = 0xf9400240 | (Thread::ThreadSuspendTriggerOffset<8>().Int32Value() << 7);
@@ -155,7 +158,8 @@
return false;
}
-bool StackOverflowHandler::Action(int sig, siginfo_t* info, void* context) {
+bool StackOverflowHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
+ void* context) {
struct ucontext *uc = reinterpret_cast<struct ucontext *>(context);
struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
VLOG(signals) << "stack overflow handler with sp at " << std::hex << &uc;
diff --git a/runtime/arch/memcmp16.cc b/runtime/arch/memcmp16.cc
index 5a3e73e..813df2f 100644
--- a/runtime/arch/memcmp16.cc
+++ b/runtime/arch/memcmp16.cc
@@ -19,6 +19,7 @@
// This is linked against by assembly stubs only.
#pragma GCC diagnostic ignored "-Wunused-function"
+int32_t memcmp16_generic_static(const uint16_t* s0, const uint16_t* s1, size_t count);
int32_t memcmp16_generic_static(const uint16_t* s0, const uint16_t* s1, size_t count) {
for (size_t i = 0; i < count; i++) {
if (s0[i] != s1[i]) {
diff --git a/runtime/arch/mips/context_mips.cc b/runtime/arch/mips/context_mips.cc
index 789dbbb..e1f6c06 100644
--- a/runtime/arch/mips/context_mips.cc
+++ b/runtime/arch/mips/context_mips.cc
@@ -17,9 +17,8 @@
#include "context_mips.h"
#include "mirror/art_method-inl.h"
-#include "mirror/object-inl.h"
#include "quick/quick_method_frame_info.h"
-#include "stack.h"
+#include "utils.h"
namespace art {
namespace mips {
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index db0f71f..e86aa1c 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -19,6 +19,7 @@
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/portable/portable_entrypoints.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
+#include "entrypoints/quick/quick_default_externs.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/entrypoint_utils.h"
#include "entrypoints/math_entrypoints.h"
@@ -27,56 +28,9 @@
namespace art {
-// Portable entrypoints.
-extern "C" void art_portable_resolution_trampoline(mirror::ArtMethod*);
-extern "C" void art_portable_to_interpreter_bridge(mirror::ArtMethod*);
-
// Cast entrypoints.
extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass,
const mirror::Class* ref_class);
-extern "C" void art_quick_check_cast(void*, void*);
-
-// DexCache entrypoints.
-extern "C" void* art_quick_initialize_static_storage(uint32_t, void*);
-extern "C" void* art_quick_initialize_type(uint32_t, void*);
-extern "C" void* art_quick_initialize_type_and_verify_access(uint32_t, void*);
-extern "C" void* art_quick_resolve_string(void*, uint32_t);
-
-// Field entrypoints.
-extern "C" int art_quick_set8_instance(uint32_t, void*, int8_t);
-extern "C" int art_quick_set8_static(uint32_t, int8_t);
-extern "C" int art_quick_set16_instance(uint32_t, void*, int16_t);
-extern "C" int art_quick_set16_static(uint32_t, int16_t);
-extern "C" int art_quick_set32_instance(uint32_t, void*, int32_t);
-extern "C" int art_quick_set32_static(uint32_t, int32_t);
-extern "C" int art_quick_set64_instance(uint32_t, void*, int64_t);
-extern "C" int art_quick_set64_static(uint32_t, int64_t);
-extern "C" int art_quick_set_obj_instance(uint32_t, void*, void*);
-extern "C" int art_quick_set_obj_static(uint32_t, void*);
-extern "C" uint8_t art_quick_get_boolean_instance(uint32_t, void*);
-extern "C" int8_t art_quick_get_byte_instance(uint32_t, void*);
-extern "C" uint8_t art_quick_get_boolean_static(uint32_t);
-extern "C" int8_t art_quick_get_byte_static(uint32_t);
-extern "C" uint16_t art_quick_get_char_instance(uint32_t, void*);
-extern "C" int16_t art_quick_get_short_instance(uint32_t, void*);
-extern "C" uint16_t art_quick_get_char_static(uint32_t);
-extern "C" int16_t art_quick_get_short_static(uint32_t);
-extern "C" int32_t art_quick_get32_instance(uint32_t, void*);
-extern "C" int32_t art_quick_get32_static(uint32_t);
-extern "C" int64_t art_quick_get64_instance(uint32_t, void*);
-extern "C" int64_t art_quick_get64_static(uint32_t);
-extern "C" void* art_quick_get_obj_instance(uint32_t, void*);
-extern "C" void* art_quick_get_obj_static(uint32_t);
-
-// Array entrypoints.
-extern "C" void art_quick_aput_obj_with_null_and_bound_check(void*, uint32_t, void*);
-extern "C" void art_quick_aput_obj_with_bound_check(void*, uint32_t, void*);
-extern "C" void art_quick_aput_obj(void*, uint32_t, void*);
-extern "C" void art_quick_handle_fill_data(void*, void*);
-
-// Lock entrypoints.
-extern "C" void art_quick_lock_object(void*);
-extern "C" void art_quick_unlock_object(void*);
// Math entrypoints.
extern int32_t CmpgDouble(double a, double b);
@@ -104,39 +58,6 @@
// Long long arithmetics - REM_LONG[_2ADDR] and DIV_LONG[_2ADDR]
extern "C" int64_t __divdi3(int64_t, int64_t);
extern "C" int64_t __moddi3(int64_t, int64_t);
-extern "C" uint64_t art_quick_shl_long(uint64_t, uint32_t);
-extern "C" uint64_t art_quick_shr_long(uint64_t, uint32_t);
-extern "C" uint64_t art_quick_ushr_long(uint64_t, uint32_t);
-
-// Intrinsic entrypoints.
-extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t);
-extern "C" int32_t art_quick_string_compareto(void*, void*);
-
-// Invoke entrypoints.
-extern "C" void art_quick_imt_conflict_trampoline(mirror::ArtMethod*);
-extern "C" void art_quick_resolution_trampoline(mirror::ArtMethod*);
-extern "C" void art_quick_to_interpreter_bridge(mirror::ArtMethod*);
-extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*);
-
-// Thread entrypoints.
-extern "C" void art_quick_test_suspend();
-
-// Throw entrypoints.
-extern "C" void art_quick_deliver_exception(void*);
-extern "C" void art_quick_throw_array_bounds(int32_t index, int32_t limit);
-extern "C" void art_quick_throw_div_zero();
-extern "C" void art_quick_throw_no_such_method(int32_t method_idx);
-extern "C" void art_quick_throw_null_pointer_exception();
-extern "C" void art_quick_throw_stack_overflow(void*);
-
-// Generic JNI downcall
-extern "C" void art_quick_generic_jni_trampoline(mirror::ArtMethod*);
-
-extern void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints);
void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
PortableEntryPoints* ppoints, QuickEntryPoints* qpoints) {
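
The hunk above is the first of three identical cleanups (mips here, with the same change applied to the x86 and x86-64 files below): the duplicated block of extern "C" entrypoint declarations is deleted and replaced by a single include of entrypoints/quick/quick_default_externs.h. The following is an illustrative sketch of what such a shared header collects; the declarations are taken from the lines removed here, but the real header's exact contents and layout are not reproduced.

// Illustrative sketch only -- not part of this patch. A shared header lets each
// entrypoints_init_<arch>.cc drop its own copy of these declarations.
#ifndef QUICK_DEFAULT_EXTERNS_SKETCH_H_
#define QUICK_DEFAULT_EXTERNS_SKETCH_H_

#include <cstdint>

// Cast entrypoints.
extern "C" void art_quick_check_cast(void*, void*);

// Field entrypoints (one representative of many).
extern "C" int art_quick_set32_static(uint32_t, int32_t);

// Lock entrypoints.
extern "C" void art_quick_lock_object(void*);
extern "C" void art_quick_unlock_object(void*);

// Thread entrypoints.
extern "C" void art_quick_test_suspend();

// Throw entrypoints.
extern "C" void art_quick_deliver_exception(void*);
extern "C" void art_quick_throw_div_zero();
extern "C" void art_quick_throw_null_pointer_exception();

// ...the DexCache, Array, Intrinsic and Invoke declarations continue in the
// same pattern as the lines removed above.

#endif  // QUICK_DEFAULT_EXTERNS_SKETCH_H_
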
diff --git a/runtime/arch/mips/fault_handler_mips.cc b/runtime/arch/mips/fault_handler_mips.cc
index aa6d68a..c9949d4 100644
--- a/runtime/arch/mips/fault_handler_mips.cc
+++ b/runtime/arch/mips/fault_handler_mips.cc
@@ -29,23 +29,29 @@
namespace art {
-void FaultManager::HandleNestedSignal(int sig, siginfo_t* info, void* context) {
+void FaultManager::HandleNestedSignal(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
+ void* context ATTRIBUTE_UNUSED) {
}
-void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
- mirror::ArtMethod** out_method,
- uintptr_t* out_return_pc, uintptr_t* out_sp) {
+void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED,
+ void* context ATTRIBUTE_UNUSED,
+ mirror::ArtMethod** out_method ATTRIBUTE_UNUSED,
+ uintptr_t* out_return_pc ATTRIBUTE_UNUSED,
+ uintptr_t* out_sp ATTRIBUTE_UNUSED) {
}
-bool NullPointerHandler::Action(int sig, siginfo_t* info, void* context) {
+bool NullPointerHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
+ void* context ATTRIBUTE_UNUSED) {
return false;
}
-bool SuspensionHandler::Action(int sig, siginfo_t* info, void* context) {
+bool SuspensionHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
+ void* context ATTRIBUTE_UNUSED) {
return false;
}
-bool StackOverflowHandler::Action(int sig, siginfo_t* info, void* context) {
+bool StackOverflowHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
+ void* context ATTRIBUTE_UNUSED) {
return false;
}
} // namespace art
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index c5a0f6c..0fcd297 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -41,7 +41,7 @@
for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
if (!runtime_->HasCalleeSaveMethod(type)) {
- runtime_->SetCalleeSaveMethod(runtime_->CreateCalleeSaveMethod(type), type);
+ runtime_->SetCalleeSaveMethod(runtime_->CreateCalleeSaveMethod(), type);
}
}
}
@@ -530,18 +530,6 @@
#endif
}
- // Method with 32b arg0, 32b arg1, 64b arg2
- size_t Invoke3UUWithReferrer(uint32_t arg0, uint32_t arg1, uint64_t arg2, uintptr_t code,
- Thread* self, mirror::ArtMethod* referrer) {
-#if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
- // Just pass through.
- return Invoke3WithReferrer(arg0, arg1, arg2, code, self, referrer);
-#else
- // TODO: Needs 4-param invoke.
- return 0;
-#endif
- }
-
static uintptr_t GetEntrypoint(Thread* self, QuickEntrypointEnum entrypoint) {
int32_t offset;
#ifdef __LP64__
@@ -744,17 +732,17 @@
EXPECT_EQ(LockWord::LockState::kFatLocked, iter_state);
}
} else {
- bool lock; // Whether to lock or unlock in this step.
+ bool take_lock; // Whether to lock or unlock in this step.
if (counts[index] == 0) {
- lock = true;
+ take_lock = true;
} else if (counts[index] == kThinLockLoops) {
- lock = false;
+ take_lock = false;
} else {
// Randomly.
- lock = r.next() % 2 == 0;
+ take_lock = r.next() % 2 == 0;
}
- if (lock) {
+ if (take_lock) {
test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U, art_quick_lock_object,
self);
counts[index]++;
@@ -1303,8 +1291,8 @@
}
-static void GetSetBooleanStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
- mirror::ArtMethod* referrer, StubTest* test)
+static void GetSetBooleanStatic(Handle<mirror::ArtField>* f, Thread* self,
+ mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
constexpr size_t num_values = 5;
@@ -1332,8 +1320,8 @@
std::cout << "Skipping set_boolean_static as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}
-static void GetSetByteStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
- mirror::ArtMethod* referrer, StubTest* test)
+static void GetSetByteStatic(Handle<mirror::ArtField>* f, Thread* self,
+ mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
int8_t values[] = { -128, -64, 0, 64, 127 };
@@ -1362,7 +1350,7 @@
static void GetSetBooleanInstance(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f,
- Thread* self, mirror::ArtMethod* referrer, StubTest* test)
+ Thread* self, mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
uint8_t values[] = { 0, true, 2, 128, 0xFF };
@@ -1427,8 +1415,8 @@
#endif
}
-static void GetSetCharStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
- mirror::ArtMethod* referrer, StubTest* test)
+static void GetSetCharStatic(Handle<mirror::ArtField>* f, Thread* self, mirror::ArtMethod* referrer,
+ StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
uint16_t values[] = { 0, 1, 2, 255, 32768, 0xFFFF };
@@ -1455,8 +1443,8 @@
std::cout << "Skipping set_char_static as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}
-static void GetSetShortStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
- mirror::ArtMethod* referrer, StubTest* test)
+static void GetSetShortStatic(Handle<mirror::ArtField>* f, Thread* self,
+ mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
int16_t values[] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE };
@@ -1549,8 +1537,8 @@
#endif
}
-static void GetSet32Static(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
- mirror::ArtMethod* referrer, StubTest* test)
+static void GetSet32Static(Handle<mirror::ArtField>* f, Thread* self, mirror::ArtMethod* referrer,
+ StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };
@@ -1637,8 +1625,8 @@
}
#endif
-static void GetSetObjStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
- mirror::ArtMethod* referrer, StubTest* test)
+static void GetSetObjStatic(Handle<mirror::ArtField>* f, Thread* self, mirror::ArtMethod* referrer,
+ StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
set_and_check_static((*f)->GetDexFieldIndex(), nullptr, self, referrer, test);
@@ -1702,8 +1690,8 @@
// TODO: Complete these tests for 32b architectures.
-static void GetSet64Static(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
- mirror::ArtMethod* referrer, StubTest* test)
+static void GetSet64Static(Handle<mirror::ArtField>* f, Thread* self, mirror::ArtMethod* referrer,
+ StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };
@@ -1724,6 +1712,7 @@
EXPECT_EQ(res, values[i]) << "Iteration " << i;
}
#else
+ UNUSED(f, self, referrer, test);
LOG(INFO) << "Skipping set64static as I don't know how to do that on " << kRuntimeISA;
// Force-print to std::cout so it's also outside the logcat.
std::cout << "Skipping set64static as I don't know how to do that on " << kRuntimeISA << std::endl;
@@ -1760,6 +1749,7 @@
EXPECT_EQ(res, static_cast<int64_t>(res2));
}
#else
+ UNUSED(obj, f, self, referrer, test);
LOG(INFO) << "Skipping set64instance as I don't know how to do that on " << kRuntimeISA;
// Force-print to std::cout so it's also outside the logcat.
std::cout << "Skipping set64instance as I don't know how to do that on " << kRuntimeISA << std::endl;
@@ -1789,47 +1779,47 @@
Handle<mirror::ObjectArray<mirror::ArtField>> fields(hs.NewHandle(c.Get()->GetSFields()));
int32_t num_fields = fields->GetLength();
for (int32_t i = 0; i < num_fields; ++i) {
- StackHandleScope<1> hs(self);
- Handle<mirror::ArtField> f(hs.NewHandle(fields->Get(i)));
+ StackHandleScope<1> hs2(self);
+ Handle<mirror::ArtField> f(hs2.NewHandle(fields->Get(i)));
Primitive::Type type = f->GetTypeAsPrimitiveType();
switch (type) {
case Primitive::Type::kPrimBoolean:
if (test_type == type) {
- GetSetBooleanStatic(&obj, &f, self, m.Get(), test);
+ GetSetBooleanStatic(&f, self, m.Get(), test);
}
break;
case Primitive::Type::kPrimByte:
if (test_type == type) {
- GetSetByteStatic(&obj, &f, self, m.Get(), test);
+ GetSetByteStatic(&f, self, m.Get(), test);
}
break;
case Primitive::Type::kPrimChar:
if (test_type == type) {
- GetSetCharStatic(&obj, &f, self, m.Get(), test);
+ GetSetCharStatic(&f, self, m.Get(), test);
}
break;
case Primitive::Type::kPrimShort:
if (test_type == type) {
- GetSetShortStatic(&obj, &f, self, m.Get(), test);
+ GetSetShortStatic(&f, self, m.Get(), test);
}
break;
case Primitive::Type::kPrimInt:
if (test_type == type) {
- GetSet32Static(&obj, &f, self, m.Get(), test);
+ GetSet32Static(&f, self, m.Get(), test);
}
break;
case Primitive::Type::kPrimLong:
if (test_type == type) {
- GetSet64Static(&obj, &f, self, m.Get(), test);
+ GetSet64Static(&f, self, m.Get(), test);
}
break;
case Primitive::Type::kPrimNot:
// Don't try array.
if (test_type == type && f->GetTypeDescriptor()[0] != '[') {
- GetSetObjStatic(&obj, &f, self, m.Get(), test);
+ GetSetObjStatic(&f, self, m.Get(), test);
}
break;
@@ -1844,8 +1834,8 @@
Handle<mirror::ObjectArray<mirror::ArtField>> fields(hs.NewHandle(c.Get()->GetIFields()));
int32_t num_fields = fields->GetLength();
for (int32_t i = 0; i < num_fields; ++i) {
- StackHandleScope<1> hs(self);
- Handle<mirror::ArtField> f(hs.NewHandle(fields->Get(i)));
+ StackHandleScope<1> hs2(self);
+ Handle<mirror::ArtField> f(hs2.NewHandle(fields->Get(i)));
Primitive::Type type = f->GetTypeAsPrimitiveType();
switch (type) {
diff --git a/runtime/arch/x86/context_x86.cc b/runtime/arch/x86/context_x86.cc
index a7beaa9..49aa326 100644
--- a/runtime/arch/x86/context_x86.cc
+++ b/runtime/arch/x86/context_x86.cc
@@ -17,9 +17,9 @@
#include "context_x86.h"
#include "mirror/art_method-inl.h"
-#include "mirror/object-inl.h"
#include "quick/quick_method_frame_info.h"
-#include "stack.h"
+#include "utils.h"
+
namespace art {
namespace x86 {
@@ -72,6 +72,16 @@
}
}
+bool X86Context::GetFPR(uint32_t reg ATTRIBUTE_UNUSED, uintptr_t* val ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Floating-point registers are all caller save in X86";
+ UNREACHABLE();
+}
+
+bool X86Context::SetFPR(uint32_t reg ATTRIBUTE_UNUSED, uintptr_t value ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Floating-point registers are all caller save in X86";
+ UNREACHABLE();
+}
+
void X86Context::DoLongJump() {
#if defined(__i386__)
// Array of GPR values, filled from the context backward for the long jump pop. We add a slot at
diff --git a/runtime/arch/x86/context_x86.h b/runtime/arch/x86/context_x86.h
index a350b25..01c8b82 100644
--- a/runtime/arch/x86/context_x86.h
+++ b/runtime/arch/x86/context_x86.h
@@ -62,15 +62,9 @@
bool SetGPR(uint32_t reg, uintptr_t value) OVERRIDE;
- bool GetFPR(uint32_t reg, uintptr_t* val) OVERRIDE {
- LOG(FATAL) << "Floating-point registers are all caller save in X86";
- return false;
- }
+ bool GetFPR(uint32_t reg, uintptr_t* val) OVERRIDE;
- bool SetFPR(uint32_t reg, uintptr_t value) OVERRIDE {
- LOG(FATAL) << "Floating-point registers are all caller save in X86";
- return false;
- }
+ bool SetFPR(uint32_t reg, uintptr_t value) OVERRIDE;
void SmashCallerSaves() OVERRIDE;
void DoLongJump() OVERRIDE;
diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc
index f2b91cd..48d6c80 100644
--- a/runtime/arch/x86/entrypoints_init_x86.cc
+++ b/runtime/arch/x86/entrypoints_init_x86.cc
@@ -18,102 +18,16 @@
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/portable/portable_entrypoints.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
+#include "entrypoints/quick/quick_default_externs.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "interpreter/interpreter.h"
namespace art {
-// Portable entrypoints.
-extern "C" void art_portable_resolution_trampoline(mirror::ArtMethod*);
-extern "C" void art_portable_to_interpreter_bridge(mirror::ArtMethod*);
-
// Cast entrypoints.
extern "C" uint32_t art_quick_is_assignable(const mirror::Class* klass,
- const mirror::Class* ref_class);
-extern "C" void art_quick_check_cast(void*, void*);
-
-// DexCache entrypoints.
-extern "C" void* art_quick_initialize_static_storage(uint32_t, void*);
-extern "C" void* art_quick_initialize_type(uint32_t, void*);
-extern "C" void* art_quick_initialize_type_and_verify_access(uint32_t, void*);
-extern "C" void* art_quick_resolve_string(void*, uint32_t);
-
-// Field entrypoints.
-extern "C" int art_quick_set8_instance(uint32_t, void*, int8_t);
-extern "C" int art_quick_set8_static(uint32_t, int8_t);
-extern "C" int art_quick_set16_instance(uint32_t, void*, int16_t);
-extern "C" int art_quick_set16_static(uint32_t, int16_t);
-extern "C" int art_quick_set32_instance(uint32_t, void*, int32_t);
-extern "C" int art_quick_set32_static(uint32_t, int32_t);
-extern "C" int art_quick_set64_instance(uint32_t, void*, int64_t);
-extern "C" int art_quick_set64_static(uint32_t, int64_t);
-extern "C" int art_quick_set_obj_instance(uint32_t, void*, void*);
-extern "C" int art_quick_set_obj_static(uint32_t, void*);
-extern "C" int8_t art_quick_get_byte_instance(uint32_t, void*);
-extern "C" uint8_t art_quick_get_boolean_instance(uint32_t, void*);
-extern "C" int8_t art_quick_get_byte_static(uint32_t);
-extern "C" uint8_t art_quick_get_boolean_static(uint32_t);
-extern "C" int16_t art_quick_get_short_instance(uint32_t, void*);
-extern "C" uint16_t art_quick_get_char_instance(uint32_t, void*);
-extern "C" int16_t art_quick_get_short_static(uint32_t);
-extern "C" uint16_t art_quick_get_char_static(uint32_t);
-extern "C" int32_t art_quick_get32_instance(uint32_t, void*);
-extern "C" int32_t art_quick_get32_static(uint32_t);
-extern "C" int64_t art_quick_get64_instance(uint32_t, void*);
-extern "C" int64_t art_quick_get64_static(uint32_t);
-extern "C" void* art_quick_get_obj_instance(uint32_t, void*);
-extern "C" void* art_quick_get_obj_static(uint32_t);
-
-// Array entrypoints.
-extern "C" void art_quick_aput_obj_with_null_and_bound_check(void*, uint32_t, void*);
-extern "C" void art_quick_aput_obj_with_bound_check(void*, uint32_t, void*);
-extern "C" void art_quick_aput_obj(void*, uint32_t, void*);
-extern "C" void art_quick_handle_fill_data(void*, void*);
-
-// Lock entrypoints.
-extern "C" void art_quick_lock_object(void*);
-extern "C" void art_quick_unlock_object(void*);
-
-// Math entrypoints.
-extern "C" int64_t art_quick_d2l(double);
-extern "C" int64_t art_quick_f2l(float);
-extern "C" int64_t art_quick_ldiv(int64_t, int64_t);
-extern "C" int64_t art_quick_lmod(int64_t, int64_t);
-extern "C" int64_t art_quick_lmul(int64_t, int64_t);
-extern "C" uint64_t art_quick_lshl(uint64_t, uint32_t);
-extern "C" uint64_t art_quick_lshr(uint64_t, uint32_t);
-extern "C" uint64_t art_quick_lushr(uint64_t, uint32_t);
-
-// Intrinsic entrypoints.
-extern "C" int32_t art_quick_string_compareto(void*, void*);
-extern "C" void* art_quick_memcpy(void*, const void*, size_t);
-
-// Invoke entrypoints.
-extern "C" void art_quick_imt_conflict_trampoline(mirror::ArtMethod*);
-extern "C" void art_quick_resolution_trampoline(mirror::ArtMethod*);
-extern "C" void art_quick_to_interpreter_bridge(mirror::ArtMethod*);
-extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*);
-
-// Thread entrypoints.
-extern "C" void art_quick_test_suspend();
-
-// Throw entrypoints.
-extern "C" void art_quick_deliver_exception(void*);
-extern "C" void art_quick_throw_array_bounds(int32_t index, int32_t limit);
-extern "C" void art_quick_throw_div_zero();
-extern "C" void art_quick_throw_no_such_method(int32_t method_idx);
-extern "C" void art_quick_throw_null_pointer_exception();
-extern "C" void art_quick_throw_stack_overflow(void*);
-
-// Generic JNI downcall
-extern "C" void art_quick_generic_jni_trampoline(mirror::ArtMethod*);
-
-extern void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints);
+ const mirror::Class* ref_class);
void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
PortableEntryPoints* ppoints, QuickEntryPoints* qpoints) {
diff --git a/runtime/arch/x86/fault_handler_x86.cc b/runtime/arch/x86/fault_handler_x86.cc
index 9d74ef5..ad962e2 100644
--- a/runtime/arch/x86/fault_handler_x86.cc
+++ b/runtime/arch/x86/fault_handler_x86.cc
@@ -231,7 +231,7 @@
return pc - startpc;
}
-void FaultManager::HandleNestedSignal(int sig, siginfo_t* info, void* context) {
+void FaultManager::HandleNestedSignal(int, siginfo_t*, void* context) {
// For the Intel architectures we need to go to an assembly language
// stub. This is because the 32 bit call to longjmp is much different
// from the 64 bit ABI call and pushing things onto the stack inside this
@@ -284,7 +284,7 @@
*out_return_pc = reinterpret_cast<uintptr_t>(pc + instr_size);
}
-bool NullPointerHandler::Action(int sig, siginfo_t* info, void* context) {
+bool NullPointerHandler::Action(int, siginfo_t*, void* context) {
struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
uint8_t* pc = reinterpret_cast<uint8_t*>(uc->CTX_EIP);
uint8_t* sp = reinterpret_cast<uint8_t*>(uc->CTX_ESP);
@@ -324,7 +324,7 @@
// The offset from fs is Thread::ThreadSuspendTriggerOffset().
// To check for a suspend check, we examine the instructions that caused
// the fault.
-bool SuspensionHandler::Action(int sig, siginfo_t* info, void* context) {
+bool SuspensionHandler::Action(int, siginfo_t*, void* context) {
// These are the instructions to check for. The first one is the mov eax, fs:[xxx]
// where xxx is the offset of the suspend trigger.
#if defined(__x86_64__)
@@ -398,7 +398,7 @@
// This is done before any frame is established in the method. The return
// address for the previous method is on the stack at ESP.
-bool StackOverflowHandler::Action(int sig, siginfo_t* info, void* context) {
+bool StackOverflowHandler::Action(int, siginfo_t* info, void* context) {
struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
uintptr_t sp = static_cast<uintptr_t>(uc->CTX_ESP);
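
Two styles are used in this patch to silence unused-parameter warnings in the fault handlers: the x86 file above drops the parameter names, while the mips file earlier keeps the names and tags them with ATTRIBUTE_UNUSED (defined in runtime/base/macros.h later in this patch). A standalone comparison, with the macro copied inline so the snippet compiles on its own:

// Illustrative sketch only -- not part of this patch.
#define ATTRIBUTE_UNUSED __attribute__((__unused__))

// x86 style: omit the names of parameters the body never reads.
bool ActionUnnamed(int, void*) { return false; }

// mips style: keep the names (they still document the signature) and annotate them.
bool ActionAnnotated(int sig ATTRIBUTE_UNUSED, void* context ATTRIBUTE_UNUSED) {
  return false;
}

int main() { return (ActionUnnamed(0, nullptr) || ActionAnnotated(0, nullptr)) ? 1 : 0; }
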
diff --git a/runtime/arch/x86_64/context_x86_64.cc b/runtime/arch/x86_64/context_x86_64.cc
index 79d0666..6e9b99c 100644
--- a/runtime/arch/x86_64/context_x86_64.cc
+++ b/runtime/arch/x86_64/context_x86_64.cc
@@ -17,9 +17,8 @@
#include "context_x86_64.h"
#include "mirror/art_method-inl.h"
-#include "mirror/object-inl.h"
#include "quick/quick_method_frame_info.h"
-#include "stack.h"
+#include "utils.h"
namespace art {
namespace x86_64 {
diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
index be73594..a2766f7 100644
--- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc
+++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
@@ -18,6 +18,7 @@
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/portable/portable_entrypoints.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
+#include "entrypoints/quick/quick_default_externs.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/math_entrypoints.h"
#include "entrypoints/runtime_asm_entrypoints.h"
@@ -25,100 +26,14 @@
namespace art {
-// Portable entrypoints.
-extern "C" void art_portable_resolution_trampoline(mirror::ArtMethod*);
-extern "C" void art_portable_to_interpreter_bridge(mirror::ArtMethod*);
-
// Cast entrypoints.
extern "C" uint32_t art_quick_assignable_from_code(const mirror::Class* klass,
- const mirror::Class* ref_class);
-extern "C" void art_quick_check_cast(void*, void*);
-
-// DexCache entrypoints.
-extern "C" void* art_quick_initialize_static_storage(uint32_t, void*);
-extern "C" void* art_quick_initialize_type(uint32_t, void*);
-extern "C" void* art_quick_initialize_type_and_verify_access(uint32_t, void*);
-extern "C" void* art_quick_resolve_string(void*, uint32_t);
-
-// Field entrypoints.
-extern "C" int art_quick_set8_instance(uint32_t, void*, int8_t);
-extern "C" int art_quick_set8_static(uint32_t, int8_t);
-extern "C" int art_quick_set16_instance(uint32_t, void*, int16_t);
-extern "C" int art_quick_set16_static(uint32_t, int16_t);
-extern "C" int art_quick_set32_instance(uint32_t, void*, int32_t);
-extern "C" int art_quick_set32_static(uint32_t, int32_t);
-extern "C" int art_quick_set64_instance(uint32_t, void*, int64_t);
-extern "C" int art_quick_set64_static(uint32_t, int64_t);
-extern "C" int art_quick_set_obj_instance(uint32_t, void*, void*);
-extern "C" int art_quick_set_obj_static(uint32_t, void*);
-extern "C" int8_t art_quick_get_byte_instance(uint32_t, void*);
-extern "C" uint8_t art_quick_get_boolean_instance(uint32_t, void*);
-extern "C" int8_t art_quick_get_byte_static(uint32_t);
-extern "C" uint8_t art_quick_get_boolean_static(uint32_t);
-extern "C" int16_t art_quick_get_short_instance(uint32_t, void*);
-extern "C" uint16_t art_quick_get_char_instance(uint32_t, void*);
-extern "C" int16_t art_quick_get_short_static(uint32_t);
-extern "C" uint16_t art_quick_get_char_static(uint32_t);
-extern "C" int32_t art_quick_get32_instance(uint32_t, void*);
-extern "C" int32_t art_quick_get32_static(uint32_t);
-extern "C" int64_t art_quick_get64_instance(uint32_t, void*);
-extern "C" int64_t art_quick_get64_static(uint32_t);
-extern "C" void* art_quick_get_obj_instance(uint32_t, void*);
-extern "C" void* art_quick_get_obj_static(uint32_t);
-
-// Array entrypoints.
-extern "C" void art_quick_aput_obj_with_null_and_bound_check(void*, uint32_t, void*);
-extern "C" void art_quick_aput_obj_with_bound_check(void*, uint32_t, void*);
-extern "C" void art_quick_aput_obj(void*, uint32_t, void*);
-extern "C" void art_quick_handle_fill_data(void*, void*);
-
-// Lock entrypoints.
-extern "C" void art_quick_lock_object(void*);
-extern "C" void art_quick_unlock_object(void*);
-
-// Math entrypoints.
-extern "C" int64_t art_quick_d2l(double);
-extern "C" int64_t art_quick_f2l(float);
-extern "C" int64_t art_quick_ldiv(int64_t, int64_t);
-extern "C" int64_t art_quick_lmod(int64_t, int64_t);
-extern "C" int64_t art_quick_lmul(int64_t, int64_t);
-extern "C" uint64_t art_quick_lshl(uint64_t, uint32_t);
-extern "C" uint64_t art_quick_lshr(uint64_t, uint32_t);
-extern "C" uint64_t art_quick_lushr(uint64_t, uint32_t);
-
-// Intrinsic entrypoints.
-extern "C" int32_t art_quick_string_compareto(void*, void*);
-extern "C" void* art_quick_memcpy(void*, const void*, size_t);
-
-// Invoke entrypoints.
-extern "C" void art_quick_imt_conflict_trampoline(mirror::ArtMethod*);
-extern "C" void art_quick_resolution_trampoline(mirror::ArtMethod*);
-extern "C" void art_quick_to_interpreter_bridge(mirror::ArtMethod*);
-extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*);
-
-// Thread entrypoints.
-extern "C" void art_quick_test_suspend();
-
-// Throw entrypoints.
-extern "C" void art_quick_deliver_exception(void*);
-extern "C" void art_quick_throw_array_bounds(int32_t index, int32_t limit);
-extern "C" void art_quick_throw_div_zero();
-extern "C" void art_quick_throw_no_such_method(int32_t method_idx);
-extern "C" void art_quick_throw_null_pointer_exception();
-extern "C" void art_quick_throw_stack_overflow(void*);
-
-// Generic JNI entrypoint
-extern "C" void art_quick_generic_jni_trampoline(mirror::ArtMethod*);
-
-extern void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints);
+ const mirror::Class* ref_class);
void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
PortableEntryPoints* ppoints, QuickEntryPoints* qpoints) {
#if defined(__APPLE__)
+ UNUSED(ipoints, jpoints, ppoints, qpoints);
UNIMPLEMENTED(FATAL);
#else
// Interpreter
diff --git a/runtime/atomic.h b/runtime/atomic.h
index e57c0c0..cf61277 100644
--- a/runtime/atomic.h
+++ b/runtime/atomic.h
@@ -293,17 +293,17 @@
typedef Atomic<int32_t> AtomicInteger;
-COMPILE_ASSERT(sizeof(AtomicInteger) == sizeof(int32_t), weird_atomic_int_size);
-COMPILE_ASSERT(alignof(AtomicInteger) == alignof(int32_t),
- atomic_int_alignment_differs_from_that_of_underlying_type);
-COMPILE_ASSERT(sizeof(Atomic<int64_t>) == sizeof(int64_t), weird_atomic_int64_size);
+static_assert(sizeof(AtomicInteger) == sizeof(int32_t), "Weird AtomicInteger size");
+static_assert(alignof(AtomicInteger) == alignof(int32_t),
+ "AtomicInteger alignment differs from that of underlyingtype");
+static_assert(sizeof(Atomic<int64_t>) == sizeof(int64_t), "Weird Atomic<int64> size");
// Assert the alignment of 64-bit integers is 64-bit. This isn't true on certain 32-bit
// architectures (e.g. x86-32) but we know that 64-bit integers here are arranged to be 8-byte
// aligned.
#if defined(__LP64__)
- COMPILE_ASSERT(alignof(Atomic<int64_t>) == alignof(int64_t),
- atomic_int64_alignment_differs_from_that_of_underlying_type);
+ static_assert(alignof(Atomic<int64_t>) == alignof(int64_t),
+ "Atomic<int64> alignment differs from that of underlying type");
#endif
} // namespace art
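
This hunk is representative of a change repeated throughout the patch: the project-local COMPILE_ASSERT macro (removed from runtime/base/macros.h below) is replaced by C++11 static_assert, which needs no helper template and takes a readable message instead of an identifier. A minimal before/after sketch:

// Illustrative sketch only -- not part of this patch.
#include <cstdint>

struct Header { uint32_t magic; uint32_t checksum; };

// Old style, via the now-removed macro (the message had to be a valid identifier):
//   COMPILE_ASSERT(sizeof(Header) == 8, header_has_unexpected_size);
// New style, built into the language since C++11:
static_assert(sizeof(Header) == 8, "Header has unexpected size");

int main() { return 0; }
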
diff --git a/runtime/barrier.cc b/runtime/barrier.cc
index b8edad3..5a8fbb3 100644
--- a/runtime/barrier.cc
+++ b/runtime/barrier.cc
@@ -23,7 +23,7 @@
Barrier::Barrier(int count)
: count_(count),
- lock_("GC barrier lock"),
+ lock_("GC barrier lock", kThreadSuspendCountLock),
condition_("GC barrier condition", lock_) {
}
diff --git a/runtime/barrier.h b/runtime/barrier.h
index 167e1d6..5ca88e8 100644
--- a/runtime/barrier.h
+++ b/runtime/barrier.h
@@ -50,7 +50,7 @@
// Counter, when this reaches 0 all people blocked on the barrier are signalled.
int count_ GUARDED_BY(lock_);
- Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ Mutex lock_ ACQUIRED_AFTER(Locks::abort_lock_);
ConditionVariable condition_ GUARDED_BY(lock_);
};
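
Barrier's lock now gets an explicit lock level in barrier.cc and an explicit ACQUIRED_AFTER ordering constraint here, rather than the catch-all DEFAULT_MUTEX_ACQUIRED_AFTER. These macros wrap Clang's thread-safety attributes; the sketch below shows the guarded-by half of that machinery using the raw attributes so it stands alone without ART's Mutex types (compile with clang and -Wthread-safety).

// Illustrative sketch only -- not part of this patch.
#include <mutex>

class __attribute__((capability("mutex"))) CheckedMutex {
 public:
  void Lock() __attribute__((acquire_capability())) { mu_.lock(); }
  void Unlock() __attribute__((release_capability())) { mu_.unlock(); }
 private:
  std::mutex mu_;
};

class ToyBarrier {
 public:
  void Pass() {
    lock_.Lock();
    --count_;        // OK: the analysis can prove lock_ is held here.
    lock_.Unlock();  // Dropping this line, or touching count_ elsewhere, warns.
  }
 private:
  CheckedMutex lock_;
  int count_ __attribute__((guarded_by(lock_))) = 1;
};

int main() { ToyBarrier b; b.Pass(); return 0; }
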
diff --git a/runtime/base/allocator.cc b/runtime/base/allocator.cc
index 994e235..4f2fc07 100644
--- a/runtime/base/allocator.cc
+++ b/runtime/base/allocator.cc
@@ -30,11 +30,11 @@
explicit MallocAllocator() {}
~MallocAllocator() {}
- virtual void* Alloc(size_t size) {
+ void* Alloc(size_t size) {
return calloc(sizeof(uint8_t), size);
}
- virtual void Free(void* p) {
+ void Free(void* p) {
free(p);
}
@@ -49,13 +49,15 @@
explicit NoopAllocator() {}
~NoopAllocator() {}
- virtual void* Alloc(size_t size) {
+ void* Alloc(size_t size) {
+ UNUSED(size);
LOG(FATAL) << "NoopAllocator::Alloc should not be called";
- return NULL;
+ UNREACHABLE();
}
- virtual void Free(void* p) {
+ void Free(void* p) {
// Noop.
+ UNUSED(p);
}
private:
diff --git a/runtime/base/allocator.h b/runtime/base/allocator.h
index 95dd407..30f7f12 100644
--- a/runtime/base/allocator.h
+++ b/runtime/base/allocator.h
@@ -114,11 +114,12 @@
// Used internally by STL data structures.
template <class U>
TrackingAllocatorImpl(const TrackingAllocatorImpl<U, kTag>& alloc) throw() {
+ UNUSED(alloc);
}
// Used internally by STL data structures.
TrackingAllocatorImpl() throw() {
- COMPILE_ASSERT(kTag < kAllocatorTagCount, must_be_less_than_count);
+ static_assert(kTag < kAllocatorTagCount, "kTag must be less than kAllocatorTagCount");
}
// Enables an allocator for objects of one type to allocate storage for objects of another type.
@@ -129,6 +130,7 @@
};
pointer allocate(size_type n, const_pointer hint = 0) {
+ UNUSED(hint);
const size_t size = n * sizeof(T);
TrackedAllocators::RegisterAllocation(GetTag(), size);
return reinterpret_cast<pointer>(malloc(size));
diff --git a/runtime/base/bit_vector.cc b/runtime/base/bit_vector.cc
index 71dec7a..4390180 100644
--- a/runtime/base/bit_vector.cc
+++ b/runtime/base/bit_vector.cc
@@ -16,6 +16,7 @@
#include "bit_vector.h"
+#include <limits>
#include <sstream>
#include "allocator.h"
@@ -40,8 +41,8 @@
storage_size_(storage_size),
allocator_(allocator),
expandable_(expandable) {
- COMPILE_ASSERT(sizeof(*storage_) == kWordBytes, check_word_bytes);
- COMPILE_ASSERT(sizeof(*storage_) * 8u == kWordBits, check_word_bits);
+ static_assert(sizeof(*storage_) == kWordBytes, "word bytes");
+ static_assert(sizeof(*storage_) * 8u == kWordBits, "word bits");
if (storage_ == nullptr) {
storage_size_ = BitsToWords(start_bits);
storage_ = static_cast<uint32_t*>(allocator_->Alloc(storage_size_ * kWordBytes));
@@ -218,13 +219,13 @@
uint32_t idx;
// We can set every storage element with -1.
for (idx = 0; idx < WordIndex(num_bits); idx++) {
- storage_[idx] = -1;
+ storage_[idx] = std::numeric_limits<uint32_t>::max();
}
// Handle the potentially last few bits.
uint32_t rem_num_bits = num_bits & 0x1f;
if (rem_num_bits != 0) {
- storage_[idx] = (1 << rem_num_bits) - 1;
+ storage_[idx] = (1U << rem_num_bits) - 1;
++idx;
}
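
The two literal changes above keep the arithmetic unsigned end to end: std::numeric_limits<uint32_t>::max() replaces the implicit int-to-uint32_t conversion of -1, and the 1U shift keeps the mask computation out of signed-int territory, which matters once rem_num_bits reaches 31. A small standalone illustration (the motivation stated here is inferred from the change, not spelled out in the patch):

// Illustrative sketch only -- not part of this patch.
#include <cstdint>
#include <limits>

int main() {
  // Old: storage_[idx] = -1;  relies on converting a signed -1 to uint32_t.
  uint32_t all_ones = std::numeric_limits<uint32_t>::max();

  // Old: (1 << rem_num_bits) - 1;  shifts a signed int, which goes wrong at
  // rem_num_bits == 31 (a shift into the sign bit). The unsigned literal keeps
  // the whole expression in uint32_t.
  uint32_t rem_num_bits = 31;
  uint32_t mask = (1U << rem_num_bits) - 1;

  return (all_ones == 0xFFFFFFFFu && mask == 0x7FFFFFFFu) ? 0 : 1;
}
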
diff --git a/runtime/base/casts.h b/runtime/base/casts.h
index 138c2fd..c7e39a2 100644
--- a/runtime/base/casts.h
+++ b/runtime/base/casts.h
@@ -77,7 +77,7 @@
inline Dest bit_cast(const Source& source) {
// Compile time assertion: sizeof(Dest) == sizeof(Source)
// A compile error here means your Dest and Source have different sizes.
- COMPILE_ASSERT(sizeof(Dest) == sizeof(Source), verify_sizes_are_equal);
+ static_assert(sizeof(Dest) == sizeof(Source), "sizes should be equal");
Dest dest;
memcpy(&dest, &source, sizeof(dest));
return dest;
diff --git a/runtime/base/logging.cc b/runtime/base/logging.cc
index 46c3538..d3a2655 100644
--- a/runtime/base/logging.cc
+++ b/runtime/base/logging.cc
@@ -213,8 +213,8 @@
ANDROID_LOG_VERBOSE, ANDROID_LOG_DEBUG, ANDROID_LOG_INFO, ANDROID_LOG_WARN,
ANDROID_LOG_ERROR, ANDROID_LOG_FATAL, ANDROID_LOG_FATAL
};
-COMPILE_ASSERT(arraysize(kLogSeverityToAndroidLogPriority) == INTERNAL_FATAL + 1,
- mismatch_in_size_of_kLogSeverityToAndroidLogPriority_and_values_in_LogSeverity);
+static_assert(arraysize(kLogSeverityToAndroidLogPriority) == INTERNAL_FATAL + 1,
+ "Mismatch in size of kLogSeverityToAndroidLogPriority and values in LogSeverity");
#endif
void LogMessage::LogLine(const char* file, unsigned int line, LogSeverity log_severity,
diff --git a/runtime/base/macros.h b/runtime/base/macros.h
index febea61..66d6fab 100644
--- a/runtime/base/macros.h
+++ b/runtime/base/macros.h
@@ -41,28 +41,6 @@
#define FINAL
#endif
-// The COMPILE_ASSERT macro can be used to verify that a compile time
-// expression is true. For example, you could use it to verify the
-// size of a static array:
-//
-// COMPILE_ASSERT(ARRAYSIZE(content_type_names) == CONTENT_NUM_TYPES,
-// content_type_names_incorrect_size);
-//
-// or to make sure a struct is smaller than a certain size:
-//
-// COMPILE_ASSERT(sizeof(foo) < 128, foo_too_large);
-//
-// The second argument to the macro is the name of the variable. If
-// the expression is false, most compilers will issue a warning/error
-// containing the name of the variable.
-
-template <bool>
-struct CompileAssert {
-};
-
-#define COMPILE_ASSERT(expr, msg) \
- typedef CompileAssert<(bool(expr))> msg[bool(expr) ? 1 : -1] // NOLINT
-
// Declare a friend relationship in a class with a test. Used rather that FRIEND_TEST to avoid
// globally importing gtest/gtest.h into the main ART header files.
#define ART_FRIEND_TEST(test_set_name, individual_test)\
@@ -70,9 +48,11 @@
// DISALLOW_COPY_AND_ASSIGN disallows the copy and operator= functions. It goes in the private:
// declarations in a class.
+#if !defined(DISALLOW_COPY_AND_ASSIGN)
#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
TypeName(const TypeName&) = delete; \
void operator=(const TypeName&) = delete
+#endif
// A macro to disallow all the implicit constructors, namely the default constructor, copy
// constructor and operator= functions.
@@ -189,7 +169,19 @@
#define PURE __attribute__ ((__pure__))
#define WARN_UNUSED __attribute__((warn_unused_result))
-template<typename T> void UNUSED(const T&) {}
+// A deprecated helper that creates a false use of one or more parameters, for example:
+// int foo(int x) { UNUSED(x); return 10; }
+// to avoid compiler warnings. Going forward we prefer ATTRIBUTE_UNUSED.
+template<typename... T> void UNUSED(const T&...) {}
+
+// An attribute to place on a parameter to a function, for example:
+// int foo(int x ATTRIBUTE_UNUSED) { return 10; }
+// to avoid compiler warnings.
+#define ATTRIBUTE_UNUSED __attribute__((__unused__))
+
+// Define that a position within code is unreachable, for example:
+// int foo () { LOG(FATAL) << "Don't call me"; UNREACHABLE(); }
+// without the UNREACHABLE a return statement would be necessary.
#define UNREACHABLE __builtin_unreachable
// The FALLTHROUGH_INTENDED macro can be used to annotate implicit fall-through
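
The three facilities documented above replace the old single-argument UNUSED helper: a variadic UNUSED() for a false use of several values at once, ATTRIBUTE_UNUSED for annotating the parameter itself (the preferred form), and UNREACHABLE() for marking code after a call that never returns. A standalone usage sketch, with the definitions copied from the lines above so it compiles on its own:

// Illustrative sketch only -- not part of this patch.
#include <cstdio>
#include <cstdlib>

#define ATTRIBUTE_UNUSED __attribute__((__unused__))
#define UNREACHABLE __builtin_unreachable
template <typename... T> void UNUSED(const T&...) {}

// Preferred: annotate the parameter.
int WithAttribute(int x ATTRIBUTE_UNUSED) { return 10; }

// Deprecated alternative: create a false use in the body; the variadic form
// consumes several parameters in one call.
int WithFalseUse(int x, int y) { UNUSED(x, y); return 10; }

// After a call that cannot return, UNREACHABLE() stands in for a dead return.
int MustNotBeCalled() {
  std::fprintf(stderr, "Don't call me\n");
  std::abort();
  UNREACHABLE();
}

int main() { return WithAttribute(1) + WithFalseUse(2, 3) - 20; }
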
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 628231a..d589eb6 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -435,7 +435,7 @@
DISALLOW_COPY_AND_ASSIGN(MutexLock);
};
// Catch bug where variable name is omitted. "MutexLock (lock);" instead of "MutexLock mu(lock)".
-#define MutexLock(x) COMPILE_ASSERT(0, mutex_lock_declaration_missing_variable_name)
+#define MutexLock(x) static_assert(0, "MutexLock declaration missing variable name")
// Scoped locker/unlocker for a ReaderWriterMutex that acquires read access to mu upon
// construction and releases it upon destruction.
@@ -457,7 +457,7 @@
};
// Catch bug where variable name is omitted. "ReaderMutexLock (lock);" instead of
// "ReaderMutexLock mu(lock)".
-#define ReaderMutexLock(x) COMPILE_ASSERT(0, reader_mutex_lock_declaration_missing_variable_name)
+#define ReaderMutexLock(x) static_assert(0, "ReaderMutexLock declaration missing variable name")
// Scoped locker/unlocker for a ReaderWriterMutex that acquires write access to mu upon
// construction and releases it upon destruction.
@@ -479,7 +479,7 @@
};
// Catch bug where variable name is omitted. "WriterMutexLock (lock);" instead of
// "WriterMutexLock mu(lock)".
-#define WriterMutexLock(x) COMPILE_ASSERT(0, writer_mutex_lock_declaration_missing_variable_name)
+#define WriterMutexLock(x) static_assert(0, "WriterMutexLock declaration missing variable name")
// Global mutexes corresponding to the levels above.
class Locks {
diff --git a/runtime/base/stringpiece.h b/runtime/base/stringpiece.h
index b8de308..d793bb6 100644
--- a/runtime/base/stringpiece.h
+++ b/runtime/base/stringpiece.h
@@ -67,8 +67,8 @@
ptr_ = nullptr;
length_ = 0;
}
- void set(const char* data, size_type len) {
- ptr_ = data;
+ void set(const char* data_in, size_type len) {
+ ptr_ = data_in;
length_ = len;
}
void set(const char* str) {
@@ -79,8 +79,8 @@
length_ = 0;
}
}
- void set(const void* data, size_type len) {
- ptr_ = reinterpret_cast<const char*>(data);
+ void set(const void* data_in, size_type len) {
+ ptr_ = reinterpret_cast<const char*>(data_in);
length_ = len;
}
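
Renaming the set() parameters from data to data_in keeps the parameter from sharing a name with the class's data() accessor, the kind of shadowing that stricter -Wshadow settings report; the behaviour is unchanged. A hypothetical standalone example of the pattern being avoided:

// Illustrative sketch only -- not part of this patch; names are hypothetical.
#include <cstddef>

class Buffer {
 public:
  const char* data() const { return ptr_; }

  // With -Wshadow on older GCC, the parameter below is reported as shadowing
  // Buffer::data.
  void set_shadowing(const char* data, size_t len) { ptr_ = data; length_ = len; }

  // Renamed parameter: same behaviour, no shadowing.
  void set(const char* data_in, size_t len) { ptr_ = data_in; length_ = len; }

 private:
  const char* ptr_ = nullptr;
  size_t length_ = 0;
};

int main() { Buffer b; b.set("abc", 3); return 0; }
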
diff --git a/runtime/base/unix_file/mapped_file.cc b/runtime/base/unix_file/mapped_file.cc
deleted file mode 100644
index 77f4d02..0000000
--- a/runtime/base/unix_file/mapped_file.cc
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "base/logging.h"
-#include "base/unix_file/mapped_file.h"
-#include <fcntl.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <algorithm>
-#include <string>
-
-namespace unix_file {
-
-MappedFile::~MappedFile() {
-}
-
-int MappedFile::Close() {
- if (IsMapped()) {
- Unmap();
- }
- return FdFile::Close();
-}
-
-bool MappedFile::MapReadOnly() {
- CHECK(IsOpened());
- CHECK(!IsMapped());
- struct stat st;
- int result = TEMP_FAILURE_RETRY(fstat(Fd(), &st));
- if (result == -1) {
- PLOG(::art::WARNING) << "Failed to stat file '" << GetPath() << "'";
- return false;
- }
- file_size_ = st.st_size;
- do {
- mapped_file_ = mmap(NULL, file_size_, PROT_READ, MAP_PRIVATE, Fd(), 0);
- } while (mapped_file_ == MAP_FAILED && errno == EINTR);
- if (mapped_file_ == MAP_FAILED) {
- PLOG(::art::WARNING) << "Failed to mmap file '" << GetPath() << "' of size "
- << file_size_ << " bytes to memory";
- return false;
- }
- map_mode_ = kMapReadOnly;
- return true;
-}
-
-bool MappedFile::MapReadWrite(int64_t file_size) {
- CHECK(IsOpened());
- CHECK(!IsMapped());
-#ifdef __linux__
- int result = TEMP_FAILURE_RETRY(ftruncate64(Fd(), file_size));
-#else
- int result = TEMP_FAILURE_RETRY(ftruncate(Fd(), file_size));
-#endif
- if (result == -1) {
- PLOG(::art::ERROR) << "Failed to truncate file '" << GetPath() << "' to size " << file_size;
- return false;
- }
- file_size_ = file_size;
- do {
- mapped_file_ =
- mmap(NULL, file_size_, PROT_READ | PROT_WRITE, MAP_SHARED, Fd(), 0);
- } while (mapped_file_ == MAP_FAILED && errno == EINTR);
- if (mapped_file_ == MAP_FAILED) {
- PLOG(::art::WARNING) << "Failed to mmap file '" << GetPath() << "' of size "
- << file_size_ << " bytes to memory";
- return false;
- }
- map_mode_ = kMapReadWrite;
- return true;
-}
-
-bool MappedFile::Unmap() {
- CHECK(IsMapped());
- int result = TEMP_FAILURE_RETRY(munmap(mapped_file_, file_size_));
- if (result == -1) {
- PLOG(::art::WARNING) << "Failed unmap file '" << GetPath() << "' of size " << file_size_;
- return false;
- } else {
- mapped_file_ = NULL;
- file_size_ = -1;
- return true;
- }
-}
-
-int64_t MappedFile::Read(char* buf, int64_t byte_count, int64_t offset) const {
- if (IsMapped()) {
- if (offset < 0) {
- errno = EINVAL;
- return -errno;
- }
- int64_t read_size = std::max(static_cast<int64_t>(0),
- std::min(byte_count, file_size_ - offset));
- if (read_size > 0) {
- memcpy(buf, data() + offset, read_size);
- }
- return read_size;
- } else {
- return FdFile::Read(buf, byte_count, offset);
- }
-}
-
-int MappedFile::SetLength(int64_t new_length) {
- CHECK(!IsMapped());
- return FdFile::SetLength(new_length);
-}
-
-int64_t MappedFile::GetLength() const {
- if (IsMapped()) {
- return file_size_;
- } else {
- return FdFile::GetLength();
- }
-}
-
-int MappedFile::Flush() {
- int rc = IsMapped() ? TEMP_FAILURE_RETRY(msync(mapped_file_, file_size_, 0)) : FdFile::Flush();
- return rc == -1 ? -errno : 0;
-}
-
-int64_t MappedFile::Write(const char* buf, int64_t byte_count, int64_t offset) {
- if (IsMapped()) {
- CHECK_EQ(kMapReadWrite, map_mode_);
- if (offset < 0) {
- errno = EINVAL;
- return -errno;
- }
- int64_t write_size = std::max(static_cast<int64_t>(0),
- std::min(byte_count, file_size_ - offset));
- if (write_size > 0) {
- memcpy(data() + offset, buf, write_size);
- }
- return write_size;
- } else {
- return FdFile::Write(buf, byte_count, offset);
- }
-}
-
-int64_t MappedFile::size() const {
- return GetLength();
-}
-
-bool MappedFile::IsMapped() const {
- return mapped_file_ != NULL && mapped_file_ != MAP_FAILED;
-}
-
-char* MappedFile::data() const {
- CHECK(IsMapped());
- return static_cast<char*>(mapped_file_);
-}
-
-} // namespace unix_file
diff --git a/runtime/base/unix_file/mapped_file.h b/runtime/base/unix_file/mapped_file.h
deleted file mode 100644
index 73056e9..0000000
--- a/runtime/base/unix_file/mapped_file.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_BASE_UNIX_FILE_MAPPED_FILE_H_
-#define ART_RUNTIME_BASE_UNIX_FILE_MAPPED_FILE_H_
-
-#include <fcntl.h>
-#include <string>
-#include "base/unix_file/fd_file.h"
-
-namespace unix_file {
-
-// Random access file which handles an mmap(2), munmap(2) pair in C++
-// RAII style. When a file is mmapped, the random access file
-// interface accesses the mmapped memory directly; otherwise, the
-// standard file I/O is used. Whenever a function fails, it returns
-// false and errno is set to the corresponding error code.
-class MappedFile : public FdFile {
- public:
- // File modes used in Open().
- enum FileMode {
-#ifdef __linux__
- kReadOnlyMode = O_RDONLY | O_LARGEFILE,
- kReadWriteMode = O_CREAT | O_RDWR | O_LARGEFILE,
-#else
- kReadOnlyMode = O_RDONLY,
- kReadWriteMode = O_CREAT | O_RDWR,
-#endif
- };
-
- MappedFile() : FdFile(), file_size_(-1), mapped_file_(NULL) {
- }
- // Creates a MappedFile using the given file descriptor. Takes ownership of
- // the file descriptor.
- explicit MappedFile(int fd) : FdFile(fd), file_size_(-1), mapped_file_(NULL) {
- }
-
- // Unmaps and closes the file if needed.
- virtual ~MappedFile();
-
- // Maps an opened file to memory in the read-only mode.
- bool MapReadOnly();
-
- // Maps an opened file to memory in the read-write mode. Before the
- // file is mapped, it is truncated to 'file_size' bytes.
- bool MapReadWrite(int64_t file_size);
-
- // Unmaps a mapped file so that, e.g., SetLength() may be invoked.
- bool Unmap();
-
- // RandomAccessFile API.
- // The functions below require that the file is open, but it doesn't
- // have to be mapped.
- virtual int Close();
- virtual int64_t Read(char* buf, int64_t byte_count, int64_t offset) const;
- // SetLength() requires that the file is not mmapped.
- virtual int SetLength(int64_t new_length);
- virtual int64_t GetLength() const;
- virtual int Flush();
- // Write() requires that, if the file is mmapped, it is mmapped in
- // the read-write mode. Writes past the end of file are discarded.
- virtual int64_t Write(const char* buf, int64_t byte_count, int64_t offset);
-
- // A convenience method equivalent to GetLength().
- int64_t size() const;
-
- // Returns true if the file has been mmapped.
- bool IsMapped() const;
-
- // Returns a pointer to the start of the memory mapping once the
- // file is successfully mapped; crashes otherwise.
- char* data() const;
-
- private:
- enum MapMode {
- kMapReadOnly = 1,
- kMapReadWrite = 2,
- };
-
- mutable int64_t file_size_; // May be updated in GetLength().
- void* mapped_file_;
- MapMode map_mode_;
-
- DISALLOW_COPY_AND_ASSIGN(MappedFile);
-};
-
-} // namespace unix_file
-
-#endif // ART_RUNTIME_BASE_UNIX_FILE_MAPPED_FILE_H_
diff --git a/runtime/base/unix_file/mapped_file_test.cc b/runtime/base/unix_file/mapped_file_test.cc
deleted file mode 100644
index 59334d4..0000000
--- a/runtime/base/unix_file/mapped_file_test.cc
+++ /dev/null
@@ -1,272 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "base/unix_file/mapped_file.h"
-#include "base/logging.h"
-#include "base/unix_file/fd_file.h"
-#include "base/unix_file/random_access_file_test.h"
-#include "base/unix_file/random_access_file_utils.h"
-#include "base/unix_file/string_file.h"
-#include "gtest/gtest.h"
-
-namespace unix_file {
-
-class MappedFileTest : public RandomAccessFileTest {
- protected:
- MappedFileTest() : kContent("some content") {
- }
-
- void SetUp() {
- RandomAccessFileTest::SetUp();
-
- good_path_ = GetTmpPath("some-file.txt");
- int fd = TEMP_FAILURE_RETRY(open(good_path_.c_str(), O_CREAT|O_RDWR, 0666));
- FdFile dst(fd);
-
- StringFile src;
- src.Assign(kContent);
-
- ASSERT_TRUE(CopyFile(src, &dst));
- }
-
- void TearDown() {
- ASSERT_EQ(unlink(good_path_.c_str()), 0);
-
- RandomAccessFileTest::TearDown();
- }
-
- virtual RandomAccessFile* MakeTestFile() {
- TEMP_FAILURE_RETRY(truncate(good_path_.c_str(), 0));
- MappedFile* f = new MappedFile;
- CHECK(f->Open(good_path_, MappedFile::kReadWriteMode));
- return f;
- }
-
- const std::string kContent;
- std::string good_path_;
-};
-
-TEST_F(MappedFileTest, OkayToNotUse) {
- MappedFile file;
- EXPECT_EQ(-1, file.Fd());
- EXPECT_FALSE(file.IsOpened());
- EXPECT_FALSE(file.IsMapped());
-}
-
-TEST_F(MappedFileTest, OpenClose) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadOnlyMode));
- EXPECT_GE(file.Fd(), 0);
- EXPECT_TRUE(file.IsOpened());
- EXPECT_EQ(kContent.size(), static_cast<uint64_t>(file.size()));
- EXPECT_EQ(0, file.Close());
- EXPECT_EQ(-1, file.Fd());
- EXPECT_FALSE(file.IsOpened());
-}
-
-TEST_F(MappedFileTest, OpenFdClose) {
- FILE* f = tmpfile();
- ASSERT_TRUE(f != NULL);
- MappedFile file(fileno(f));
- EXPECT_GE(file.Fd(), 0);
- EXPECT_TRUE(file.IsOpened());
- EXPECT_EQ(0, file.Close());
-}
-
-TEST_F(MappedFileTest, CanUseAfterMapReadOnly) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadOnlyMode));
- EXPECT_FALSE(file.IsMapped());
- EXPECT_TRUE(file.MapReadOnly());
- EXPECT_TRUE(file.IsMapped());
- EXPECT_EQ(kContent.size(), static_cast<uint64_t>(file.size()));
- ASSERT_TRUE(file.data());
- EXPECT_EQ(0, memcmp(kContent.c_str(), file.data(), file.size()));
- EXPECT_EQ(0, file.Flush());
-}
-
-TEST_F(MappedFileTest, CanUseAfterMapReadWrite) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadWriteMode));
- EXPECT_FALSE(file.IsMapped());
- EXPECT_TRUE(file.MapReadWrite(1));
- EXPECT_TRUE(file.IsMapped());
- EXPECT_EQ(1, file.size());
- ASSERT_TRUE(file.data());
- EXPECT_EQ(kContent[0], *file.data());
- EXPECT_EQ(0, file.Flush());
-}
-
-TEST_F(MappedFileTest, CanWriteNewData) {
- const std::string new_path(GetTmpPath("new-file.txt"));
- ASSERT_EQ(-1, unlink(new_path.c_str()));
- ASSERT_EQ(ENOENT, errno);
-
- MappedFile file;
- ASSERT_TRUE(file.Open(new_path, MappedFile::kReadWriteMode));
- EXPECT_TRUE(file.MapReadWrite(kContent.size()));
- EXPECT_TRUE(file.IsMapped());
- EXPECT_EQ(kContent.size(), static_cast<uint64_t>(file.size()));
- ASSERT_TRUE(file.data());
- memcpy(file.data(), kContent.c_str(), kContent.size());
- EXPECT_EQ(0, file.Close());
- EXPECT_FALSE(file.IsMapped());
-
- FdFile new_file(TEMP_FAILURE_RETRY(open(new_path.c_str(), O_RDONLY)));
- StringFile buffer;
- ASSERT_TRUE(CopyFile(new_file, &buffer));
- EXPECT_EQ(kContent, buffer.ToStringPiece());
- EXPECT_EQ(0, unlink(new_path.c_str()));
-}
-
-TEST_F(MappedFileTest, FileMustExist) {
- const std::string bad_path(GetTmpPath("does-not-exist.txt"));
- MappedFile file;
- EXPECT_FALSE(file.Open(bad_path, MappedFile::kReadOnlyMode));
- EXPECT_EQ(-1, file.Fd());
-}
-
-TEST_F(MappedFileTest, FileMustBeWritable) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadOnlyMode));
- EXPECT_FALSE(file.MapReadWrite(10));
-}
-
-TEST_F(MappedFileTest, RemappingAllowedUntilSuccess) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadOnlyMode));
- EXPECT_FALSE(file.MapReadWrite(10));
- EXPECT_FALSE(file.MapReadWrite(10));
-}
-
-TEST_F(MappedFileTest, ResizeMappedFile) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadWriteMode));
- ASSERT_TRUE(file.MapReadWrite(10));
- EXPECT_EQ(10, file.GetLength());
- EXPECT_TRUE(file.Unmap());
- EXPECT_TRUE(file.MapReadWrite(20));
- EXPECT_EQ(20, file.GetLength());
- EXPECT_EQ(0, file.Flush());
- EXPECT_TRUE(file.Unmap());
- EXPECT_EQ(0, file.Flush());
- EXPECT_EQ(0, file.SetLength(5));
- EXPECT_TRUE(file.MapReadOnly());
- EXPECT_EQ(5, file.GetLength());
-}
-
-TEST_F(MappedFileTest, ReadNotMapped) {
- TestRead();
-}
-
-TEST_F(MappedFileTest, SetLengthNotMapped) {
- TestSetLength();
-}
-
-TEST_F(MappedFileTest, WriteNotMapped) {
- TestWrite();
-}
-
-TEST_F(MappedFileTest, ReadMappedReadOnly) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadOnlyMode));
- ASSERT_TRUE(file.MapReadOnly());
- TestReadContent(kContent, &file);
-}
-
-TEST_F(MappedFileTest, ReadMappedReadWrite) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadWriteMode));
- ASSERT_TRUE(file.MapReadWrite(kContent.size()));
- TestReadContent(kContent, &file);
-}
-
-TEST_F(MappedFileTest, WriteMappedReadWrite) {
- TEMP_FAILURE_RETRY(unlink(good_path_.c_str()));
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadWriteMode));
- ASSERT_TRUE(file.MapReadWrite(kContent.size()));
-
- // Can't write to a negative offset.
- EXPECT_EQ(-EINVAL, file.Write(kContent.c_str(), 0, -123));
-
- // A zero-length write is a no-op.
- EXPECT_EQ(0, file.Write(kContent.c_str(), 0, 0));
- // But the file size is as given when mapped.
- EXPECT_EQ(kContent.size(), static_cast<uint64_t>(file.GetLength()));
-
- // Data written past the end are discarded.
- EXPECT_EQ(kContent.size() - 1,
- static_cast<uint64_t>(file.Write(kContent.c_str(), kContent.size(), 1)));
- EXPECT_EQ(0, memcmp(kContent.c_str(), file.data() + 1, kContent.size() - 1));
-
- // Data can be overwritten.
- EXPECT_EQ(kContent.size(),
- static_cast<uint64_t>(file.Write(kContent.c_str(), kContent.size(), 0)));
- EXPECT_EQ(0, memcmp(kContent.c_str(), file.data(), kContent.size()));
-}
-
-#if 0 // death tests don't work on android yet
-
-class MappedFileDeathTest : public MappedFileTest {};
-
-TEST_F(MappedFileDeathTest, MustMapBeforeUse) {
- MappedFile file;
- EXPECT_TRUE(file.Open(good_path_, MappedFile::kReadOnlyMode));
- EXPECT_DEATH(file.data(), "mapped_");
-}
-
-TEST_F(MappedFileDeathTest, RemappingNotAllowedReadOnly) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadOnlyMode));
- ASSERT_TRUE(file.MapReadOnly());
- EXPECT_DEATH(file.MapReadOnly(), "mapped_");
-}
-
-TEST_F(MappedFileDeathTest, RemappingNotAllowedReadWrite) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadWriteMode));
- ASSERT_TRUE(file.MapReadWrite(10));
- EXPECT_DEATH(file.MapReadWrite(10), "mapped_");
-}
-
-TEST_F(MappedFileDeathTest, SetLengthMappedReadWrite) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadWriteMode));
- ASSERT_TRUE(file.MapReadWrite(10));
- EXPECT_EQ(10, file.GetLength());
- EXPECT_DEATH(file.SetLength(0), ".*");
-}
-
-TEST_F(MappedFileDeathTest, SetLengthMappedReadOnly) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadOnlyMode));
- ASSERT_TRUE(file.MapReadOnly());
- EXPECT_EQ(kContent.size(), file.GetLength());
- EXPECT_DEATH(file.SetLength(0), ".*");
-}
-
-TEST_F(MappedFileDeathTest, WriteMappedReadOnly) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadOnlyMode));
- ASSERT_TRUE(file.MapReadOnly());
- char buf[10];
- EXPECT_DEATH(file.Write(buf, 0, 0), ".*");
-}
-
-#endif
-
-} // namespace unix_file
diff --git a/runtime/base/unix_file/null_file.cc b/runtime/base/unix_file/null_file.cc
index 050decb..322c25a 100644
--- a/runtime/base/unix_file/null_file.cc
+++ b/runtime/base/unix_file/null_file.cc
@@ -33,7 +33,8 @@
return 0;
}
-int64_t NullFile::Read(char* buf, int64_t byte_count, int64_t offset) const {
+int64_t NullFile::Read(char* buf ATTRIBUTE_UNUSED, int64_t byte_count ATTRIBUTE_UNUSED,
+ int64_t offset) const {
if (offset < 0) {
return -EINVAL;
}
@@ -51,7 +52,8 @@
return 0;
}
-int64_t NullFile::Write(const char* buf, int64_t byte_count, int64_t offset) {
+int64_t NullFile::Write(const char* buf ATTRIBUTE_UNUSED, int64_t byte_count ATTRIBUTE_UNUSED,
+ int64_t offset) {
if (offset < 0) {
return -EINVAL;
}
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index b2df091..fe5b765 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "jni_internal.h"
+#include "check_jni.h"
#include <sys/mman.h>
#include <zlib.h>
@@ -27,6 +27,7 @@
#include "field_helper.h"
#include "gc/space/space.h"
#include "java_vm_ext.h"
+#include "jni_internal.h"
#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
@@ -512,7 +513,7 @@
return true;
}
- bool CheckReferenceKind(IndirectRefKind expected_kind, JavaVMExt* vm, Thread* self, jobject obj) {
+ bool CheckReferenceKind(IndirectRefKind expected_kind, Thread* self, jobject obj) {
IndirectRefKind found_kind;
if (expected_kind == kLocal) {
found_kind = GetIndirectRefKind(obj);
@@ -2398,7 +2399,7 @@
}
if (sc.Check(soa, false, "L", &result)) {
DCHECK_EQ(IsSameObject(env, obj, result.L), JNI_TRUE);
- DCHECK(sc.CheckReferenceKind(kind, soa.Vm(), soa.Self(), result.L));
+ DCHECK(sc.CheckReferenceKind(kind, soa.Self(), result.L));
return result.L;
}
}
@@ -2410,7 +2411,7 @@
ScopedCheck sc(kFlag_ExcepOkay, function_name);
JniValueType args[2] = {{.E = env}, {.L = obj}};
sc.Check(soa, true, "EL", args);
- if (sc.CheckReferenceKind(kind, soa.Vm(), soa.Self(), obj)) {
+ if (sc.CheckReferenceKind(kind, soa.Self(), obj)) {
JniValueType result;
switch (kind) {
case kGlobal:
@@ -3116,7 +3117,7 @@
static jarray NewPrimitiveArray(const char* function_name, JNIEnv* env, jsize length,
Primitive::Type type) {
ScopedObjectAccess soa(env);
- ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ ScopedCheck sc(kFlag_Default, function_name);
JniValueType args[2] = {{.E = env}, {.z = length}};
if (sc.Check(soa, true, "Ez", args)) {
JniValueType result;
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 875efbb..ead3fa5 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -105,8 +105,7 @@
}
inline mirror::ArtMethod* ClassLinker::GetResolvedMethod(uint32_t method_idx,
- mirror::ArtMethod* referrer,
- InvokeType type) {
+ mirror::ArtMethod* referrer) {
mirror::ArtMethod* resolved_method = referrer->GetDexCacheResolvedMethod(method_idx);
if (resolved_method == nullptr || resolved_method->IsRuntimeMethod()) {
return nullptr;
@@ -117,7 +116,7 @@
inline mirror::ArtMethod* ClassLinker::ResolveMethod(Thread* self, uint32_t method_idx,
mirror::ArtMethod** referrer,
InvokeType type) {
- mirror::ArtMethod* resolved_method = GetResolvedMethod(method_idx, *referrer, type);
+ mirror::ArtMethod* resolved_method = GetResolvedMethod(method_idx, *referrer);
if (LIKELY(resolved_method != nullptr)) {
return resolved_method;
}
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index e2514ec..eeb65f9 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -173,7 +173,7 @@
typedef std::priority_queue<FieldGap, std::vector<FieldGap>, FieldGapsComparator> FieldGaps;
// Adds largest aligned gaps to queue of gaps.
-void AddFieldGap(uint32_t gap_start, uint32_t gap_end, FieldGaps* gaps) {
+static void AddFieldGap(uint32_t gap_start, uint32_t gap_end, FieldGaps* gaps) {
DCHECK(gaps != nullptr);
uint32_t current_offset = gap_start;
@@ -817,7 +817,6 @@
if (oat_dex_file == nullptr) {
if (i == 0 && generated) {
- std::string error_msg;
error_msg = StringPrintf("\nFailed to find dex file '%s' (checksum 0x%x) in generated out "
" file'%s'", dex_location, next_location_checksum,
oat_file->GetLocation().c_str());
@@ -1161,9 +1160,13 @@
image_patch_delta = image_header->GetPatchDelta();
}
const OatHeader& oat_header = oat_file->GetOatHeader();
- bool ret = ((oat_header.GetImageFileLocationOatChecksum() == image_oat_checksum)
- && (oat_header.GetImagePatchDelta() == image_patch_delta)
- && (oat_header.GetImageFileLocationOatDataBegin() == image_oat_data_begin));
+ bool ret = (oat_header.GetImageFileLocationOatChecksum() == image_oat_checksum);
+
+ // If the oat file is PIC, it doesn't care if/how the image was relocated. Ignore these checks.
+ if (!oat_file->IsPic()) {
+ ret = ret && (oat_header.GetImagePatchDelta() == image_patch_delta)
+ && (oat_header.GetImageFileLocationOatDataBegin() == image_oat_data_begin);
+ }
if (!ret) {
*error_msg = StringPrintf("oat file '%s' mismatch (0x%x, %d, %d) with (0x%x, %" PRIdPTR ", %d)",
oat_file->GetLocation().c_str(),
@@ -1189,11 +1192,11 @@
if (oat_dex_file == nullptr) {
*error_msg = StringPrintf("oat file '%s' does not contain contents for '%s' with checksum 0x%x",
oat_file->GetLocation().c_str(), dex_location, dex_location_checksum);
- for (const OatFile::OatDexFile* oat_dex_file : oat_file->GetOatDexFiles()) {
+ for (const OatFile::OatDexFile* oat_dex_file_in : oat_file->GetOatDexFiles()) {
*error_msg += StringPrintf("\noat file '%s' contains contents for '%s' with checksum 0x%x",
oat_file->GetLocation().c_str(),
- oat_dex_file->GetDexFileLocation().c_str(),
- oat_dex_file->GetDexFileLocationChecksum());
+ oat_dex_file_in->GetDexFileLocation().c_str(),
+ oat_dex_file_in->GetDexFileLocationChecksum());
}
return false;
}
@@ -1669,8 +1672,8 @@
CHECK_EQ(oat_file.GetOatHeader().GetDexFileCount(),
static_cast<uint32_t>(dex_caches->GetLength()));
for (int32_t i = 0; i < dex_caches->GetLength(); i++) {
- StackHandleScope<1> hs(self);
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(dex_caches->Get(i)));
+ StackHandleScope<1> hs2(self);
+ Handle<mirror::DexCache> dex_cache(hs2.NewHandle(dex_caches->Get(i)));
const std::string& dex_file_location(dex_cache->GetLocation()->ToModifiedUtf8());
const OatFile::OatDexFile* oat_dex_file = oat_file.GetOatDexFile(dex_file_location.c_str(),
nullptr);
@@ -2037,8 +2040,8 @@
typedef std::pair<const DexFile*, const DexFile::ClassDef*> ClassPathEntry;
// Search a collection of DexFiles for a descriptor
-ClassPathEntry FindInClassPath(const char* descriptor,
- const std::vector<const DexFile*>& class_path) {
+static ClassPathEntry FindInClassPath(const char* descriptor,
+ const std::vector<const DexFile*>& class_path) {
for (size_t i = 0; i != class_path.size(); ++i) {
const DexFile* dex_file = class_path[i];
const DexFile::ClassDef* dex_class_def = dex_file->FindClassDef(descriptor);
@@ -2114,12 +2117,12 @@
LOG(WARNING) << "Null DexFile::mCookie for " << descriptor;
break;
}
- for (const DexFile* dex_file : *dex_files) {
- const DexFile::ClassDef* dex_class_def = dex_file->FindClassDef(descriptor);
+ for (const DexFile* cp_dex_file : *dex_files) {
+ const DexFile::ClassDef* dex_class_def = cp_dex_file->FindClassDef(descriptor);
if (dex_class_def != nullptr) {
- RegisterDexFile(*dex_file);
+ RegisterDexFile(*cp_dex_file);
mirror::Class* klass =
- DefineClass(self, descriptor, class_loader, *dex_file, *dex_class_def);
+ DefineClass(self, descriptor, class_loader, *cp_dex_file, *dex_class_def);
if (klass == nullptr) {
CHECK(self->IsExceptionPending()) << descriptor;
self->ClearException();
@@ -2202,9 +2205,9 @@
}
} else {
ScopedObjectAccessUnchecked soa(self);
- mirror::Class* klass = FindClassInPathClassLoader(soa, self, descriptor, class_loader);
- if (klass != nullptr) {
- return klass;
+ mirror::Class* cp_klass = FindClassInPathClassLoader(soa, self, descriptor, class_loader);
+ if (cp_klass != nullptr) {
+ return cp_klass;
}
ScopedLocalRef<jobject> class_loader_object(soa.Env(),
soa.AddLocalReference<jobject>(class_loader.Get()));
@@ -2449,17 +2452,18 @@
// by search for its position in the declared virtual methods.
oat_method_index = declaring_class->NumDirectMethods();
size_t end = declaring_class->NumVirtualMethods();
- bool found = false;
+ bool found_virtual = false;
for (size_t i = 0; i < end; i++) {
// Check method index instead of identity in case of duplicate method definitions.
if (method->GetDexMethodIndex() ==
declaring_class->GetVirtualMethod(i)->GetDexMethodIndex()) {
- found = true;
+ found_virtual = true;
break;
}
oat_method_index++;
}
- CHECK(found) << "Didn't find oat method index for virtual method: " << PrettyMethod(method);
+ CHECK(found_virtual) << "Didn't find oat method index for virtual method: "
+ << PrettyMethod(method);
}
DCHECK_EQ(oat_method_index,
GetOatMethodIndexFromMethodIndex(*declaring_class->GetDexCache()->GetDexFile(),
@@ -2468,10 +2472,9 @@
OatFile::OatClass oat_class = FindOatClass(*declaring_class->GetDexCache()->GetDexFile(),
declaring_class->GetDexClassDefIndex(),
found);
- if (!found) {
+ if (!(*found)) {
return OatFile::OatMethod::Invalid();
}
- *found = true;
return oat_class.GetOatMethod(oat_method_index);
}
@@ -2669,8 +2672,7 @@
void ClassLinker::LinkCode(Handle<mirror::ArtMethod> method,
const OatFile::OatClass* oat_class,
- const DexFile& dex_file, uint32_t dex_method_index,
- uint32_t method_index) {
+ uint32_t class_def_method_index) {
Runtime* runtime = Runtime::Current();
if (runtime->IsCompiler()) {
// The following code only applies to a non-compiler runtime.
@@ -2682,7 +2684,7 @@
if (oat_class != nullptr) {
// Every kind of method should at least get an invoke stub from the oat_method.
// non-abstract methods also get their code pointers.
- const OatFile::OatMethod oat_method = oat_class->GetOatMethod(method_index);
+ const OatFile::OatMethod oat_method = oat_class->GetOatMethod(class_def_method_index);
oat_method.LinkMethod(method.Get());
}
@@ -2784,18 +2786,17 @@
OatFile::OatClass oat_class = FindOatClass(dex_file, klass->GetDexClassDefIndex(),
&has_oat_class);
if (has_oat_class) {
- LoadClassMembers(self, dex_file, class_data, klass, class_loader, &oat_class);
+ LoadClassMembers(self, dex_file, class_data, klass, &oat_class);
}
}
if (!has_oat_class) {
- LoadClassMembers(self, dex_file, class_data, klass, class_loader, nullptr);
+ LoadClassMembers(self, dex_file, class_data, klass, nullptr);
}
}
void ClassLinker::LoadClassMembers(Thread* self, const DexFile& dex_file,
const uint8_t* class_data,
Handle<mirror::Class> klass,
- mirror::ClassLoader* class_loader,
const OatFile::OatClass* oat_class) {
// Load fields.
ClassDataItemIterator it(dex_file, class_data);
@@ -2872,7 +2873,7 @@
return;
}
klass->SetDirectMethod(i, method.Get());
- LinkCode(method, oat_class, dex_file, it.GetMemberIndex(), class_def_method_index);
+ LinkCode(method, oat_class, class_def_method_index);
uint32_t it_method_index = it.GetMemberIndex();
if (last_dex_method_index == it_method_index) {
// duplicate case
@@ -2894,7 +2895,7 @@
}
klass->SetVirtualMethod(i, method.Get());
DCHECK_EQ(class_def_method_index, it.NumDirectMethods() + i);
- LinkCode(method, oat_class, dex_file, it.GetMemberIndex(), class_def_method_index);
+ LinkCode(method, oat_class, class_def_method_index);
class_def_method_index++;
}
DCHECK(!it.HasNext());
@@ -3211,9 +3212,9 @@
new_class->SetClassLoader(component_type->GetClassLoader());
new_class->SetStatus(mirror::Class::kStatusLoaded, self);
{
- StackHandleScope<mirror::Class::kImtSize> hs(self,
- Runtime::Current()->GetImtUnimplementedMethod());
- new_class->PopulateEmbeddedImtAndVTable(&hs);
+ StackHandleScope<mirror::Class::kImtSize> hs2(self,
+ Runtime::Current()->GetImtUnimplementedMethod());
+ new_class->PopulateEmbeddedImtAndVTable(&hs2);
}
new_class->SetStatus(mirror::Class::kStatusInitialized, self);
// don't need to set new_class->SetObjectSize(..)
@@ -3341,8 +3342,8 @@
for (auto it = class_table_.lower_bound(hash), end = class_table_.end();
it != end && it->first == hash; ++it) {
- mirror::Class* klass = it->second.Read();
- if (klass == existing) {
+ mirror::Class* klass_from_table = it->second.Read();
+ if (klass_from_table == existing) {
class_table_.erase(it);
break;
}
@@ -3558,7 +3559,7 @@
Handle<mirror::Class> super(hs.NewHandle(klass->GetSuperClass()));
if (super.Get() != nullptr) {
// Acquire lock to prevent races on verifying the super class.
- ObjectLock<mirror::Class> lock(self, super);
+ ObjectLock<mirror::Class> super_lock(self, super);
if (!super->IsVerified() && !super->IsErroneous()) {
VerifyClass(self, super);
@@ -3863,10 +3864,10 @@
klass->SetVirtualMethods(virtuals);
}
for (size_t i = 0; i < num_virtual_methods; ++i) {
- StackHandleScope<1> hs(self);
+ StackHandleScope<1> hs2(self);
mirror::ObjectArray<mirror::ArtMethod>* decoded_methods =
soa.Decode<mirror::ObjectArray<mirror::ArtMethod>*>(methods);
- Handle<mirror::ArtMethod> prototype(hs.NewHandle(decoded_methods->Get(i)));
+ Handle<mirror::ArtMethod> prototype(hs2.NewHandle(decoded_methods->Get(i)));
mirror::ArtMethod* clone = CreateProxyMethod(self, klass, prototype);
if (UNLIKELY(clone == nullptr)) {
CHECK(self->IsExceptionPending()); // OOME.
@@ -3915,11 +3916,11 @@
CHECK(klass->GetIFields() == nullptr);
CheckProxyConstructor(klass->GetDirectMethod(0));
for (size_t i = 0; i < num_virtual_methods; ++i) {
- StackHandleScope<2> hs(self);
+ StackHandleScope<2> hs2(self);
mirror::ObjectArray<mirror::ArtMethod>* decoded_methods =
soa.Decode<mirror::ObjectArray<mirror::ArtMethod>*>(methods);
- Handle<mirror::ArtMethod> prototype(hs.NewHandle(decoded_methods->Get(i)));
- Handle<mirror::ArtMethod> virtual_method(hs.NewHandle(klass->GetVirtualMethod(i)));
+ Handle<mirror::ArtMethod> prototype(hs2.NewHandle(decoded_methods->Get(i)));
+ Handle<mirror::ArtMethod> virtual_method(hs2.NewHandle(klass->GetVirtualMethod(i)));
CheckProxyMethod(virtual_method, prototype);
}
@@ -4223,8 +4224,8 @@
mirror::ArtField* resolved_field = dex_cache->GetResolvedField(field_idx);
if (resolved_field == nullptr) {
dex_cache->SetResolvedField(field_idx, field);
- } else if (kIsDebugBuild) {
- CHECK_EQ(field, resolved_field);
+ } else {
+ DCHECK_EQ(field, resolved_field);
}
}
@@ -4236,8 +4237,8 @@
DCHECK(field_it.HasNextStaticField());
CHECK(can_init_statics);
for ( ; value_it.HasNext(); value_it.Next(), field_it.Next()) {
- StackHandleScope<1> hs(self);
- Handle<mirror::ArtField> field(hs.NewHandle(
+ StackHandleScope<1> hs2(self);
+ Handle<mirror::ArtField> field(hs2.NewHandle(
ResolveField(dex_file, field_it.GetMemberIndex(), dex_cache, class_loader, true)));
if (Runtime::Current()->IsActiveTransaction()) {
value_it.ReadValueToField<true>(field);
@@ -4632,7 +4633,14 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
dex_file_(method->GetDexFile()), mid_(&dex_file_->GetMethodId(method->GetDexMethodIndex())),
name_(nullptr), name_len_(0) {
- DCHECK(!method->IsProxyMethod()) << PrettyMethod(method);
+ DCHECK(!method->IsProxyMethod()) << PrettyMethod(method);
+ }
+
+ const char* GetName() {
+ if (name_ == nullptr) {
+ name_ = dex_file_->StringDataAndUtf16LengthByIdx(mid_->name_idx_, &name_len_);
+ }
+ return name_;
}
bool HasSameNameAndSignature(mirror::ArtMethod* other)
@@ -4643,9 +4651,7 @@
if (dex_file_ == other_dex_file) {
return mid_->name_idx_ == other_mid.name_idx_ && mid_->proto_idx_ == other_mid.proto_idx_;
}
- if (name_ == nullptr) {
- name_ = dex_file_->StringDataAndUtf16LengthByIdx(mid_->name_idx_, &name_len_);
- }
+ GetName(); // Only used to make sure it's calculated.
uint32_t other_name_len;
const char* other_name = other_dex_file->StringDataAndUtf16LengthByIdx(other_mid.name_idx_,
&other_name_len);
@@ -4666,12 +4672,72 @@
uint32_t name_len_;
};
+class LinkVirtualHashTable {
+ public:
+ LinkVirtualHashTable(Handle<mirror::Class> klass, size_t hash_size, uint32_t* hash_table)
+ : klass_(klass), hash_size_(hash_size), hash_table_(hash_table) {
+ std::fill(hash_table_, hash_table_ + hash_size_, invalid_index_);
+ }
+ void Add(uint32_t virtual_method_index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::ArtMethod* local_method = klass_->GetVirtualMethodDuringLinking(virtual_method_index);
+ const char* name = local_method->GetName();
+ uint32_t hash = Hash(name);
+ uint32_t index = hash % hash_size_;
+ // Linear probe until we have an empty slot.
+ while (hash_table_[index] != invalid_index_) {
+ if (++index == hash_size_) {
+ index = 0;
+ }
+ }
+ hash_table_[index] = virtual_method_index;
+ }
+ uint32_t FindAndRemove(MethodNameAndSignatureComparator* comparator)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const char* name = comparator->GetName();
+ uint32_t hash = Hash(name);
+ size_t index = hash % hash_size_;
+ while (true) {
+ const uint32_t value = hash_table_[index];
+ // Since linear probing creates contiguous runs, hitting an invalid index means we have
+ // reached the end of the run and can safely assume the method was not found.
+ if (value == invalid_index_) {
+ break;
+ }
+ if (value != removed_index_) { // This signifies the method has not already been overridden.
+ mirror::ArtMethod* virtual_method =
+ klass_->GetVirtualMethodDuringLinking(value);
+ if (comparator->HasSameNameAndSignature(virtual_method->GetInterfaceMethodIfProxy())) {
+ hash_table_[index] = removed_index_;
+ return value;
+ }
+ }
+ if (++index == hash_size_) {
+ index = 0;
+ }
+ }
+ return GetNotFoundIndex();
+ }
+ static uint32_t GetNotFoundIndex() {
+ return invalid_index_;
+ }
+
+ private:
+ static const uint32_t invalid_index_;
+ static const uint32_t removed_index_;
+
+ Handle<mirror::Class> klass_;
+ const size_t hash_size_;
+ uint32_t* const hash_table_;
+};
+
+const uint32_t LinkVirtualHashTable::invalid_index_ = std::numeric_limits<uint32_t>::max();
+const uint32_t LinkVirtualHashTable::removed_index_ = std::numeric_limits<uint32_t>::max() - 1;
+
bool ClassLinker::LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass) {
const size_t num_virtual_methods = klass->NumVirtualMethods();
if (klass->HasSuperClass()) {
const size_t super_vtable_length = klass->GetSuperClass()->GetVTableLength();
const size_t max_count = num_virtual_methods + super_vtable_length;
- size_t actual_count = super_vtable_length;
StackHandleScope<2> hs(self);
Handle<mirror::Class> super_class(hs.NewHandle(klass->GetSuperClass()));
MutableHandle<mirror::ObjectArray<mirror::ArtMethod>> vtable;
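LinkVirtualHashTable above is an open-addressing table: linear probing over uint32_t slots, with the two largest values reserved as "empty" and "removed" sentinels. A stand-alone sketch of the same probing scheme, with illustrative names and no ART dependencies; like the patch's 3x sizing, it assumes the table is larger than the number of insertions, otherwise Add would probe forever:

    #include <cstddef>
    #include <cstdint>
    #include <limits>
    #include <vector>

    class ProbingIndexTable {
     public:
      static constexpr uint32_t kEmpty = std::numeric_limits<uint32_t>::max();
      static constexpr uint32_t kRemoved = kEmpty - 1;

      explicit ProbingIndexTable(size_t size) : slots_(size, kEmpty) {}

      void Add(uint32_t value, uint32_t hash) {
        size_t index = hash % slots_.size();
        while (slots_[index] != kEmpty) {  // Probe until an empty slot.
          if (++index == slots_.size()) {
            index = 0;
          }
        }
        slots_[index] = value;
      }

      // Returns kEmpty if no live entry matches; a match is tombstoned as kRemoved.
      template <typename Pred>
      uint32_t FindAndRemove(uint32_t hash, Pred matches) {
        size_t index = hash % slots_.size();
        while (slots_[index] != kEmpty) {  // An empty slot ends the probe run.
          const uint32_t value = slots_[index];
          if (value != kRemoved && matches(value)) {
            slots_[index] = kRemoved;  // Keep the run contiguous for later lookups.
            return value;
          }
          if (++index == slots_.size()) {
            index = 0;
          }
        }
        return kEmpty;
      }

     private:
      std::vector<uint32_t> slots_;
    };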
@@ -4684,50 +4750,87 @@
for (size_t i = 0; i < super_vtable_length; i++) {
vtable->SetWithoutChecks<false>(i, super_class->GetEmbeddedVTableEntry(i));
}
+ if (num_virtual_methods == 0) {
+ klass->SetVTable(vtable.Get());
+ return true;
+ }
} else {
- CHECK(super_class->GetVTable() != nullptr) << PrettyClass(super_class.Get());
- vtable = hs.NewHandle(super_class->GetVTable()->CopyOf(self, max_count));
+ mirror::ObjectArray<mirror::ArtMethod>* super_vtable = super_class->GetVTable();
+ CHECK(super_vtable != nullptr) << PrettyClass(super_class.Get());
+ if (num_virtual_methods == 0) {
+ klass->SetVTable(super_vtable);
+ return true;
+ }
+ vtable = hs.NewHandle(super_vtable->CopyOf(self, max_count));
if (UNLIKELY(vtable.Get() == nullptr)) {
CHECK(self->IsExceptionPending()); // OOME.
return false;
}
}
-
- // See if any of our virtual methods override the superclass.
+ // How the algorithm works:
+ // 1. Populate hash table by adding num_virtual_methods from klass. The values in the hash
+ // table are: invalid_index for unused slots, index super_vtable_length + i for a virtual
+ // method which has not been matched to a vtable method, and j if the virtual method at the
+ // index overrode the super virtual method at index j.
+ // 2. Loop through super virtual methods; if one is overridden, update its hash table entry to j
+ // (j < super_vtable_length) to avoid redundant checks. (TODO maybe use this info for reducing
+ // the need for the initial vtable which we later shrink back down).
+ // 3. Add the non-overridden methods to the end of the vtable.
+ static constexpr size_t kMaxStackHash = 250;
+ const size_t hash_table_size = num_virtual_methods * 3;
+ uint32_t* hash_table_ptr;
+ std::unique_ptr<uint32_t[]> hash_heap_storage;
+ if (hash_table_size <= kMaxStackHash) {
+ hash_table_ptr = reinterpret_cast<uint32_t*>(
+ alloca(hash_table_size * sizeof(*hash_table_ptr)));
+ } else {
+ hash_heap_storage.reset(new uint32_t[hash_table_size]);
+ hash_table_ptr = hash_heap_storage.get();
+ }
+ LinkVirtualHashTable hash_table(klass, hash_table_size, hash_table_ptr);
+ // Add virtual methods to the hash table.
for (size_t i = 0; i < num_virtual_methods; ++i) {
- mirror::ArtMethod* local_method = klass->GetVirtualMethodDuringLinking(i);
- MethodNameAndSignatureComparator
- virtual_method_name_comparator(local_method->GetInterfaceMethodIfProxy());
- size_t j = 0;
- for (; j < actual_count; ++j) {
- mirror::ArtMethod* super_method = vtable->GetWithoutChecks(j);
- if (super_method->GetDeclaringClass() == klass.Get()) {
- continue; // A previously overridden method.
- }
- if (virtual_method_name_comparator.HasSameNameAndSignature(super_method)) {
- if (klass->CanAccessMember(super_method->GetDeclaringClass(),
- super_method->GetAccessFlags())) {
- if (super_method->IsFinal()) {
- ThrowLinkageError(klass.Get(), "Method %s overrides final method in class %s",
- PrettyMethod(local_method).c_str(),
- super_method->GetDeclaringClassDescriptor());
- return false;
- }
- vtable->SetWithoutChecks<false>(j, local_method);
- local_method->SetMethodIndex(j);
- break;
+ hash_table.Add(i);
+ }
+ // Loop through each super vtable method and see if it is overridden by a method we added to
+ // the hash table.
+ for (size_t j = 0; j < super_vtable_length; ++j) {
+ // Search the hash table to see if this super method is overridden by any method we added.
+ mirror::ArtMethod* super_method = vtable->GetWithoutChecks(j);
+ MethodNameAndSignatureComparator super_method_name_comparator(
+ super_method->GetInterfaceMethodIfProxy());
+ uint32_t hash_index = hash_table.FindAndRemove(&super_method_name_comparator);
+ if (hash_index != hash_table.GetNotFoundIndex()) {
+ mirror::ArtMethod* virtual_method = klass->GetVirtualMethodDuringLinking(hash_index);
+ if (klass->CanAccessMember(super_method->GetDeclaringClass(),
+ super_method->GetAccessFlags())) {
+ if (super_method->IsFinal()) {
+ ThrowLinkageError(klass.Get(), "Method %s overrides final method in class %s",
+ PrettyMethod(virtual_method).c_str(),
+ super_method->GetDeclaringClassDescriptor());
+ return false;
}
- LOG(WARNING) << "Before Android 4.1, method " << PrettyMethod(local_method)
+ vtable->SetWithoutChecks<false>(j, virtual_method);
+ virtual_method->SetMethodIndex(j);
+ } else {
+ LOG(WARNING) << "Before Android 4.1, method " << PrettyMethod(virtual_method)
<< " would have incorrectly overridden the package-private method in "
<< PrettyDescriptor(super_method->GetDeclaringClassDescriptor());
}
}
- if (j == actual_count) {
- // Not overriding, append.
- vtable->SetWithoutChecks<false>(actual_count, local_method);
- local_method->SetMethodIndex(actual_count);
- ++actual_count;
+ }
+ // Add the non-overridden methods at the end.
+ size_t actual_count = super_vtable_length;
+ for (size_t i = 0; i < num_virtual_methods; ++i) {
+ mirror::ArtMethod* local_method = klass->GetVirtualMethodDuringLinking(i);
+ size_t method_idx = local_method->GetMethodIndexDuringLinking();
+ if (method_idx < super_vtable_length &&
+ local_method == vtable->GetWithoutChecks(method_idx)) {
+ continue;
}
+ vtable->SetWithoutChecks<false>(actual_count, local_method);
+ local_method->SetMethodIndex(actual_count);
+ ++actual_count;
}
if (!IsUint(16, actual_count)) {
ThrowClassFormatError(klass.Get(), "Too many methods defined on class: %zd", actual_count);
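The hunk above also chooses where the hash table lives: tables of at most 250 entries are placed on the stack via alloca, larger ones fall back to a heap array owned by a std::unique_ptr. A minimal sketch of that small-buffer pattern, with an illustrative threshold and function name:

    #include <alloca.h>
    #include <cstddef>
    #include <cstdint>
    #include <memory>

    void FillScratchTable(size_t entries) {
      static constexpr size_t kMaxStackEntries = 250;  // Illustrative cut-off.
      uint32_t* table;
      std::unique_ptr<uint32_t[]> heap_storage;  // Owns the buffer only on the heap path.
      if (entries <= kMaxStackEntries) {
        table = static_cast<uint32_t*>(alloca(entries * sizeof(*table)));  // Freed on return.
      } else {
        heap_storage.reset(new uint32_t[entries]);
        table = heap_storage.get();
      }
      for (size_t i = 0; i < entries; ++i) {
        table[i] = 0;  // Use the table only within this frame; alloca memory dies on return.
      }
    }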
@@ -4929,7 +5032,7 @@
self->AllowThreadSuspension();
size_t num_methods = iftable->GetInterface(i)->NumVirtualMethods();
if (num_methods > 0) {
- StackHandleScope<2> hs(self);
+ StackHandleScope<2> hs2(self);
const bool is_super = i < super_ifcount;
const bool super_interface = is_super && extend_super_iftable;
Handle<mirror::ObjectArray<mirror::ArtMethod>> method_array;
@@ -4939,13 +5042,13 @@
DCHECK(if_table != nullptr);
DCHECK(if_table->GetMethodArray(i) != nullptr);
// If we are working on a super interface, try extending the existing method array.
- method_array = hs.NewHandle(if_table->GetMethodArray(i)->Clone(self)->
+ method_array = hs2.NewHandle(if_table->GetMethodArray(i)->Clone(self)->
AsObjectArray<mirror::ArtMethod>());
// We are overwriting a super class interface, try to only virtual methods instead of the
// whole vtable.
- input_array = hs.NewHandle(klass->GetVirtualMethods());
+ input_array = hs2.NewHandle(klass->GetVirtualMethods());
} else {
- method_array = hs.NewHandle(AllocArtMethodArray(self, num_methods));
+ method_array = hs2.NewHandle(AllocArtMethodArray(self, num_methods));
// A new interface, we need the whole vtable incase a new interface method is implemented
// in the whole superclass.
input_array = vtable;
@@ -4962,7 +5065,8 @@
}
for (size_t j = 0; j < num_methods; ++j) {
mirror::ArtMethod* interface_method = iftable->GetInterface(i)->GetVirtualMethod(j);
- MethodNameAndSignatureComparator interface_name_comparator(interface_method);
+ MethodNameAndSignatureComparator interface_name_comparator(
+ interface_method->GetInterfaceMethodIfProxy());
int32_t k;
// For each method listed in the interface's method list, find the
// matching method in our class's method list. We want to favor the
@@ -4977,7 +5081,7 @@
mirror::ArtMethod* vtable_method_for_name_comparison =
vtable_method->GetInterfaceMethodIfProxy();
if (interface_name_comparator.HasSameNameAndSignature(
- vtable_method_for_name_comparison)) {
+ vtable_method_for_name_comparison)) {
if (!vtable_method->IsAbstract() && !vtable_method->IsPublic()) {
ThrowIllegalAccessError(
klass.Get(),
@@ -4996,10 +5100,10 @@
} else if (imt_ref != conflict_method) {
// If we are not a conflict and we have the same signature and name as the imt entry,
// it must be that we overwrote a superclass vtable entry.
- MethodNameAndSignatureComparator
- imt_ref_name_comparator(imt_ref->GetInterfaceMethodIfProxy());
+ MethodNameAndSignatureComparator imt_ref_name_comparator(
+ imt_ref->GetInterfaceMethodIfProxy());
if (imt_ref_name_comparator.HasSameNameAndSignature(
- vtable_method_for_name_comparison)) {
+ vtable_method_for_name_comparison)) {
out_imt->SetReference(imt_index, vtable_method);
} else {
out_imt->SetReference(imt_index, conflict_method);
@@ -5067,9 +5171,9 @@
}
if (kIsDebugBuild) {
- mirror::ObjectArray<mirror::ArtMethod>* vtable = klass->GetVTableDuringLinking();
- for (int i = 0; i < vtable->GetLength(); ++i) {
- CHECK(vtable->GetWithoutChecks(i) != nullptr);
+ mirror::ObjectArray<mirror::ArtMethod>* check_vtable = klass->GetVTableDuringLinking();
+ for (int i = 0; i < check_vtable->GetLength(); ++i) {
+ CHECK(check_vtable->GetWithoutChecks(i) != nullptr);
}
}
@@ -5215,7 +5319,7 @@
<< " class=" << PrettyClass(klass.Get())
<< " field=" << PrettyField(field)
<< " offset="
- << field->GetField32(MemberOffset(mirror::ArtField::OffsetOffset()));
+ << field->GetField32(mirror::ArtField::OffsetOffset());
}
Primitive::Type type = field->GetTypeAsPrimitiveType();
bool is_primitive = type != Primitive::kPrimNot;
@@ -5758,8 +5862,8 @@
"[S",
"[Ljava/lang/StackTraceElement;",
};
- COMPILE_ASSERT(arraysize(class_roots_descriptors) == size_t(kClassRootsMax),
- mismatch_between_class_descriptors_and_class_root_enum);
+ static_assert(arraysize(class_roots_descriptors) == size_t(kClassRootsMax),
+ "Mismatch between class descriptors and class-root enum");
const char* descriptor = class_roots_descriptors[class_root];
CHECK(descriptor != nullptr);
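Here and in several later files the change migrates COMPILE_ASSERT to C++11 static_assert, replacing identifier-style messages with readable strings. A minimal before/after sketch; the COMPILE_ASSERT definition below is only a stand-in for the old macro, not its exact definition:

    #include <cstdint>

    // Pre-C++11 idiom: an array with negative size fails to compile when 'expr' is false.
    #define COMPILE_ASSERT(expr, msg) typedef char msg[(expr) ? 1 : -1]

    COMPILE_ASSERT(sizeof(uint32_t) == 4, uint32_t_must_be_four_bytes);   // Old style.
    static_assert(sizeof(uint32_t) == 4, "uint32_t must be four bytes");  // New style.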
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 8034d62..a1cae4d 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -222,8 +222,7 @@
InvokeType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::ArtMethod* GetResolvedMethod(uint32_t method_idx, mirror::ArtMethod* referrer,
- InvokeType type)
+ mirror::ArtMethod* GetResolvedMethod(uint32_t method_idx, mirror::ArtMethod* referrer)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::ArtMethod* ResolveMethod(Thread* self, uint32_t method_idx, mirror::ArtMethod** referrer,
InvokeType type)
@@ -506,8 +505,7 @@
Handle<mirror::Class> klass, mirror::ClassLoader* class_loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void LoadClassMembers(Thread* self, const DexFile& dex_file, const uint8_t* class_data,
- Handle<mirror::Class> klass, mirror::ClassLoader* class_loader,
- const OatFile::OatClass* oat_class)
+ Handle<mirror::Class> klass, const OatFile::OatClass* oat_class)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void LoadField(const DexFile& dex_file, const ClassDataItemIterator& it,
@@ -581,7 +579,7 @@
bool LinkFields(Thread* self, Handle<mirror::Class> klass, bool is_static, size_t* class_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void LinkCode(Handle<mirror::ArtMethod> method, const OatFile::OatClass* oat_class,
- const DexFile& dex_file, uint32_t dex_method_index, uint32_t method_index)
+ uint32_t class_def_method_index)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CreateReferenceInstanceOffsets(Handle<mirror::Class> klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 88e6265..70807da 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -374,8 +374,8 @@
template <typename T>
struct CheckOffsets {
- CheckOffsets(bool is_static, const char* class_descriptor)
- : is_static(is_static), class_descriptor(class_descriptor) {}
+ CheckOffsets(bool is_static_in, const char* class_descriptor_in)
+ : is_static(is_static_in), class_descriptor(class_descriptor_in) {}
bool is_static;
std::string class_descriptor;
std::vector<CheckOffset> offsets;
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 1ca6eb3..bd0dbaa 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -85,7 +85,7 @@
virtual void SetUp();
// Allow subclases such as CommonCompilerTest to add extra options.
- virtual void SetUpRuntimeOptions(RuntimeOptions* options) {}
+ virtual void SetUpRuntimeOptions(RuntimeOptions* options ATTRIBUTE_UNUSED) {}
void ClearDirectory(const char* dirpath);
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index d9061c8..584743b 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -231,7 +231,7 @@
virtual ~DebugInstrumentationListener() {}
void MethodEntered(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
- uint32_t dex_pc)
+ uint32_t dex_pc ATTRIBUTE_UNUSED)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (method->IsNative()) {
// TODO: post location events is a suspension point and native method entry stubs aren't.
@@ -254,6 +254,7 @@
uint32_t dex_pc)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// We're not recorded to listen to this kind of event, so complain.
+ UNUSED(thread, this_object, method, dex_pc);
LOG(ERROR) << "Unexpected method unwind event in debugger " << PrettyMethod(method)
<< " " << dex_pc;
}
@@ -267,16 +268,18 @@
void FieldRead(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
uint32_t dex_pc, mirror::ArtField* field)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UNUSED(thread);
Dbg::PostFieldAccessEvent(method, dex_pc, this_object, field);
}
- void FieldWritten(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
- uint32_t dex_pc, mirror::ArtField* field, const JValue& field_value)
+ void FieldWritten(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object,
+ mirror::ArtMethod* method, uint32_t dex_pc, mirror::ArtField* field,
+ const JValue& field_value)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Dbg::PostFieldModificationEvent(method, dex_pc, this_object, field, &field_value);
}
- void ExceptionCaught(Thread* thread, const ThrowLocation& throw_location,
+ void ExceptionCaught(Thread* thread ATTRIBUTE_UNUSED, const ThrowLocation& throw_location,
mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
mirror::Throwable* exception_object)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -1143,7 +1146,7 @@
// the primitive types).
// Returns a newly-allocated buffer full of RefTypeId values.
struct ClassListCreator {
- explicit ClassListCreator(std::vector<JDWP::RefTypeId>* classes) : classes(classes) {
+ explicit ClassListCreator(std::vector<JDWP::RefTypeId>* classes_in) : classes(classes_in) {
}
static bool Visit(mirror::Class* c, void* arg) {
@@ -1383,7 +1386,6 @@
mirror::ObjectArray<mirror::Object>* oa = dst->AsObjectArray<mirror::Object>();
for (int i = 0; i < count; ++i) {
JDWP::ObjectId id = request->ReadObjectId();
- JDWP::JdwpError error;
mirror::Object* o = gRegistry->Get<mirror::Object*>(id, &error);
if (error != JDWP::ERR_NONE) {
return error;
@@ -2288,8 +2290,8 @@
static int GetStackDepth(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
struct CountStackDepthVisitor : public StackVisitor {
- explicit CountStackDepthVisitor(Thread* thread)
- : StackVisitor(thread, nullptr), depth(0) {}
+ explicit CountStackDepthVisitor(Thread* thread_in)
+ : StackVisitor(thread_in, nullptr), depth(0) {}
// TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
// annotalysis.
@@ -2327,10 +2329,11 @@
size_t frame_count, JDWP::ExpandBuf* buf) {
class GetFrameVisitor : public StackVisitor {
public:
- GetFrameVisitor(Thread* thread, size_t start_frame, size_t frame_count, JDWP::ExpandBuf* buf)
+ GetFrameVisitor(Thread* thread, size_t start_frame_in, size_t frame_count_in,
+ JDWP::ExpandBuf* buf_in)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
: StackVisitor(thread, nullptr), depth_(0),
- start_frame_(start_frame), frame_count_(frame_count), buf_(buf) {
+ start_frame_(start_frame_in), frame_count_(frame_count_in), buf_(buf_in) {
expandBufAdd4BE(buf_, frame_count_);
}
@@ -2450,9 +2453,9 @@
}
struct GetThisVisitor : public StackVisitor {
- GetThisVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id)
+ GetThisVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id_in)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(thread, context), this_object(nullptr), frame_id(frame_id) {}
+ : StackVisitor(thread, context), this_object(nullptr), frame_id(frame_id_in) {}
// TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
// annotalysis.
@@ -3418,15 +3421,15 @@
//
struct DebugCallbackContext {
- explicit DebugCallbackContext(SingleStepControl* single_step_control, int32_t line_number,
+ explicit DebugCallbackContext(SingleStepControl* single_step_control_cb, int32_t line_number_cb,
const DexFile::CodeItem* code_item)
- : single_step_control_(single_step_control), line_number_(line_number), code_item_(code_item),
- last_pc_valid(false), last_pc(0) {
+ : single_step_control_(single_step_control_cb), line_number_(line_number_cb),
+ code_item_(code_item), last_pc_valid(false), last_pc(0) {
}
- static bool Callback(void* raw_context, uint32_t address, uint32_t line_number) {
+ static bool Callback(void* raw_context, uint32_t address, uint32_t line_number_cb) {
DebugCallbackContext* context = reinterpret_cast<DebugCallbackContext*>(raw_context);
- if (static_cast<int32_t>(line_number) == context->line_number_) {
+ if (static_cast<int32_t>(line_number_cb) == context->line_number_) {
if (!context->last_pc_valid) {
// Everything from this address until the next line change is ours.
context->last_pc = address;
@@ -4373,7 +4376,7 @@
// Send a series of heap segment chunks.
HeapChunkContext context((what == HPSG_WHAT_MERGED_OBJECTS), native);
if (native) {
-#ifdef USE_DLMALLOC
+#if defined(HAVE_ANDROID_OS) && defined(USE_DLMALLOC)
dlmalloc_inspect_all(HeapChunkContext::HeapChunkCallback, &context);
#else
UNIMPLEMENTED(WARNING) << "Native heap inspection is only supported with dlmalloc";
@@ -4481,9 +4484,9 @@
}
struct AllocRecordStackVisitor : public StackVisitor {
- AllocRecordStackVisitor(Thread* thread, AllocRecord* record)
+ AllocRecordStackVisitor(Thread* thread, AllocRecord* record_in)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(thread, nullptr), record(record), depth(0) {}
+ : StackVisitor(thread, nullptr), record(record_in), depth(0) {}
// TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
// annotalysis.
diff --git a/runtime/debugger.h b/runtime/debugger.h
index 48e457f..488ba7f 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -189,6 +189,7 @@
// Method for selective deoptimization.
jmethodID method_;
};
+std::ostream& operator<<(std::ostream& os, const DeoptimizationRequest::Kind& rhs);
class Dbg {
public:
@@ -493,7 +494,7 @@
/*
* Debugger notification
*/
- enum {
+ enum EventFlag {
kBreakpoint = 0x01,
kSingleStep = 0x02,
kMethodEntry = 0x04,
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 761441e..16bc33f 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -39,7 +39,6 @@
#include "mirror/string.h"
#include "os.h"
#include "safe_map.h"
-#include "ScopedFd.h"
#include "handle_scope-inl.h"
#include "thread.h"
#include "utf-inl.h"
@@ -47,6 +46,11 @@
#include "well_known_classes.h"
#include "zip_archive.h"
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+#include "ScopedFd.h"
+#pragma GCC diagnostic pop
+
namespace art {
const uint8_t DexFile::kDexMagic[] = { 'd', 'e', 'x', '\n' };
@@ -454,8 +458,8 @@
index = new Index(num_class_defs);
for (uint32_t i = 0; i < num_class_defs; ++i) {
const ClassDef& class_def = GetClassDef(i);
- const char* descriptor = GetClassDescriptor(class_def);
- index->insert(std::make_pair(descriptor, &class_def));
+ const char* class_descriptor = GetClassDescriptor(class_def);
+ index->insert(std::make_pair(class_descriptor, &class_def));
}
// Sanity check the index still doesn't exist, only 1 thread should build it.
CHECK(class_def_index_.LoadSequentiallyConsistent() == nullptr);
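With -Wshadow newly enabled in the build flags, headers the project does not own can be wrapped in diagnostic pragmas so a -Werror build still succeeds, as done for ScopedFd.h above. The pattern in isolation, with a hypothetical header name:

    #pragma GCC diagnostic push
    #pragma GCC diagnostic ignored "-Wshadow"
    #include "header_that_shadows_locals.h"  // Hypothetical third-party header.
    #pragma GCC diagnostic pop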
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 10fe6bf..8ced664 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -206,10 +206,10 @@
// (class or interface). These are all in the lower 16b and do not contain runtime flags.
uint32_t GetJavaAccessFlags() const {
// Make sure that none of our runtime-only flags are set.
- COMPILE_ASSERT((kAccValidClassFlags & kAccJavaFlagsMask) == kAccValidClassFlags,
- valid_class_flags_not_subset_of_java_flags);
- COMPILE_ASSERT((kAccValidInterfaceFlags & kAccJavaFlagsMask) == kAccValidInterfaceFlags,
- valid_interface_flags_not_subset_of_java_flags);
+ static_assert((kAccValidClassFlags & kAccJavaFlagsMask) == kAccValidClassFlags,
+ "Valid class flags not a subset of Java flags");
+ static_assert((kAccValidInterfaceFlags & kAccJavaFlagsMask) == kAccValidInterfaceFlags,
+ "Valid interface flags not a subset of Java flags");
if ((access_flags_ & kAccInterface) != 0) {
// Interface.
@@ -1252,7 +1252,7 @@
template<bool kTransactionActive>
void ReadValueToField(Handle<mirror::ArtField> field) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool HasNext() { return pos_ < array_size_; }
+ bool HasNext() const { return pos_ < array_size_; }
void Next();
diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h
index b913220..af5d9d0 100644
--- a/runtime/dex_instruction.h
+++ b/runtime/dex_instruction.h
@@ -35,7 +35,7 @@
class Instruction {
public:
// NOP-encoded switch-statement signatures.
- enum {
+ enum Signatures {
kPackedSwitchSignature = 0x0100,
kSparseSwitchSignature = 0x0200,
kArrayDataSignature = 0x0300,
@@ -79,15 +79,13 @@
DISALLOW_COPY_AND_ASSIGN(ArrayDataPayload);
};
- // TODO: the code layout below is deliberate to avoid this enum being picked up by
- // generate-operator-out.py.
- enum Code
- { // NOLINT(whitespace/braces)
+ enum Code { // private marker to keep generate-operator-out.py from processing this enum.
#define INSTRUCTION_ENUM(opcode, cname, p, f, r, i, a, v) cname = opcode,
#include "dex_instruction_list.h"
DEX_INSTRUCTION_LIST(INSTRUCTION_ENUM)
#undef DEX_INSTRUCTION_LIST
#undef INSTRUCTION_ENUM
+ RSUB_INT_LIT16 = RSUB_INT,
};
enum Format {
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index fb53271..b6cf921 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -60,6 +60,7 @@
// GDB will place breakpoint into this function.
// To prevent GCC from inlining or removing it we place noinline attribute
// and inline assembler statement inside.
+ void __attribute__((noinline)) __jit_debug_register_code();
void __attribute__((noinline)) __jit_debug_register_code() {
__asm__("");
}
@@ -2396,22 +2397,22 @@
Elf_Shdr* sh = GetSectionHeader(i);
CHECK(sh != nullptr);
if (sh->sh_type == SHT_REL) {
- for (uint32_t i = 0; i < GetRelNum(*sh); i++) {
- Elf_Rel& rel = GetRel(*sh, i);
+ for (uint32_t j = 0; j < GetRelNum(*sh); j++) {
+ Elf_Rel& rel = GetRel(*sh, j);
if (DEBUG_FIXUP) {
LOG(INFO) << StringPrintf("In %s moving Elf_Rel[%d] from 0x%" PRIx64 " to 0x%" PRIx64,
- GetFile().GetPath().c_str(), i,
+ GetFile().GetPath().c_str(), j,
static_cast<uint64_t>(rel.r_offset),
static_cast<uint64_t>(rel.r_offset + base_address));
}
rel.r_offset += base_address;
}
} else if (sh->sh_type == SHT_RELA) {
- for (uint32_t i = 0; i < GetRelaNum(*sh); i++) {
- Elf_Rela& rela = GetRela(*sh, i);
+ for (uint32_t j = 0; j < GetRelaNum(*sh); j++) {
+ Elf_Rela& rela = GetRela(*sh, j);
if (DEBUG_FIXUP) {
LOG(INFO) << StringPrintf("In %s moving Elf_Rela[%d] from 0x%" PRIx64 " to 0x%" PRIx64,
- GetFile().GetPath().c_str(), i,
+ GetFile().GetPath().c_str(), j,
static_cast<uint64_t>(rela.r_offset),
static_cast<uint64_t>(rela.r_offset + base_address));
}
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 670bf2a..1a8ca02 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -35,7 +35,6 @@
namespace art {
-// TODO: Fix no thread safety analysis when GCC can handle template specialization.
template <const bool kAccessCheck>
ALWAYS_INLINE
static inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
@@ -90,7 +89,6 @@
return klass;
}
-// TODO: Fix no thread safety analysis when annotalysis is smarter.
ALWAYS_INLINE
static inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
Thread* self,
@@ -120,7 +118,6 @@
// cannot be resolved, throw an error. If it can, use it to create an instance.
// When verification/compiler hasn't been able to verify access, optionally perform an access
// check.
-// TODO: Fix NO_THREAD_SAFETY_ANALYSIS when GCC is smarter.
template <bool kAccessCheck, bool kInstrumented>
ALWAYS_INLINE
static inline mirror::Object* AllocObjectFromCode(uint32_t type_idx,
@@ -140,11 +137,9 @@
}
// Given the context of a calling Method and a resolved class, create an instance.
-// TODO: Fix NO_THREAD_SAFETY_ANALYSIS when GCC is smarter.
template <bool kInstrumented>
ALWAYS_INLINE
static inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
- mirror::ArtMethod* method,
Thread* self,
gc::AllocatorType allocator_type) {
DCHECK(klass != nullptr);
@@ -163,11 +158,9 @@
}
// Given the context of a calling Method and an initialized class, create an instance.
-// TODO: Fix NO_THREAD_SAFETY_ANALYSIS when GCC is smarter.
template <bool kInstrumented>
ALWAYS_INLINE
static inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klass,
- mirror::ArtMethod* method,
Thread* self,
gc::AllocatorType allocator_type) {
DCHECK(klass != nullptr);
@@ -176,7 +169,6 @@
}
-// TODO: Fix no thread safety analysis when GCC can handle template specialization.
template <bool kAccessCheck>
ALWAYS_INLINE
static inline mirror::Class* CheckArrayAlloc(uint32_t type_idx,
@@ -213,7 +205,6 @@
// it cannot be resolved, throw an error. If it can, use it to create an array.
// When verification/compiler hasn't been able to verify access, optionally perform an access
// check.
-// TODO: Fix no thread safety analysis when GCC can handle template specialization.
template <bool kAccessCheck, bool kInstrumented>
ALWAYS_INLINE
static inline mirror::Array* AllocArrayFromCode(uint32_t type_idx,
@@ -362,7 +353,7 @@
mirror::Object** this_object,
mirror::ArtMethod** referrer, Thread* self) {
ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
- mirror::ArtMethod* resolved_method = class_linker->GetResolvedMethod(method_idx, *referrer, type);
+ mirror::ArtMethod* resolved_method = class_linker->GetResolvedMethod(method_idx, *referrer);
if (resolved_method == nullptr) {
StackHandleScope<1> hs(self);
mirror::Object* null_this = nullptr;
@@ -541,8 +532,7 @@
mirror::Object* this_object,
mirror::ArtMethod* referrer,
bool access_check, InvokeType type) {
- bool is_direct = type == kStatic || type == kDirect;
- if (UNLIKELY(this_object == NULL && !is_direct)) {
+ if (UNLIKELY(this_object == NULL && type != kStatic)) {
return NULL;
}
mirror::ArtMethod* resolved_method =
@@ -567,7 +557,7 @@
}
if (type == kInterface) { // Most common form of slow path dispatch.
return this_object->GetClass()->FindVirtualMethodForInterface(resolved_method);
- } else if (is_direct) {
+ } else if (type == kStatic || type == kDirect) {
return resolved_method;
} else if (type == kSuper) {
return referrer->GetDeclaringClass()->GetSuperClass()
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index c46d887..311cafa 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -47,7 +47,6 @@
Thread* self, bool* slow_path)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-// TODO: Fix no thread safety analysis when annotalysis is smarter.
ALWAYS_INLINE static inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
Thread* self, bool* slow_path)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -66,7 +65,6 @@
// Given the context of a calling Method and a resolved class, create an instance.
template <bool kInstrumented>
ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
- mirror::ArtMethod* method,
Thread* self,
gc::AllocatorType allocator_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -74,7 +72,6 @@
// Given the context of a calling Method and an initialized class, create an instance.
template <bool kInstrumented>
ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klass,
- mirror::ArtMethod* method,
Thread* self,
gc::AllocatorType allocator_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/entrypoints/portable/portable_fillarray_entrypoints.cc b/runtime/entrypoints/portable/portable_fillarray_entrypoints.cc
index f0ad6de..afe769e 100644
--- a/runtime/entrypoints/portable/portable_fillarray_entrypoints.cc
+++ b/runtime/entrypoints/portable/portable_fillarray_entrypoints.cc
@@ -25,6 +25,7 @@
mirror::Array* array,
uint32_t payload_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UNUSED(dex_pc);
const DexFile::CodeItem* code_item = method->GetCodeItem();
const Instruction::ArrayDataPayload* payload =
reinterpret_cast<const Instruction::ArrayDataPayload*>(code_item->insns_ + payload_offset);
diff --git a/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc b/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
index 61d66ba..e7975f8 100644
--- a/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
@@ -121,6 +121,7 @@
private:
static size_t ComputeArgsInRegs(MethodHelper& mh) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if (defined(__i386__))
+ UNUSED(mh);
return 0;
#else
size_t args_in_regs = 0;
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index 7dbfdd5..c0b79b2 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -29,8 +29,7 @@
#define GENERATE_ENTRYPOINTS_FOR_ALLOCATOR_INST(suffix, suffix2, instrumented_bool, allocator_type) \
extern "C" mirror::Object* artAllocObjectFromCode ##suffix##suffix2( \
- uint32_t type_idx, mirror::ArtMethod* method, Thread* self, \
- StackReference<mirror::ArtMethod>* sp) \
+ uint32_t type_idx, mirror::ArtMethod* method, Thread* self) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \
@@ -59,6 +58,7 @@
extern "C" mirror::Object* artAllocObjectFromCodeResolved##suffix##suffix2( \
mirror::Class* klass, mirror::ArtMethod* method, Thread* self) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+ UNUSED(method); \
ScopedQuickEntrypointChecks sqec(self); \
if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \
if (LIKELY(klass->IsInitialized())) { \
@@ -80,11 +80,12 @@
} \
} \
} \
- return AllocObjectFromCodeResolved<instrumented_bool>(klass, method, self, allocator_type); \
+ return AllocObjectFromCodeResolved<instrumented_bool>(klass, self, allocator_type); \
} \
extern "C" mirror::Object* artAllocObjectFromCodeInitialized##suffix##suffix2( \
mirror::Class* klass, mirror::ArtMethod* method, Thread* self) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+ UNUSED(method); \
ScopedQuickEntrypointChecks sqec(self); \
if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \
size_t byte_count = klass->GetObjectSize(); \
@@ -104,7 +105,7 @@
return obj; \
} \
} \
- return AllocObjectFromCodeInitialized<instrumented_bool>(klass, method, self, allocator_type); \
+ return AllocObjectFromCodeInitialized<instrumented_bool>(klass, self, allocator_type); \
} \
extern "C" mirror::Object* artAllocObjectFromCodeWithAccessCheck##suffix##suffix2( \
uint32_t type_idx, mirror::ArtMethod* method, Thread* self) \
@@ -226,32 +227,34 @@
}
void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints) {
- switch (entry_points_allocator) {
#if !defined(__APPLE__) || !defined(__LP64__)
+ switch (entry_points_allocator) {
case gc::kAllocatorTypeDlMalloc: {
SetQuickAllocEntryPoints_dlmalloc(qpoints, entry_points_instrumented);
- break;
+ return;
}
case gc::kAllocatorTypeRosAlloc: {
SetQuickAllocEntryPoints_rosalloc(qpoints, entry_points_instrumented);
- break;
+ return;
}
case gc::kAllocatorTypeBumpPointer: {
CHECK(kMovingCollector);
SetQuickAllocEntryPoints_bump_pointer(qpoints, entry_points_instrumented);
- break;
+ return;
}
case gc::kAllocatorTypeTLAB: {
CHECK(kMovingCollector);
SetQuickAllocEntryPoints_tlab(qpoints, entry_points_instrumented);
+ return;
+ }
+ default:
break;
- }
-#endif
- default: {
- UNIMPLEMENTED(FATAL);
- UNREACHABLE();
- }
}
+#else
+ UNUSED(qpoints);
+#endif
+ UNIMPLEMENTED(FATAL);
+ UNREACHABLE();
}
} // namespace art
diff --git a/runtime/entrypoints/quick/quick_default_externs.h b/runtime/entrypoints/quick/quick_default_externs.h
new file mode 100644
index 0000000..7d77721
--- /dev/null
+++ b/runtime/entrypoints/quick/quick_default_externs.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_DEFAULT_EXTERNS_H_
+#define ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_DEFAULT_EXTERNS_H_
+
+#include <cstdint>
+
+// These are extern declarations of assembly stubs with common names.
+
+// Cast entrypoints.
+extern "C" void art_quick_check_cast(void*, void*);
+
+// DexCache entrypoints.
+extern "C" void* art_quick_initialize_static_storage(uint32_t, void*);
+extern "C" void* art_quick_initialize_type(uint32_t, void*);
+extern "C" void* art_quick_initialize_type_and_verify_access(uint32_t, void*);
+extern "C" void* art_quick_resolve_string(void*, uint32_t);
+
+// Field entrypoints.
+extern "C" int art_quick_set8_instance(uint32_t, void*, int8_t);
+extern "C" int art_quick_set8_static(uint32_t, int8_t);
+extern "C" int art_quick_set16_instance(uint32_t, void*, int16_t);
+extern "C" int art_quick_set16_static(uint32_t, int16_t);
+extern "C" int art_quick_set32_instance(uint32_t, void*, int32_t);
+extern "C" int art_quick_set32_static(uint32_t, int32_t);
+extern "C" int art_quick_set64_instance(uint32_t, void*, int64_t);
+extern "C" int art_quick_set64_static(uint32_t, int64_t);
+extern "C" int art_quick_set_obj_instance(uint32_t, void*, void*);
+extern "C" int art_quick_set_obj_static(uint32_t, void*);
+extern "C" int8_t art_quick_get_byte_instance(uint32_t, void*);
+extern "C" uint8_t art_quick_get_boolean_instance(uint32_t, void*);
+extern "C" int8_t art_quick_get_byte_static(uint32_t);
+extern "C" uint8_t art_quick_get_boolean_static(uint32_t);
+extern "C" int16_t art_quick_get_short_instance(uint32_t, void*);
+extern "C" uint16_t art_quick_get_char_instance(uint32_t, void*);
+extern "C" int16_t art_quick_get_short_static(uint32_t);
+extern "C" uint16_t art_quick_get_char_static(uint32_t);
+extern "C" int32_t art_quick_get32_instance(uint32_t, void*);
+extern "C" int32_t art_quick_get32_static(uint32_t);
+extern "C" int64_t art_quick_get64_instance(uint32_t, void*);
+extern "C" int64_t art_quick_get64_static(uint32_t);
+extern "C" void* art_quick_get_obj_instance(uint32_t, void*);
+extern "C" void* art_quick_get_obj_static(uint32_t);
+
+// Array entrypoints.
+extern "C" void art_quick_aput_obj_with_null_and_bound_check(void*, uint32_t, void*);
+extern "C" void art_quick_aput_obj_with_bound_check(void*, uint32_t, void*);
+extern "C" void art_quick_aput_obj(void*, uint32_t, void*);
+extern "C" void art_quick_handle_fill_data(void*, void*);
+
+// Lock entrypoints.
+extern "C" void art_quick_lock_object(void*);
+extern "C" void art_quick_unlock_object(void*);
+
+// Math entrypoints.
+extern "C" int64_t art_quick_d2l(double);
+extern "C" int64_t art_quick_f2l(float);
+extern "C" int64_t art_quick_ldiv(int64_t, int64_t);
+extern "C" int64_t art_quick_lmod(int64_t, int64_t);
+extern "C" int64_t art_quick_lmul(int64_t, int64_t);
+extern "C" uint64_t art_quick_lshl(uint64_t, uint32_t);
+extern "C" uint64_t art_quick_lshr(uint64_t, uint32_t);
+extern "C" uint64_t art_quick_lushr(uint64_t, uint32_t);
+extern "C" int64_t art_quick_mul_long(int64_t, int64_t);
+extern "C" uint64_t art_quick_shl_long(uint64_t, uint32_t);
+extern "C" uint64_t art_quick_shr_long(uint64_t, uint32_t);
+extern "C" uint64_t art_quick_ushr_long(uint64_t, uint32_t);
+
+// Intrinsic entrypoints.
+extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t);
+extern "C" int32_t art_quick_string_compareto(void*, void*);
+extern "C" void* art_quick_memcpy(void*, const void*, size_t);
+
+// Invoke entrypoints.
+extern "C" void art_quick_imt_conflict_trampoline(art::mirror::ArtMethod*);
+extern "C" void art_quick_resolution_trampoline(art::mirror::ArtMethod*);
+extern "C" void art_quick_to_interpreter_bridge(art::mirror::ArtMethod*);
+extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*);
+
+// Thread entrypoints.
+extern "C" void art_quick_test_suspend();
+
+// Throw entrypoints.
+extern "C" void art_quick_deliver_exception(void*);
+extern "C" void art_quick_throw_array_bounds(int32_t index, int32_t limit);
+extern "C" void art_quick_throw_div_zero();
+extern "C" void art_quick_throw_no_such_method(int32_t method_idx);
+extern "C" void art_quick_throw_null_pointer_exception();
+extern "C" void art_quick_throw_stack_overflow(void*);
+
+#endif // ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_DEFAULT_EXTERNS_H_
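The new header only declares the assembly stubs; each architecture defines them and installs them into its entrypoint table. A self-contained sketch of that wiring, reusing two stubs declared above; the struct and field names here are illustrative, not ART's actual QuickEntryPoints layout:

    // Declarations matching the header above (definitions live in per-arch assembly).
    extern "C" void art_quick_lock_object(void*);
    extern "C" void art_quick_unlock_object(void*);

    // Illustrative entrypoint table; the real one is arch- and runtime-specific.
    struct IllustrativeEntryPoints {
      void (*pLockObject)(void*);
      void (*pUnlockObject)(void*);
    };

    void InitIllustrativeEntryPoints(IllustrativeEntryPoints* points) {
      points->pLockObject = art_quick_lock_object;      // Same C name on every architecture.
      points->pUnlockObject = art_quick_unlock_object;
    }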
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index e0aab75..4f61707 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -229,16 +229,17 @@
gpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset),
fpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset),
stack_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize
- + StackArgumentStartFromShorty(is_static, shorty, shorty_len)),
+ + sizeof(StackReference<mirror::ArtMethod>)), // Skip StackReference<ArtMethod>.
gpr_index_(0), fpr_index_(0), fpr_double_index_(0), stack_index_(0),
cur_type_(Primitive::kPrimVoid), is_split_long_or_double_(false) {
- COMPILE_ASSERT(kQuickSoftFloatAbi == (kNumQuickFprArgs == 0), knum_of_quick_fpr_arg_unexpected);
- COMPILE_ASSERT(!(kQuickSoftFloatAbi && kQuickDoubleRegAlignedFloatBackFilled),
- kdouble_align_unexpected);
+ static_assert(kQuickSoftFloatAbi == (kNumQuickFprArgs == 0),
+ "Number of Quick FPR arguments unexpected");
+ static_assert(!(kQuickSoftFloatAbi && kQuickDoubleRegAlignedFloatBackFilled),
+ "Double alignment unexpected");
// For register alignment, we want to assume that counters(fpr_double_index_) are even if the
// next register is even.
- COMPILE_ASSERT(!kQuickDoubleRegAlignedFloatBackFilled || kNumQuickFprArgs % 2 == 0,
- knum_quick_fpr_args_not_even);
+ static_assert(!kQuickDoubleRegAlignedFloatBackFilled || kNumQuickFprArgs % 2 == 0,
+ "Number of Quick FPR arguments not even");
}
virtual ~QuickArgumentVisitor() {}
@@ -409,13 +410,6 @@
}
}
- private:
- static size_t StackArgumentStartFromShorty(bool is_static, const char* shorty,
- uint32_t shorty_len) {
- // 'stack_args_' points to the first method's argument
- return sizeof(StackReference<mirror::ArtMethod>); // Skip StackReference<ArtMethod>.
- }
-
protected:
const bool is_static_;
const char* const shorty_;
@@ -527,8 +521,8 @@
MethodHelper mh(hs.NewHandle(method));
if (mh.Get()->IsStatic() && !mh.Get()->GetDeclaringClass()->IsInitialized()) {
// Ensure static method's class is initialized.
- StackHandleScope<1> hs(self);
- Handle<mirror::Class> h_class(hs.NewHandle(mh.Get()->GetDeclaringClass()));
+ StackHandleScope<1> hs2(self);
+ Handle<mirror::Class> h_class(hs2.NewHandle(mh.Get()->GetDeclaringClass()));
if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
DCHECK(Thread::Current()->IsExceptionPending()) << PrettyMethod(mh.Get());
self->PopManagedStackFragment(fragment);
@@ -972,8 +966,8 @@
delegate_(delegate) {
// For register alignment, we want to assume that counters (gpr_index_, fpr_index_) are even iff
// the next register is even; counting down is just to make the compiler happy...
- COMPILE_ASSERT(kNumNativeGprArgs % 2 == 0U, knum_native_gpr_args_not_even);
- COMPILE_ASSERT(kNumNativeFprArgs % 2 == 0U, knum_native_fpr_args_not_even);
+ static_assert(kNumNativeGprArgs % 2 == 0U, "Number of native GPR arguments not even");
+ static_assert(kNumNativeFprArgs % 2 == 0U, "Number of native FPR arguments not even");
}
virtual ~BuildNativeCallFrameStateMachine() {}
@@ -1234,7 +1228,9 @@
}
virtual void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {}
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UNUSED(sm);
+ }
void Walk(const char* shorty, uint32_t shorty_len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize> sm(this);
@@ -1366,8 +1362,7 @@
// WARNING: After this, *sp won't be pointing to the method anymore!
uint8_t* ComputeLayout(Thread* self, StackReference<mirror::ArtMethod>** m,
- bool is_static, const char* shorty, uint32_t shorty_len,
- HandleScope** handle_scope,
+ const char* shorty, uint32_t shorty_len, HandleScope** handle_scope,
uintptr_t** start_stack, uintptr_t** start_gpr, uint32_t** start_fpr)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Walk(shorty, shorty_len);
@@ -1441,9 +1436,9 @@
cur_stack_arg_++;
}
- virtual uintptr_t PushHandle(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ virtual uintptr_t PushHandle(mirror::Object*) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
LOG(FATAL) << "(Non-JNI) Native call does not use handles.";
- return 0U;
+ UNREACHABLE();
}
private:
@@ -1464,7 +1459,7 @@
uintptr_t* start_gpr_reg;
uint32_t* start_fpr_reg;
uintptr_t* start_stack_arg;
- bottom_of_used_area_ = fsc.ComputeLayout(self, sp, is_static, shorty, shorty_len,
+ bottom_of_used_area_ = fsc.ComputeLayout(self, sp, shorty, shorty_len,
&handle_scope_,
&start_stack_arg,
&start_gpr_reg, &start_fpr_reg);
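A recurring change in this file (and in many files below) is the move from the old COMPILE_ASSERT macro to C++11 static_assert, whose second argument is an ordinary string. A minimal before/after sketch, assuming a typical array-of-negative-size emulation for the old macro (ART's real macro may have differed):

```cpp
#include <cstddef>

// Hypothetical pre-C++11 emulation: an array with negative size fails to
// compile, and the "message" has to be spelled as a valid identifier.
#define COMPILE_ASSERT(expr, msg) typedef char msg[(expr) ? 1 : -1]

constexpr std::size_t kNumQuickFprArgs = 8;

// Old style.
COMPILE_ASSERT(kNumQuickFprArgs % 2 == 0, knum_quick_fpr_args_not_even);

// New style: a real string literal that the compiler reports verbatim.
static_assert(kNumQuickFprArgs % 2 == 0, "Number of Quick FPR arguments not even");

int main() {
  return 0;
}
```
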
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
index a9af754..85a0b99 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
@@ -34,7 +34,7 @@
t->TransitionFromSuspendedToRunnable(); // So we can create callee-save methods.
r->SetInstructionSet(isa);
- mirror::ArtMethod* save_method = r->CreateCalleeSaveMethod(type);
+ mirror::ArtMethod* save_method = r->CreateCalleeSaveMethod();
r->SetCalleeSaveMethod(save_method, type);
t->TransitionFromRunnableToSuspended(ThreadState::kNative); // So we can shut down.
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index 1365cd4..ee9b221 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -165,7 +165,7 @@
std::vector<uintptr_t> fake_stack;
Runtime* r = Runtime::Current();
r->SetInstructionSet(kRuntimeISA);
- mirror::ArtMethod* save_method = r->CreateCalleeSaveMethod(Runtime::kSaveAll);
+ mirror::ArtMethod* save_method = r->CreateCalleeSaveMethod();
r->SetCalleeSaveMethod(save_method, Runtime::kSaveAll);
QuickMethodFrameInfo frame_info = save_method->GetQuickFrameInfo();
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 4ae929b..c473684 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -383,7 +383,7 @@
bool JavaStackTraceHandler::Action(int sig, siginfo_t* siginfo, void* context) {
// Make sure that we are in the generated code, but we may not have a dex pc.
-
+ UNUSED(sig);
#ifdef TEST_NESTED_SIGNAL
bool in_generated_code = true;
#else
diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc
index 9a6f2b2..b7b6099 100644
--- a/runtime/gc/accounting/card_table.cc
+++ b/runtime/gc/accounting/card_table.cc
@@ -66,7 +66,7 @@
CHECK(mem_map.get() != NULL) << "couldn't allocate card table: " << error_msg;
// All zeros is the correct initial value; all clean. Anonymous mmaps are initialized to zero, we
// don't clear the card table to avoid unnecessary pages being allocated
- COMPILE_ASSERT(kCardClean == 0, card_clean_must_be_0);
+ static_assert(kCardClean == 0, "kCardClean must be 0");
uint8_t* cardtable_begin = mem_map->Begin();
CHECK(cardtable_begin != NULL);
@@ -98,7 +98,7 @@
}
void CardTable::ClearCardTable() {
- COMPILE_ASSERT(kCardClean == 0, clean_card_must_be_0);
+ static_assert(kCardClean == 0, "kCardClean must be 0");
mem_map_->MadviseDontNeedAndZero();
}
diff --git a/runtime/gc/accounting/heap_bitmap-inl.h b/runtime/gc/accounting/heap_bitmap-inl.h
index c67542f..34c15c7 100644
--- a/runtime/gc/accounting/heap_bitmap-inl.h
+++ b/runtime/gc/accounting/heap_bitmap-inl.h
@@ -40,9 +40,9 @@
if (LIKELY(bitmap != nullptr)) {
return bitmap->Test(obj);
}
- for (const auto& bitmap : large_object_bitmaps_) {
- if (LIKELY(bitmap->HasAddress(obj))) {
- return bitmap->Test(obj);
+ for (const auto& lo_bitmap : large_object_bitmaps_) {
+ if (LIKELY(lo_bitmap->HasAddress(obj))) {
+ return lo_bitmap->Test(obj);
}
}
LOG(FATAL) << "Invalid object " << obj;
@@ -55,9 +55,9 @@
bitmap->Clear(obj);
return;
}
- for (const auto& bitmap : large_object_bitmaps_) {
- if (LIKELY(bitmap->HasAddress(obj))) {
- bitmap->Clear(obj);
+ for (const auto& lo_bitmap : large_object_bitmaps_) {
+ if (LIKELY(lo_bitmap->HasAddress(obj))) {
+ lo_bitmap->Clear(obj);
}
}
LOG(FATAL) << "Invalid object " << obj;
@@ -70,9 +70,9 @@
return bitmap->Set(obj);
}
visitor(obj);
- for (const auto& bitmap : large_object_bitmaps_) {
- if (LIKELY(bitmap->HasAddress(obj))) {
- return bitmap->Set(obj);
+ for (const auto& lo_bitmap : large_object_bitmaps_) {
+ if (LIKELY(lo_bitmap->HasAddress(obj))) {
+ return lo_bitmap->Set(obj);
}
}
LOG(FATAL) << "Invalid object " << obj;
@@ -87,9 +87,9 @@
return bitmap->AtomicTestAndSet(obj);
}
visitor(obj);
- for (const auto& bitmap : large_object_bitmaps_) {
- if (LIKELY(bitmap->HasAddress(obj))) {
- return bitmap->AtomicTestAndSet(obj);
+ for (const auto& lo_bitmap : large_object_bitmaps_) {
+ if (LIKELY(lo_bitmap->HasAddress(obj))) {
+ return lo_bitmap->AtomicTestAndSet(obj);
}
}
LOG(FATAL) << "Invalid object " << obj;
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 753b42d..0a15e9e 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -46,6 +46,7 @@
}
inline void operator()(uint8_t* card, uint8_t expected_value, uint8_t new_value) const {
+ UNUSED(new_value);
if (expected_value == CardTable::kCardDirty) {
cleared_cards_->insert(card);
}
@@ -62,6 +63,7 @@
}
void operator()(uint8_t* card, uint8_t expected_card, uint8_t new_card) const {
+ UNUSED(new_card);
if (expected_card == CardTable::kCardDirty) {
cleared_cards_->push_back(card);
}
diff --git a/runtime/gc/accounting/remembered_set.cc b/runtime/gc/accounting/remembered_set.cc
index d43dc0a..b16a146 100644
--- a/runtime/gc/accounting/remembered_set.cc
+++ b/runtime/gc/accounting/remembered_set.cc
@@ -43,6 +43,7 @@
: dirty_cards_(dirty_cards) {}
void operator()(uint8_t* card, uint8_t expected_value, uint8_t new_value) const {
+ UNUSED(new_value);
if (expected_value == CardTable::kCardDirty) {
dirty_cards_->insert(card);
}
diff --git a/runtime/gc/accounting/space_bitmap_test.cc b/runtime/gc/accounting/space_bitmap_test.cc
index 40856fc..850325a 100644
--- a/runtime/gc/accounting/space_bitmap_test.cc
+++ b/runtime/gc/accounting/space_bitmap_test.cc
@@ -91,7 +91,7 @@
public:
explicit SimpleCounter(size_t* counter) : count_(counter) {}
- void operator()(mirror::Object* obj) const {
+ void operator()(mirror::Object* obj ATTRIBUTE_UNUSED) const {
(*count_)++;
}
diff --git a/runtime/gc/allocator/dlmalloc.cc b/runtime/gc/allocator/dlmalloc.cc
index fbeba7f..8558f96 100644
--- a/runtime/gc/allocator/dlmalloc.cc
+++ b/runtime/gc/allocator/dlmalloc.cc
@@ -19,8 +19,8 @@
#include "base/logging.h"
// ART specific morecore implementation defined in space.cc.
+static void* art_heap_morecore(void* m, intptr_t increment);
#define MORECORE(x) art_heap_morecore(m, x)
-extern "C" void* art_heap_morecore(void* m, intptr_t increment);
// Custom heap error handling.
#define PROCEED_ON_ERROR 0
@@ -31,19 +31,24 @@
// Ugly inclusion of C file so that ART specific #defines configure dlmalloc for our use for
// mspaces (regular dlmalloc is still declared in bionic).
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wredundant-decls"
#pragma GCC diagnostic ignored "-Wempty-body"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "../../../bionic/libc/upstream-dlmalloc/malloc.c"
-#pragma GCC diagnostic warning "-Wstrict-aliasing"
-#pragma GCC diagnostic warning "-Wempty-body"
+#pragma GCC diagnostic pop
+static void* art_heap_morecore(void* m, intptr_t increment) {
+ return ::art::gc::allocator::ArtDlMallocMoreCore(m, increment);
+}
static void art_heap_corruption(const char* function) {
LOG(::art::FATAL) << "Corrupt heap detected in: " << function;
}
static void art_heap_usage_error(const char* function, void* p) {
- LOG(::art::FATAL) << "Incorrect use of function '" << function << "' argument " << p << " not expected";
+ LOG(::art::FATAL) << "Incorrect use of function '" << function << "' argument " << p
+ << " not expected";
}
#include "globals.h"
@@ -70,7 +75,9 @@
}
}
-extern "C" void DlmallocBytesAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
+extern "C" void DlmallocBytesAllocatedCallback(void* start ATTRIBUTE_UNUSED,
+ void* end ATTRIBUTE_UNUSED, size_t used_bytes,
+ void* arg) {
if (used_bytes == 0) {
return;
}
@@ -78,7 +85,10 @@
*bytes_allocated += used_bytes + sizeof(size_t);
}
-extern "C" void DlmallocObjectsAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
+extern "C" void DlmallocObjectsAllocatedCallback(void* start, void* end, size_t used_bytes,
+ void* arg) {
+ UNUSED(start);
+ UNUSED(end);
if (used_bytes == 0) {
return;
}
diff --git a/runtime/gc/allocator/dlmalloc.h b/runtime/gc/allocator/dlmalloc.h
index c820b19..0e91a43 100644
--- a/runtime/gc/allocator/dlmalloc.h
+++ b/runtime/gc/allocator/dlmalloc.h
@@ -17,6 +17,8 @@
#ifndef ART_RUNTIME_GC_ALLOCATOR_DLMALLOC_H_
#define ART_RUNTIME_GC_ALLOCATOR_DLMALLOC_H_
+#include <cstdint>
+
// Configure dlmalloc for mspaces.
// Avoid a collision with one used in llvm.
#undef HAVE_MMAP
@@ -28,12 +30,17 @@
#define ONLY_MSPACES 1
#define MALLOC_INSPECT_ALL 1
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wredundant-decls"
#include "../../bionic/libc/upstream-dlmalloc/malloc.h"
+#pragma GCC diagnostic pop
+#ifdef HAVE_ANDROID_OS
// Define dlmalloc routines from bionic that cannot be included directly because of redefining
// symbols from the include above.
extern "C" void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, void*), void* arg);
extern "C" int dlmalloc_trim(size_t);
+#endif
// Callback for dlmalloc_inspect_all or mspace_inspect_all that will madvise(2) unused
// pages back to the kernel.
@@ -45,4 +52,16 @@
extern "C" void DlmallocBytesAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg);
extern "C" void DlmallocObjectsAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg);
+namespace art {
+namespace gc {
+namespace allocator {
+
+// Callback from dlmalloc when it needs to increase the footprint. Must be implemented somewhere
+// else (currently dlmalloc_space.cc).
+void* ArtDlMallocMoreCore(void* mspace, intptr_t increment);
+
+} // namespace allocator
+} // namespace gc
+} // namespace art
+
#endif // ART_RUNTIME_GC_ALLOCATOR_DLMALLOC_H_
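Because the dlmalloc header and source are third-party code, the warnings they trip are silenced only around their inclusion with a diagnostic push/pop pair instead of relaxing the flags globally. The idiom, with a placeholder include:

```cpp
// The include is a placeholder; the patch wraps dlmalloc's malloc.h/malloc.c this way.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wredundant-decls"
#pragma GCC diagnostic ignored "-Wempty-body"
// #include "third_party/noisy_header.h"
#pragma GCC diagnostic pop

int main() {
  return 0;
}
```
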
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index fa531a7..f9d6a51 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -31,8 +31,6 @@
namespace gc {
namespace allocator {
-extern "C" void* art_heap_rosalloc_morecore(RosAlloc* rosalloc, intptr_t increment);
-
static constexpr bool kUsePrefetchDuringAllocRun = true;
static constexpr bool kPrefetchNewRunDataByZeroing = false;
static constexpr size_t kPrefetchStride = 64;
@@ -179,7 +177,7 @@
page_map_size_ = new_num_of_pages;
DCHECK_LE(page_map_size_, max_page_map_size_);
free_page_run_size_map_.resize(new_num_of_pages);
- art_heap_rosalloc_morecore(this, increment);
+ ArtRosAllocMoreCore(this, increment);
if (last_free_page_run_size > 0) {
// There was a free page run at the end. Expand its size.
DCHECK_EQ(last_free_page_run_size, last_free_page_run->ByteSize(this));
@@ -745,7 +743,7 @@
const size_t idx = run->size_bracket_idx_;
const size_t bracket_size = bracketSizes[idx];
bool run_was_full = false;
- MutexLock mu(self, *size_bracket_locks_[idx]);
+ MutexLock brackets_mu(self, *size_bracket_locks_[idx]);
if (kIsDebugBuild) {
run_was_full = run->IsFull();
}
@@ -785,7 +783,7 @@
DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
run->ZeroHeader();
{
- MutexLock mu(self, lock_);
+ MutexLock lock_mu(self, lock_);
FreePages(self, run, true);
}
} else {
@@ -1243,7 +1241,7 @@
run->to_be_bulk_freed_ = false;
#endif
size_t idx = run->size_bracket_idx_;
- MutexLock mu(self, *size_bracket_locks_[idx]);
+ MutexLock brackets_mu(self, *size_bracket_locks_[idx]);
if (run->IsThreadLocal()) {
DCHECK_LT(run->size_bracket_idx_, kNumThreadLocalSizeBrackets);
DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
@@ -1303,7 +1301,7 @@
}
if (!run_was_current) {
run->ZeroHeader();
- MutexLock mu(self, lock_);
+ MutexLock lock_mu(self, lock_);
FreePages(self, run, true);
}
} else {
@@ -1521,7 +1519,7 @@
page_map_size_ = new_num_of_pages;
free_page_run_size_map_.resize(new_num_of_pages);
DCHECK_EQ(free_page_run_size_map_.size(), new_num_of_pages);
- art_heap_rosalloc_morecore(this, -(static_cast<intptr_t>(decrement)));
+ ArtRosAllocMoreCore(this, -(static_cast<intptr_t>(decrement)));
if (kTraceRosAlloc) {
LOG(INFO) << "RosAlloc::Trim() : decreased the footprint from "
<< footprint_ << " to " << new_footprint;
@@ -1737,14 +1735,14 @@
void RosAlloc::AssertAllThreadLocalRunsAreRevoked() {
if (kIsDebugBuild) {
Thread* self = Thread::Current();
- MutexLock mu(self, *Locks::runtime_shutdown_lock_);
- MutexLock mu2(self, *Locks::thread_list_lock_);
+ MutexLock shutdown_mu(self, *Locks::runtime_shutdown_lock_);
+ MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
for (Thread* t : thread_list) {
AssertThreadLocalRunsAreRevoked(t);
}
for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; ++idx) {
- MutexLock mu(self, *size_bracket_locks_[idx]);
+ MutexLock brackets_mu(self, *size_bracket_locks_[idx]);
CHECK_EQ(current_runs_[idx], dedicated_full_run_);
}
}
@@ -1851,7 +1849,8 @@
dedicated_full_run_->SetIsThreadLocal(true);
}
-void RosAlloc::BytesAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
+void RosAlloc::BytesAllocatedCallback(void* start ATTRIBUTE_UNUSED, void* end ATTRIBUTE_UNUSED,
+ size_t used_bytes, void* arg) {
if (used_bytes == 0) {
return;
}
@@ -1859,7 +1858,8 @@
*bytes_allocated += used_bytes;
}
-void RosAlloc::ObjectsAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
+void RosAlloc::ObjectsAllocatedCallback(void* start ATTRIBUTE_UNUSED, void* end ATTRIBUTE_UNUSED,
+ size_t used_bytes, void* arg) {
if (used_bytes == 0) {
return;
}
@@ -1871,11 +1871,11 @@
Thread* self = Thread::Current();
CHECK(Locks::mutator_lock_->IsExclusiveHeld(self))
<< "The mutator locks isn't exclusively locked at " << __PRETTY_FUNCTION__;
- MutexLock mu(self, *Locks::thread_list_lock_);
+ MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
ReaderMutexLock wmu(self, bulk_free_lock_);
std::vector<Run*> runs;
{
- MutexLock mu(self, lock_);
+ MutexLock lock_mu(self, lock_);
size_t pm_end = page_map_size_;
size_t i = 0;
while (i < pm_end) {
@@ -1966,7 +1966,7 @@
std::list<Thread*> threads = Runtime::Current()->GetThreadList()->GetList();
for (Thread* thread : threads) {
for (size_t i = 0; i < kNumThreadLocalSizeBrackets; ++i) {
- MutexLock mu(self, *size_bracket_locks_[i]);
+ MutexLock brackets_mu(self, *size_bracket_locks_[i]);
Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(i));
CHECK(thread_local_run != nullptr);
CHECK(thread_local_run->IsThreadLocal());
@@ -1975,7 +1975,7 @@
}
}
for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
- MutexLock mu(self, *size_bracket_locks_[i]);
+ MutexLock brackets_mu(self, *size_bracket_locks_[i]);
Run* current_run = current_runs_[i];
CHECK(current_run != nullptr);
if (current_run != dedicated_full_run_) {
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index ad7f901..2a0bf10 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -105,6 +105,9 @@
rosalloc->ReleasePageRange(start, start + byte_size);
}
}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FreePageRun);
};
// Represents a run of memory slots of the same size.
@@ -256,6 +259,8 @@
size_t MarkFreeBitMapShared(void* ptr, uint32_t* free_bit_map_base, const char* caller_name);
// Turns the bit map into a string for debugging.
static std::string BitMapToStr(uint32_t* bit_map_base, size_t num_vec);
+
+ // TODO: DISALLOW_COPY_AND_ASSIGN(Run);
};
// The magic number for a run.
@@ -446,7 +451,7 @@
// Bracket lock names (since locks only have char* names).
std::string size_bracket_lock_names_[kNumOfSizeBrackets];
// The types of page map entries.
- enum {
+ enum PageMapKind {
kPageMapReleased = 0, // Zero and released back to the OS.
kPageMapEmpty, // Zero but probably dirty.
kPageMapRun, // The beginning of a run.
@@ -526,11 +531,15 @@
// Release a range of pages.
size_t ReleasePageRange(uint8_t* start, uint8_t* end) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ // Dumps the page map for debugging.
+ std::string DumpPageMap() EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
public:
RosAlloc(void* base, size_t capacity, size_t max_capacity,
PageReleaseMode page_release_mode,
size_t page_release_size_threshold = kDefaultPageReleaseSizeThreshold);
~RosAlloc();
+
// If kThreadUnsafe is true then the allocator may avoid acquiring some locks as an optimization.
// If used, this may cause race conditions if multiple threads are allocating at the same time.
template<bool kThreadSafe = true>
@@ -540,6 +549,7 @@
LOCKS_EXCLUDED(bulk_free_lock_);
size_t BulkFree(Thread* self, void** ptrs, size_t num_ptrs)
LOCKS_EXCLUDED(bulk_free_lock_);
+
// Returns the size of the allocated slot for a given allocated memory chunk.
size_t UsableSize(void* ptr);
// Returns the size of the allocated slot for a given size.
@@ -557,6 +567,7 @@
void InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
void* arg)
LOCKS_EXCLUDED(lock_);
+
// Release empty pages.
size_t ReleasePages() LOCKS_EXCLUDED(lock_);
// Returns the current footprint.
@@ -565,6 +576,7 @@
size_t FootprintLimit() LOCKS_EXCLUDED(lock_);
// Update the current capacity.
void SetFootprintLimit(size_t bytes) LOCKS_EXCLUDED(lock_);
+
// Releases the thread-local runs assigned to the given thread back to the common set of runs.
void RevokeThreadLocalRuns(Thread* thread);
// Releases the thread-local runs assigned to all the threads back to the common set of runs.
@@ -573,8 +585,7 @@
void AssertThreadLocalRunsAreRevoked(Thread* thread);
// Assert all the thread local runs are revoked.
void AssertAllThreadLocalRunsAreRevoked() LOCKS_EXCLUDED(Locks::thread_list_lock_);
- // Dumps the page map for debugging.
- std::string DumpPageMap() EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
static Run* GetDedicatedFullRun() {
return dedicated_full_run_;
}
@@ -597,7 +608,17 @@
void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes);
+
+ private:
+ friend std::ostream& operator<<(std::ostream& os, const RosAlloc::PageMapKind& rhs);
+
+ DISALLOW_COPY_AND_ASSIGN(RosAlloc);
};
+std::ostream& operator<<(std::ostream& os, const RosAlloc::PageMapKind& rhs);
+
+// Callback from rosalloc when it needs to increase the footprint. Must be implemented somewhere
+// else (currently rosalloc_space.cc).
+void* ArtRosAllocMoreCore(allocator::RosAlloc* rosalloc, intptr_t increment);
} // namespace allocator
} // namespace gc
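DISALLOW_COPY_AND_ASSIGN is added to FreePageRun and RosAlloc above; in a C++11 codebase such a macro usually deletes the copy constructor and copy assignment. A sketch of the pattern (ART's exact macro lives in its own base headers and may differ in detail):

```cpp
#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
  TypeName(const TypeName&) = delete;      \
  void operator=(const TypeName&) = delete

class PageTracker {
 public:
  PageTracker() = default;

 private:
  DISALLOW_COPY_AND_ASSIGN(PageTracker);
};

int main() {
  PageTracker a;
  // PageTracker b = a;  // would no longer compile: the copy constructor is deleted
  (void)a;
  return 0;
}
```
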
diff --git a/runtime/gc/allocator_type.h b/runtime/gc/allocator_type.h
index 938b0f1..c6ebc73 100644
--- a/runtime/gc/allocator_type.h
+++ b/runtime/gc/allocator_type.h
@@ -17,6 +17,8 @@
#ifndef ART_RUNTIME_GC_ALLOCATOR_TYPE_H_
#define ART_RUNTIME_GC_ALLOCATOR_TYPE_H_
+#include <ostream>
+
namespace art {
namespace gc {
@@ -29,6 +31,7 @@
kAllocatorTypeNonMoving, // Special allocator for non moving objects, doesn't have entrypoints.
kAllocatorTypeLOS, // Large object space, also doesn't have entrypoints.
};
+std::ostream& operator<<(std::ostream& os, const AllocatorType& rhs);
} // namespace gc
} // namespace art
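Several enums gain a stream insertion operator in this change (AllocatorType here, RosAlloc::PageMapKind and Instrumentation::InstrumentationEvent elsewhere) so values can be logged by name. Only the declarations appear in this diff; a hand-written definition would look roughly like this assumed sketch:

```cpp
#include <iostream>
#include <ostream>

// Trimmed-down copy of the enum from allocator_type.h; only two values shown.
enum AllocatorType {
  kAllocatorTypeNonMoving,
  kAllocatorTypeLOS,
};

std::ostream& operator<<(std::ostream& os, const AllocatorType& rhs) {
  switch (rhs) {
    case kAllocatorTypeNonMoving: return os << "kAllocatorTypeNonMoving";
    case kAllocatorTypeLOS:       return os << "kAllocatorTypeLOS";
  }
  return os << "AllocatorType[" << static_cast<int>(rhs) << "]";
}

int main() {
  std::cout << kAllocatorTypeLOS << std::endl;  // prints "kAllocatorTypeLOS"
  return 0;
}
```
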
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index ce7c75a..ee5a785 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -29,7 +29,9 @@
const std::string& name_prefix = "")
: GarbageCollector(heap,
name_prefix + (name_prefix.empty() ? "" : " ") +
- "concurrent copying + mark sweep") {}
+ "concurrent copying + mark sweep") {
+ UNUSED(generational);
+ }
~ConcurrentCopying() {}
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 6691b0f..b2482ac 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -239,7 +239,7 @@
accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
if (table != nullptr) {
// TODO: Improve naming.
- TimingLogger::ScopedTiming t(
+ TimingLogger::ScopedTiming t2(
space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
"UpdateAndMarkImageModUnionTable", GetTimings());
table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
@@ -348,7 +348,7 @@
accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
if (table != nullptr) {
// TODO: Improve naming.
- TimingLogger::ScopedTiming t(
+ TimingLogger::ScopedTiming t2(
space->IsZygoteSpace() ? "UpdateZygoteModUnionTableReferences" :
"UpdateImageModUnionTableReferences",
GetTimings());
@@ -538,7 +538,7 @@
if (!ShouldSweepSpace(alloc_space)) {
continue;
}
- TimingLogger::ScopedTiming t(
+ TimingLogger::ScopedTiming t2(
alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
RecordFree(alloc_space->Sweep(swap_bitmaps));
}
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index ad3bb11..6ad44e6 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -658,6 +658,7 @@
// Scans all of the objects
virtual void Run(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ UNUSED(self);
ScanObjectParallelVisitor visitor(this);
// TODO: Tune this.
static const size_t kFifoSize = 4;
@@ -666,10 +667,10 @@
Object* obj = nullptr;
if (kUseMarkStackPrefetch) {
while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
- Object* obj = mark_stack_[--mark_stack_pos_];
- DCHECK(obj != nullptr);
- __builtin_prefetch(obj);
- prefetch_fifo.push_back(obj);
+ Object* mark_stack_obj = mark_stack_[--mark_stack_pos_];
+ DCHECK(mark_stack_obj != nullptr);
+ __builtin_prefetch(mark_stack_obj);
+ prefetch_fifo.push_back(mark_stack_obj);
}
if (UNLIKELY(prefetch_fifo.empty())) {
break;
@@ -927,7 +928,7 @@
kVisitRootFlagStopLoggingNewRoots |
kVisitRootFlagClearRootLog));
if (kVerifyRootsMarked) {
- TimingLogger::ScopedTiming t("(Paused)VerifyRoots", GetTimings());
+ TimingLogger::ScopedTiming t2("(Paused)VerifyRoots", GetTimings());
Runtime::Current()->VisitRoots(VerifyRootMarked, this);
}
}
@@ -1056,7 +1057,7 @@
// if needed.
if (!mark_bitmap->Test(obj)) {
if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
- TimingLogger::ScopedTiming t("FreeList", GetTimings());
+ TimingLogger::ScopedTiming t2("FreeList", GetTimings());
freed.objects += chunk_free_pos;
freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
chunk_free_pos = 0;
@@ -1068,7 +1069,7 @@
}
}
if (chunk_free_pos > 0) {
- TimingLogger::ScopedTiming t("FreeList", GetTimings());
+ TimingLogger::ScopedTiming t2("FreeList", GetTimings());
freed.objects += chunk_free_pos;
freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
chunk_free_pos = 0;
@@ -1098,10 +1099,10 @@
}
}
{
- TimingLogger::ScopedTiming t("RecordFree", GetTimings());
+ TimingLogger::ScopedTiming t2("RecordFree", GetTimings());
RecordFree(freed);
RecordFreeLOS(freed_los);
- t.NewTiming("ResetStack");
+ t2.NewTiming("ResetStack");
allocations->Reset();
}
sweep_array_free_buffer_mem_map_->MadviseDontNeedAndZero();
@@ -1217,10 +1218,10 @@
Object* obj = NULL;
if (kUseMarkStackPrefetch) {
while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
- Object* obj = mark_stack_->PopBack();
- DCHECK(obj != NULL);
- __builtin_prefetch(obj);
- prefetch_fifo.push_back(obj);
+ Object* mark_stack_obj = mark_stack_->PopBack();
+ DCHECK(mark_stack_obj != NULL);
+ __builtin_prefetch(mark_stack_obj);
+ prefetch_fifo.push_back(mark_stack_obj);
}
if (prefetch_fifo.empty()) {
break;
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index e141b6f..cb9f111 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -224,7 +224,7 @@
// Need to do this before the checkpoint since we don't want any threads to add references to
// the live stack during the recursive mark.
if (kUseThreadLocalAllocationStack) {
- TimingLogger::ScopedTiming t("RevokeAllThreadLocalAllocationStacks", GetTimings());
+ TimingLogger::ScopedTiming t2("RevokeAllThreadLocalAllocationStacks", GetTimings());
heap_->RevokeAllThreadLocalAllocationStacks(self_);
}
heap_->SwapStacks(self_);
@@ -368,7 +368,7 @@
CHECK_EQ(is_large_object_space_immune_, collect_from_space_only_);
space::LargeObjectSpace* los = GetHeap()->GetLargeObjectsSpace();
if (is_large_object_space_immune_ && los != nullptr) {
- TimingLogger::ScopedTiming t("VisitLargeObjects", GetTimings());
+ TimingLogger::ScopedTiming t2("VisitLargeObjects", GetTimings());
DCHECK(collect_from_space_only_);
// Delay copying the live set to the marked set until here from
// BindBitmaps() as the large objects on the allocation stack may
diff --git a/runtime/gc/collector/sticky_mark_sweep.cc b/runtime/gc/collector/sticky_mark_sweep.cc
index 4ed6abc..5be3db7 100644
--- a/runtime/gc/collector/sticky_mark_sweep.cc
+++ b/runtime/gc/collector/sticky_mark_sweep.cc
@@ -58,6 +58,7 @@
}
void StickyMarkSweep::Sweep(bool swap_bitmaps) {
+ UNUSED(swap_bitmaps);
SweepArray(GetHeap()->GetLiveStack(), false);
}
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index c0008aa..06cd326 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -54,6 +54,7 @@
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "heap-inl.h"
#include "image.h"
+#include "intern_table.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object.h"
@@ -264,14 +265,13 @@
}
// Attempt to create 2 mem maps at or after the requested begin.
main_mem_map_1.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[0], request_begin, capacity_,
- PROT_READ | PROT_WRITE, &error_str));
+ &error_str));
CHECK(main_mem_map_1.get() != nullptr) << error_str;
if (support_homogeneous_space_compaction ||
background_collector_type_ == kCollectorTypeSS ||
foreground_collector_type_ == kCollectorTypeSS) {
main_mem_map_2.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[1], main_mem_map_1->End(),
- capacity_, PROT_READ | PROT_WRITE,
- &error_str));
+ capacity_, &error_str));
CHECK(main_mem_map_2.get() != nullptr) << error_str;
}
// Create the non moving space first so that bitmaps don't take up the address range.
@@ -434,8 +434,8 @@
}
}
-MemMap* Heap::MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin, size_t capacity,
- int prot_flags, std::string* out_error_str) {
+MemMap* Heap::MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin,
+ size_t capacity, std::string* out_error_str) {
while (true) {
MemMap* map = MemMap::MapAnonymous(name, request_begin, capacity,
PROT_READ | PROT_WRITE, true, out_error_str);
@@ -599,8 +599,8 @@
}
}
// Unprotect all the spaces.
- for (const auto& space : continuous_spaces_) {
- mprotect(space->Begin(), space->Capacity(), PROT_READ | PROT_WRITE);
+ for (const auto& con_space : continuous_spaces_) {
+ mprotect(con_space->Begin(), con_space->Capacity(), PROT_READ | PROT_WRITE);
}
stream << "Object " << obj;
if (space != nullptr) {
@@ -886,7 +886,7 @@
if (result != NULL) {
return result;
}
- return FindDiscontinuousSpaceFromObject(obj, true);
+ return FindDiscontinuousSpaceFromObject(obj, fail_ok);
}
space::ImageSpace* Heap::GetImageSpace() const {
@@ -1017,6 +1017,8 @@
// We never move things in the native heap, so we can finish the GC at this point.
FinishGC(self, collector::kGcTypeNone);
size_t native_reclaimed = 0;
+
+#ifdef HAVE_ANDROID_OS
// Only trim the native heap if we don't care about pauses.
if (!CareAboutPauseTimes()) {
#if defined(USE_DLMALLOC)
@@ -1029,6 +1031,7 @@
UNIMPLEMENTED(WARNING) << "Add trimming support";
#endif
}
+#endif // HAVE_ANDROID_OS
uint64_t end_ns = NanoTime();
VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns)
<< ", advised=" << PrettySize(managed_reclaimed) << ") and native (duration="
@@ -1263,12 +1266,12 @@
continue;
}
// Attempt to run the collector, if we succeed, re-try the allocation.
- const bool gc_ran =
+ const bool plan_gc_ran =
CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
if (was_default_allocator && allocator != GetCurrentAllocator()) {
return nullptr;
}
- if (gc_ran) {
+ if (plan_gc_ran) {
// Did we free sufficient memory for the allocation to succeed?
mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
usable_size);
@@ -1529,7 +1532,7 @@
ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
Locks::mutator_lock_->AssertNotHeld(self);
{
- ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
+ ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
MutexLock mu(self, *gc_complete_lock_);
// Ensure there is only one GC at a time.
WaitForGcToCompleteLocked(kGcCauseHomogeneousSpaceCompact, self);
@@ -1601,7 +1604,7 @@
// compacting_gc_disable_count_, this should rarely occurs).
for (;;) {
{
- ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
+ ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
MutexLock mu(self, *gc_complete_lock_);
// Ensure there is only one GC at a time.
WaitForGcToCompleteLocked(kGcCauseCollectorTransition, self);
@@ -1828,6 +1831,7 @@
virtual bool ShouldSweepSpace(space::ContinuousSpace* space) const {
// Don't sweep any spaces since we probably blasted the internal accounting of the free list
// allocator.
+ UNUSED(space);
return false;
}
@@ -1894,6 +1898,7 @@
LOG(WARNING) << __FUNCTION__ << " called when we already have a zygote space.";
return;
}
+ Runtime::Current()->GetInternTable()->SwapPostZygoteWithPreZygote();
VLOG(heap) << "Starting PreZygoteFork";
// Trim the pages at the end of the non moving space.
non_moving_space_->Trim();
@@ -2074,7 +2079,7 @@
bool compacting_gc;
{
gc_complete_lock_->AssertNotHeld(self);
- ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
+ ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
MutexLock mu(self, *gc_complete_lock_);
// Ensure there is only one GC at a time.
WaitForGcToCompleteLocked(gc_cause, self);
@@ -2234,6 +2239,7 @@
void operator()(mirror::Class* klass, mirror::Reference* ref) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UNUSED(klass);
if (verify_referent_) {
VerifyReference(ref, ref->GetReferent(), mirror::Reference::ReferentOffset());
}
@@ -2578,6 +2584,7 @@
}
void Heap::SwapStacks(Thread* self) {
+ UNUSED(self);
if (kUseThreadLocalAllocationStack) {
live_stack_->AssertAllZero();
}
@@ -2639,15 +2646,15 @@
if (table != nullptr) {
const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" :
"ImageModUnionClearCards";
- TimingLogger::ScopedTiming t(name, timings);
+ TimingLogger::ScopedTiming t2(name, timings);
table->ClearCards();
} else if (use_rem_sets && rem_set != nullptr) {
DCHECK(collector::SemiSpace::kUseRememberedSet && collector_type_ == kCollectorTypeGSS)
<< static_cast<int>(collector_type_);
- TimingLogger::ScopedTiming t("AllocSpaceRemSetClearCards", timings);
+ TimingLogger::ScopedTiming t2("AllocSpaceRemSetClearCards", timings);
rem_set->ClearCards();
} else if (space->GetType() != space::kSpaceTypeBumpPointerSpace) {
- TimingLogger::ScopedTiming t("AllocSpaceClearCards", timings);
+ TimingLogger::ScopedTiming t2("AllocSpaceClearCards", timings);
// No mod union table for the AllocSpace. Age the cards so that the GC knows that these cards
// were dirty before the GC started.
// TODO: Need to use atomic for the case where aged(cleaning thread) -> dirty(other thread)
@@ -2669,7 +2676,7 @@
TimingLogger* const timings = current_gc_iteration_.GetTimings();
TimingLogger::ScopedTiming t(__FUNCTION__, timings);
if (verify_pre_gc_heap_) {
- TimingLogger::ScopedTiming t("(Paused)PreGcVerifyHeapReferences", timings);
+ TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyHeapReferences", timings);
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
size_t failures = VerifyHeapReferences();
if (failures > 0) {
@@ -2679,7 +2686,7 @@
}
// Check that all objects which reference things in the live stack are on dirty cards.
if (verify_missing_card_marks_) {
- TimingLogger::ScopedTiming t("(Paused)PreGcVerifyMissingCardMarks", timings);
+ TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyMissingCardMarks", timings);
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
SwapStacks(self);
// Sort the live stack so that we can quickly binary search it later.
@@ -2688,7 +2695,7 @@
SwapStacks(self);
}
if (verify_mod_union_table_) {
- TimingLogger::ScopedTiming t("(Paused)PreGcVerifyModUnionTables", timings);
+ TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyModUnionTables", timings);
ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
for (const auto& table_pair : mod_union_tables_) {
accounting::ModUnionTable* mod_union_table = table_pair.second;
@@ -2706,6 +2713,7 @@
}
void Heap::PrePauseRosAllocVerification(collector::GarbageCollector* gc) {
+ UNUSED(gc);
// TODO: Add a new runtime option for this?
if (verify_pre_gc_rosalloc_) {
RosAllocVerification(current_gc_iteration_.GetTimings(), "PreGcRosAllocVerification");
@@ -2719,7 +2727,7 @@
// Called before sweeping occurs since we want to make sure we are not going so reclaim any
// reachable objects.
if (verify_pre_sweeping_heap_) {
- TimingLogger::ScopedTiming t("(Paused)PostSweepingVerifyHeapReferences", timings);
+ TimingLogger::ScopedTiming t2("(Paused)PostSweepingVerifyHeapReferences", timings);
CHECK_NE(self->GetState(), kRunnable);
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
// Swapping bound bitmaps does nothing.
@@ -2752,7 +2760,7 @@
RosAllocVerification(timings, "(Paused)PostGcRosAllocVerification");
}
if (verify_post_gc_heap_) {
- TimingLogger::ScopedTiming t("(Paused)PostGcVerifyHeapReferences", timings);
+ TimingLogger::ScopedTiming t2("(Paused)PostGcVerifyHeapReferences", timings);
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
size_t failures = VerifyHeapReferences();
if (failures > 0) {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 7b891a6..cf7352e 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -622,8 +622,7 @@
// Create a mem map with a preferred base address.
static MemMap* MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin,
- size_t capacity, int prot_flags,
- std::string* out_error_str);
+ size_t capacity, std::string* out_error_str);
bool SupportHSpaceCompaction() const {
// Returns true if we can do hspace compaction
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index 3106b4c..73196b2 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -48,8 +48,8 @@
Handle<mirror::Class> c(
hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));
for (size_t i = 0; i < 1024; ++i) {
- StackHandleScope<1> hs(soa.Self());
- Handle<mirror::ObjectArray<mirror::Object>> array(hs.NewHandle(
+ StackHandleScope<1> hs2(soa.Self());
+ Handle<mirror::ObjectArray<mirror::Object>> array(hs2.NewHandle(
mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), c.Get(), 2048)));
for (size_t j = 0; j < 2048; ++j) {
mirror::String* string = mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!");
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index bfaa2bb..012f9f9 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -143,7 +143,7 @@
soft_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
weak_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
{
- TimingLogger::ScopedTiming t(concurrent ? "EnqueueFinalizerReferences" :
+ TimingLogger::ScopedTiming t2(concurrent ? "EnqueueFinalizerReferences" :
"(Paused)EnqueueFinalizerReferences", timings);
if (concurrent) {
StartPreservingReferences(self);
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index 0a55b52..04b09e9 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -188,11 +188,11 @@
size_t block_size = header->size_;
pos += sizeof(BlockHeader); // Skip the header so that we know where the objects
mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
- const mirror::Object* end = reinterpret_cast<const mirror::Object*>(pos + block_size);
- CHECK_LE(reinterpret_cast<const uint8_t*>(end), End());
+ const mirror::Object* end_obj = reinterpret_cast<const mirror::Object*>(pos + block_size);
+ CHECK_LE(reinterpret_cast<const uint8_t*>(end_obj), End());
// We don't know how many objects are allocated in the current block. When we hit a null class
// assume its the end. TODO: Have a thread update the header when it flushes the block?
- while (obj < end && obj->GetClass() != nullptr) {
+ while (obj < end_obj && obj->GetClass() != nullptr) {
callback(obj, arg);
obj = GetNextObject(obj);
}
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 98a3189..089ede4 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -186,8 +186,8 @@
size_t unused_; // Ensures alignment of kAlignment.
};
- COMPILE_ASSERT(sizeof(BlockHeader) % kAlignment == 0,
- continuous_block_must_be_kAlignment_aligned);
+ static_assert(sizeof(BlockHeader) % kAlignment == 0,
+ "continuous block must be kAlignment aligned");
friend class collector::MarkSweep;
DISALLOW_COPY_AND_ASSIGN(BumpPointerSpace);
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index d2d95b4..3072c23 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -213,27 +213,6 @@
}
}
-// Callback from dlmalloc when it needs to increase the footprint
-extern "C" void* art_heap_morecore(void* mspace, intptr_t increment) {
- Heap* heap = Runtime::Current()->GetHeap();
- DlMallocSpace* dlmalloc_space = heap->GetDlMallocSpace();
- // Support for multiple DlMalloc provided by a slow path.
- if (UNLIKELY(dlmalloc_space == nullptr || dlmalloc_space->GetMspace() != mspace)) {
- dlmalloc_space = nullptr;
- for (space::ContinuousSpace* space : heap->GetContinuousSpaces()) {
- if (space->IsDlMallocSpace()) {
- DlMallocSpace* cur_dlmalloc_space = space->AsDlMallocSpace();
- if (cur_dlmalloc_space->GetMspace() == mspace) {
- dlmalloc_space = cur_dlmalloc_space;
- break;
- }
- }
- }
- CHECK(dlmalloc_space != nullptr) << "Couldn't find DlmMallocSpace with mspace=" << mspace;
- }
- return dlmalloc_space->MoreCore(increment);
-}
-
size_t DlMallocSpace::Trim() {
MutexLock mu(Thread::Current(), lock_);
// Trim to release memory at the end of the space.
@@ -314,6 +293,7 @@
}
void DlMallocSpace::LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) {
+ UNUSED(failed_alloc_bytes);
Thread* self = Thread::Current();
size_t max_contiguous_allocation = 0;
// To allow the Walk/InspectAll() to exclusively-lock the mutator
@@ -329,5 +309,31 @@
}
} // namespace space
+
+namespace allocator {
+
+// Implement the dlmalloc morecore callback.
+void* ArtDlMallocMoreCore(void* mspace, intptr_t increment) {
+ Heap* heap = Runtime::Current()->GetHeap();
+ ::art::gc::space::DlMallocSpace* dlmalloc_space = heap->GetDlMallocSpace();
+ // Support for multiple DlMalloc provided by a slow path.
+ if (UNLIKELY(dlmalloc_space == nullptr || dlmalloc_space->GetMspace() != mspace)) {
+ dlmalloc_space = nullptr;
+ for (space::ContinuousSpace* space : heap->GetContinuousSpaces()) {
+ if (space->IsDlMallocSpace()) {
+ ::art::gc::space::DlMallocSpace* cur_dlmalloc_space = space->AsDlMallocSpace();
+ if (cur_dlmalloc_space->GetMspace() == mspace) {
+ dlmalloc_space = cur_dlmalloc_space;
+ break;
+ }
+ }
+ }
+ CHECK(dlmalloc_space != nullptr) << "Couldn't find DlMallocSpace with mspace=" << mspace;
+ }
+ return dlmalloc_space->MoreCore(increment);
+}
+
+} // namespace allocator
+
} // namespace gc
} // namespace art
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 6e1639c..f03ea31 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -17,6 +17,7 @@
#include "image_space.h"
#include <dirent.h>
+#include <sys/statvfs.h>
#include <sys/types.h>
#include <random>
@@ -376,6 +377,41 @@
return false;
}
+static constexpr uint64_t kLowSpaceValue = 50 * MB;
+static constexpr uint64_t kTmpFsSentinelValue = 384 * MB;
+
+// Read the free space of the cache partition and make a decision whether to keep the generated
+// image. This is to try to mitigate situations where the system might run out of space later.
+static bool CheckSpace(const std::string& cache_filename, std::string* error_msg) {
+ // Using statvfs vs statvfs64 because of b/18207376, and it is enough for all practical purposes.
+ struct statvfs buf;
+
+ int res = TEMP_FAILURE_RETRY(statvfs(cache_filename.c_str(), &buf));
+ if (res != 0) {
+ // Could not stat. Conservatively tell the system to delete the image.
+ *error_msg = "Could not stat the filesystem, assuming low-memory situation.";
+ return false;
+ }
+
+ uint64_t fs_overall_size = buf.f_bsize * static_cast<uint64_t>(buf.f_blocks);
+ // Zygote is privileged, but other things are not. Use bavail.
+ uint64_t fs_free_size = buf.f_bsize * static_cast<uint64_t>(buf.f_bavail);
+
+ // Take the overall size as an indicator for a tmpfs, which is being used for the decryption
+ // environment. We do not want to fail quickening the boot image there, as it is beneficial
+ // for time-to-UI.
+ if (fs_overall_size > kTmpFsSentinelValue) {
+ if (fs_free_size < kLowSpaceValue) {
+ *error_msg = StringPrintf("Low-memory situation: only %4.2f megabytes available after image"
+ " generation, need at least %" PRIu64 ".",
+ static_cast<double>(fs_free_size) / MB,
+ kLowSpaceValue / MB);
+ return false;
+ }
+ }
+ return true;
+}
+
ImageSpace* ImageSpace::Create(const char* image_location,
const InstructionSet image_isa,
std::string* error_msg) {
@@ -523,6 +559,13 @@
PruneDexCache(image_isa);
return nullptr;
} else {
+ // Check whether there is enough space left over after we have generated the image.
+ if (!CheckSpace(cache_filename, error_msg)) {
+ // No. Delete the generated image and try to run out of the dex files.
+ PruneDexCache(image_isa);
+ return nullptr;
+ }
+
// Note that we must not use the file descriptor associated with
// ScopedFlock::GetFile to Init the image file. We want the file
// descriptor (and the associated exclusive lock) to be released when
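CheckSpace() above derives byte counts from statvfs by multiplying the block size by f_blocks/f_bavail, and keeps the generated image only when a large (non-tmpfs) cache partition still has at least 50 MB available. A standalone sketch of the same computation; the queried path and the local constants are illustrative:

```cpp
#include <sys/statvfs.h>

#include <cstdint>
#include <cstdio>

constexpr uint64_t kMB = 1024 * 1024;
constexpr uint64_t kLowSpaceValue = 50 * kMB;        // same threshold as the patch
constexpr uint64_t kTmpFsSentinelValue = 384 * kMB;  // same sentinel as the patch

int main() {
  struct statvfs buf;
  if (statvfs("/tmp", &buf) != 0) {  // example path; the patch checks the cache file
    std::perror("statvfs");
    return 1;  // could not stat: conservatively treat as a low-space situation
  }
  const uint64_t overall = buf.f_bsize * static_cast<uint64_t>(buf.f_blocks);
  // Unprivileged processes should use f_bavail rather than f_bfree.
  const uint64_t avail = buf.f_bsize * static_cast<uint64_t>(buf.f_bavail);
  // Keep the image unless a large (non-tmpfs) partition is low on space.
  const bool enough = overall <= kTmpFsSentinelValue || avail >= kLowSpaceValue;
  std::printf("overall=%llu MB, available=%llu MB, enough=%d\n",
              static_cast<unsigned long long>(overall / kMB),
              static_cast<unsigned long long>(avail / kMB),
              enough ? 1 : 0);
  return enough ? 0 : 1;
}
```
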
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 9434bfe..c0c6444 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -159,7 +159,11 @@
MutexLock mu(Thread::Current(), lock_);
auto found = mem_maps_.find(obj);
CHECK(found != mem_maps_.end()) << "Attempted to get size of a large object which is not live";
- return found->second->BaseSize();
+ size_t alloc_size = found->second->BaseSize();
+ if (usable_size != nullptr) {
+ *usable_size = alloc_size;
+ }
+ return alloc_size;
}
size_t LargeObjectSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index 161eba9..ff8b570 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -228,15 +228,6 @@
return bytes_freed;
}
-// Callback from rosalloc when it needs to increase the footprint
-extern "C" void* art_heap_rosalloc_morecore(allocator::RosAlloc* rosalloc, intptr_t increment) {
- Heap* heap = Runtime::Current()->GetHeap();
- RosAllocSpace* rosalloc_space = heap->GetRosAllocSpace(rosalloc);
- DCHECK(rosalloc_space != nullptr);
- DCHECK_EQ(rosalloc_space->GetRosAlloc(), rosalloc);
- return rosalloc_space->MoreCore(increment);
-}
-
size_t RosAllocSpace::Trim() {
VLOG(heap) << "RosAllocSpace::Trim() ";
{
@@ -367,5 +358,19 @@
}
} // namespace space
+
+namespace allocator {
+
+// Callback from rosalloc when it needs to increase the footprint.
+void* ArtRosAllocMoreCore(allocator::RosAlloc* rosalloc, intptr_t increment) {
+ Heap* heap = Runtime::Current()->GetHeap();
+ art::gc::space::RosAllocSpace* rosalloc_space = heap->GetRosAllocSpace(rosalloc);
+ DCHECK(rosalloc_space != nullptr);
+ DCHECK_EQ(rosalloc_space->GetRosAlloc(), rosalloc);
+ return rosalloc_space->MoreCore(increment);
+}
+
+} // namespace allocator
+
} // namespace gc
} // namespace art
diff --git a/runtime/gc/space/space.cc b/runtime/gc/space/space.cc
index b233805..486d79a 100644
--- a/runtime/gc/space/space.cc
+++ b/runtime/gc/space/space.cc
@@ -133,8 +133,8 @@
mark_bitmap_->SetName(temp_name);
}
-AllocSpace::SweepCallbackContext::SweepCallbackContext(bool swap_bitmaps, space::Space* space)
- : swap_bitmaps(swap_bitmaps), space(space), self(Thread::Current()) {
+AllocSpace::SweepCallbackContext::SweepCallbackContext(bool swap_bitmaps_in, space::Space* space_in)
+ : swap_bitmaps(swap_bitmaps_in), space(space_in), self(Thread::Current()) {
}
} // namespace space
diff --git a/runtime/gc/space/valgrind_malloc_space.h b/runtime/gc/space/valgrind_malloc_space.h
index eb6fe9c..bc870a6 100644
--- a/runtime/gc/space/valgrind_malloc_space.h
+++ b/runtime/gc/space/valgrind_malloc_space.h
@@ -44,6 +44,7 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void RegisterRecentFree(mirror::Object* ptr) OVERRIDE {
+ UNUSED(ptr);
}
ValgrindMallocSpace(const std::string& name, MemMap* mem_map, AllocatorType allocator,
diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc
index 9de0548..a868e68 100644
--- a/runtime/gc/space/zygote_space.cc
+++ b/runtime/gc/space/zygote_space.cc
@@ -32,6 +32,7 @@
: objects_allocated_(objects_allocated) {}
void operator()(mirror::Object* obj) const {
+ UNUSED(obj);
++*objects_allocated_;
}
@@ -76,30 +77,29 @@
<< ",name=\"" << GetName() << "\"]";
}
-mirror::Object* ZygoteSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) {
+mirror::Object* ZygoteSpace::Alloc(Thread*, size_t, size_t*, size_t*) {
UNIMPLEMENTED(FATAL);
- return nullptr;
+ UNREACHABLE();
}
-size_t ZygoteSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
+size_t ZygoteSpace::AllocationSize(mirror::Object*, size_t*) {
UNIMPLEMENTED(FATAL);
- return 0;
+ UNREACHABLE();
}
-size_t ZygoteSpace::Free(Thread* self, mirror::Object* ptr) {
+size_t ZygoteSpace::Free(Thread*, mirror::Object*) {
UNIMPLEMENTED(FATAL);
- return 0;
+ UNREACHABLE();
}
-size_t ZygoteSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
+size_t ZygoteSpace::FreeList(Thread*, size_t, mirror::Object**) {
UNIMPLEMENTED(FATAL);
- return 0;
+ UNREACHABLE();
}
-void ZygoteSpace::LogFragmentationAllocFailure(std::ostream& /*os*/,
- size_t /*failed_alloc_bytes*/) {
+void ZygoteSpace::LogFragmentationAllocFailure(std::ostream&, size_t) {
UNIMPLEMENTED(FATAL);
+ UNREACHABLE();
}
void ZygoteSpace::SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) {
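The ZygoteSpace stubs above replace dummy return values with UNIMPLEMENTED(FATAL) followed by UNREACHABLE(), which tells the compiler the path never returns. A sketch of the pattern, assuming a __builtin_unreachable-based macro and a stand-in fatal logger:

```cpp
#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Assumed definition; ART's UNREACHABLE() may be spelled differently.
#define UNREACHABLE() __builtin_unreachable()

// Stand-in for a fatal logger that aborts at runtime but is not declared
// [[noreturn]], which is why callers still need UNREACHABLE().
void FatalLog(const char* msg) {
  std::fprintf(stderr, "FATAL: %s\n", msg);
  std::abort();
}

// Stub in the same style as the ZygoteSpace methods: no dummy return value,
// because UNREACHABLE() ends the control-flow path for the compiler.
std::size_t UnimplementedAllocationSize() {
  FatalLog("UNIMPLEMENTED: AllocationSize");
  UNREACHABLE();
}

int main() {
  (void)&UnimplementedAllocationSize;  // not called; calling it would abort the process
  return 0;
}
```
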
diff --git a/runtime/gc_root.h b/runtime/gc_root.h
index b10a55c..a347622 100644
--- a/runtime/gc_root.h
+++ b/runtime/gc_root.h
@@ -30,7 +30,9 @@
ALWAYS_INLINE MirrorType* Read() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void VisitRoot(RootCallback* callback, void* arg, uint32_t thread_id, RootType root_type) {
+ DCHECK(!IsNull());
callback(reinterpret_cast<mirror::Object**>(&root_), arg, thread_id, root_type);
+ DCHECK(!IsNull());
}
// This is only used by IrtIterator.
diff --git a/runtime/handle_scope-inl.h b/runtime/handle_scope-inl.h
index b0aadec..9ddaf61 100644
--- a/runtime/handle_scope-inl.h
+++ b/runtime/handle_scope-inl.h
@@ -27,7 +27,7 @@
template<size_t kNumReferences>
inline StackHandleScope<kNumReferences>::StackHandleScope(Thread* self, mirror::Object* fill_value)
: HandleScope(self->GetTopHandleScope(), kNumReferences), self_(self), pos_(0) {
- COMPILE_ASSERT(kNumReferences >= 1, stack_handle_scope_must_contain_at_least_1_reference);
+ static_assert(kNumReferences >= 1, "StackHandleScope must contain at least 1 reference");
// TODO: Figure out how to use a compile assert.
CHECK_EQ(&storage_[0], GetReferences());
for (size_t i = 0; i < kNumReferences; ++i) {
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index c55835d..2c4f0f9 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -100,7 +100,7 @@
}
// Offset of link within HandleScope, used by generated code.
- static size_t LinkOffset(size_t pointer_size) {
+ static size_t LinkOffset(size_t pointer_size ATTRIBUTE_UNUSED) {
return 0;
}
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index a2d37b3..14d7432 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -637,8 +637,8 @@
// U4: size of identifiers. We're using addresses as IDs and our heap references are stored
// as uint32_t.
// Note of warning: hprof-conv hard-codes the size of identifiers to 4.
- COMPILE_ASSERT(sizeof(mirror::HeapReference<mirror::Object>) == sizeof(uint32_t),
- UnexpectedHeapReferenceSize);
+ static_assert(sizeof(mirror::HeapReference<mirror::Object>) == sizeof(uint32_t),
+ "Unexpected HeapReference size");
U4_TO_BUF_BE(buf, 0, sizeof(uint32_t));
fwrite(buf, 1, sizeof(uint32_t), header_fp_);
diff --git a/runtime/instruction_set.cc b/runtime/instruction_set.cc
index 0ca32fe..e165a75 100644
--- a/runtime/instruction_set.cc
+++ b/runtime/instruction_set.cc
@@ -431,9 +431,8 @@
// A signal handler called by a fault for an illegal instruction. We record the fact in r0
// and then increment the PC in the signal context to return to the next instruction. We know the
// instruction is an sdiv (4 bytes long).
-static void bad_divide_inst_handle(int signo, siginfo_t* si, void* data) {
- UNUSED(signo);
- UNUSED(si);
+static void bad_divide_inst_handle(int signo ATTRIBUTE_UNUSED, siginfo_t* si ATTRIBUTE_UNUSED,
+ void* data) {
#if defined(__arm__)
struct ucontext *uc = (struct ucontext *)data;
struct sigcontext *sc = &uc->uc_mcontext;
diff --git a/runtime/instruction_set.h b/runtime/instruction_set.h
index 529fa0c..84a3e80 100644
--- a/runtime/instruction_set.h
+++ b/runtime/instruction_set.h
@@ -370,7 +370,8 @@
// Use the lower 32b for the method pointer and the upper 32b for the code pointer.
static inline TwoWordReturn GetTwoWordSuccessValue(uintptr_t hi, uintptr_t lo) {
- uint32_t lo32 = static_cast<uint32_t>(lo);
+ static_assert(sizeof(uint32_t) == sizeof(uintptr_t), "Unexpected size difference");
+ uint32_t lo32 = lo;
uint64_t hi64 = static_cast<uint64_t>(hi);
return ((hi64 << 32) | lo32);
}
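GetTwoWordSuccessValue above packs the method pointer into the low 32 bits and the code pointer into the high 32 bits of a 64-bit value, and the new static_assert pins this variant to 32-bit targets. A worked example of the packing and unpacking, with TwoWordReturn assumed to be a plain uint64_t for illustration:

```cpp
#include <cassert>
#include <cstdint>

// Assumed representation for illustration; the real type is target-dependent.
using TwoWordReturn = uint64_t;

TwoWordReturn GetTwoWordSuccessValue(uint32_t hi, uint32_t lo) {
  return (static_cast<uint64_t>(hi) << 32) | lo;
}

int main() {
  TwoWordReturn ret = GetTwoWordSuccessValue(0x11223344u, 0xAABBCCDDu);
  assert(static_cast<uint32_t>(ret) == 0xAABBCCDDu);        // low word: method pointer
  assert(static_cast<uint32_t>(ret >> 32) == 0x11223344u);  // high word: code pointer
  return 0;
}
```
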
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index fc3da36..003e160 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -177,8 +177,9 @@
static void InstrumentationInstallStack(Thread* thread, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
struct InstallStackVisitor : public StackVisitor {
- InstallStackVisitor(Thread* thread, Context* context, uintptr_t instrumentation_exit_pc)
- : StackVisitor(thread, context), instrumentation_stack_(thread->GetInstrumentationStack()),
+ InstallStackVisitor(Thread* thread_in, Context* context, uintptr_t instrumentation_exit_pc)
+ : StackVisitor(thread_in, context),
+ instrumentation_stack_(thread_in->GetInstrumentationStack()),
instrumentation_exit_pc_(instrumentation_exit_pc),
reached_existing_instrumentation_frames_(false), instrumentation_stack_depth_(0),
last_return_pc_(0) {
@@ -316,12 +317,12 @@
static void InstrumentationRestoreStack(Thread* thread, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
struct RestoreStackVisitor : public StackVisitor {
- RestoreStackVisitor(Thread* thread, uintptr_t instrumentation_exit_pc,
+ RestoreStackVisitor(Thread* thread_in, uintptr_t instrumentation_exit_pc,
Instrumentation* instrumentation)
- : StackVisitor(thread, NULL), thread_(thread),
+ : StackVisitor(thread_in, NULL), thread_(thread_in),
instrumentation_exit_pc_(instrumentation_exit_pc),
instrumentation_(instrumentation),
- instrumentation_stack_(thread->GetInstrumentationStack()),
+ instrumentation_stack_(thread_in->GetInstrumentationStack()),
frames_removed_(0) {}
virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -595,6 +596,7 @@
}
static void ResetQuickAllocEntryPointsForThread(Thread* thread, void* arg) {
+ UNUSED(arg);
thread->ResetQuickAllocEntryPointsForThread();
}
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 3017bf6..646c7ae 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -103,13 +103,13 @@
class Instrumentation {
public:
enum InstrumentationEvent {
- kMethodEntered = 1 << 0,
- kMethodExited = 1 << 1,
- kMethodUnwind = 1 << 2,
- kDexPcMoved = 1 << 3,
- kFieldRead = 1 << 4,
- kFieldWritten = 1 << 5,
- kExceptionCaught = 1 << 6,
+ kMethodEntered = 1, // 1 << 0
+ kMethodExited = 2, // 1 << 1
+ kMethodUnwind = 4, // 1 << 2
+ kDexPcMoved = 8, // 1 << 3
+ kFieldRead = 16, // 1 << 4
+ kFieldWritten = 32, // 1 << 5
+ kExceptionCaught = 64, // 1 << 6
};
Instrumentation();
@@ -464,6 +464,7 @@
DISALLOW_COPY_AND_ASSIGN(Instrumentation);
};
+std::ostream& operator<<(std::ostream& os, const Instrumentation::InstrumentationEvent& rhs);
// An element in the instrumentation side stack maintained in art::Thread.
struct InstrumentationStackFrame {
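The InstrumentationEvent constants are now spelled as literals with the shift kept in a comment; they still form a power-of-two bitmask. A small usage sketch under that assumption:

```cpp
#include <cstdint>

enum InstrumentationEvent : uint32_t {
  kMethodEntered = 1,   // 1 << 0
  kMethodExited = 2,    // 1 << 1
  kFieldRead = 16,      // 1 << 4
};

static_assert(kFieldRead == (1u << 4), "literal and shifted spellings agree");

bool HasEvent(uint32_t enabled_events, InstrumentationEvent event) {
  return (enabled_events & event) != 0;
}

int main() {
  const uint32_t enabled = kMethodEntered | kFieldRead;
  return (HasEvent(enabled, kFieldRead) && !HasEvent(enabled, kMethodExited)) ? 0 : 1;
}
```
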
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index f6e6661..95186c6 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -29,39 +29,34 @@
namespace art {
InternTable::InternTable()
- : log_new_roots_(false), allow_new_interns_(true),
+ : image_added_to_intern_table_(false), log_new_roots_(false),
+ allow_new_interns_(true),
new_intern_condition_("New intern condition", *Locks::intern_table_lock_) {
}
size_t InternTable::Size() const {
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
- return strong_interns_.size() + weak_interns_.size();
+ return strong_interns_.Size() + weak_interns_.Size();
}
size_t InternTable::StrongSize() const {
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
- return strong_interns_.size();
+ return strong_interns_.Size();
}
size_t InternTable::WeakSize() const {
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
- return weak_interns_.size();
+ return weak_interns_.Size();
}
void InternTable::DumpForSigQuit(std::ostream& os) const {
- MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
- os << "Intern table: " << strong_interns_.size() << " strong; "
- << weak_interns_.size() << " weak\n";
+ os << "Intern table: " << StrongSize() << " strong; " << WeakSize() << " weak\n";
}
void InternTable::VisitRoots(RootCallback* callback, void* arg, VisitRootFlags flags) {
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
if ((flags & kVisitRootFlagAllRoots) != 0) {
- for (auto& strong_intern : strong_interns_) {
- const_cast<GcRoot<mirror::String>&>(strong_intern).
- VisitRoot(callback, arg, 0, kRootInternedString);
- DCHECK(!strong_intern.IsNull());
- }
+ strong_interns_.VisitRoots(callback, arg, flags);
} else if ((flags & kVisitRootFlagNewRoots) != 0) {
for (auto& root : new_strong_intern_roots_) {
mirror::String* old_ref = root.Read<kWithoutReadBarrier>();
@@ -71,10 +66,8 @@
// The GC moved a root in the log. Need to search the strong interns and update the
// corresponding object. This is slow, but luckily for us, this may only happen with a
// concurrent moving GC.
- auto it = strong_interns_.find(GcRoot<mirror::String>(old_ref));
- DCHECK(it != strong_interns_.end());
- strong_interns_.erase(it);
- strong_interns_.insert(GcRoot<mirror::String>(new_ref));
+ strong_interns_.Remove(old_ref);
+ strong_interns_.Insert(new_ref);
}
}
}
@@ -91,21 +84,17 @@
}
mirror::String* InternTable::LookupStrong(mirror::String* s) {
- return Lookup(&strong_interns_, s);
+ return strong_interns_.Find(s);
}
mirror::String* InternTable::LookupWeak(mirror::String* s) {
- // Weak interns need a read barrier because they are weak roots.
- return Lookup(&weak_interns_, s);
+ return weak_interns_.Find(s);
}
-mirror::String* InternTable::Lookup(Table* table, mirror::String* s) {
- Locks::intern_table_lock_->AssertHeld(Thread::Current());
- auto it = table->find(GcRoot<mirror::String>(s));
- if (LIKELY(it != table->end())) {
- return const_cast<GcRoot<mirror::String>&>(*it).Read();
- }
- return nullptr;
+void InternTable::SwapPostZygoteWithPreZygote() {
+ MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
+ weak_interns_.SwapPostZygoteWithPreZygote();
+ strong_interns_.SwapPostZygoteWithPreZygote();
}
mirror::String* InternTable::InsertStrong(mirror::String* s) {
@@ -116,7 +105,7 @@
if (log_new_roots_) {
new_strong_intern_roots_.push_back(GcRoot<mirror::String>(s));
}
- strong_interns_.insert(GcRoot<mirror::String>(s));
+ strong_interns_.Insert(s);
return s;
}
@@ -125,12 +114,12 @@
if (runtime->IsActiveTransaction()) {
runtime->RecordWeakStringInsertion(s);
}
- weak_interns_.insert(GcRoot<mirror::String>(s));
+ weak_interns_.Insert(s);
return s;
}
void InternTable::RemoveStrong(mirror::String* s) {
- Remove(&strong_interns_, s);
+ strong_interns_.Remove(s);
}
void InternTable::RemoveWeak(mirror::String* s) {
@@ -138,13 +127,7 @@
if (runtime->IsActiveTransaction()) {
runtime->RecordWeakStringRemoval(s);
}
- Remove(&weak_interns_, s);
-}
-
-void InternTable::Remove(Table* table, mirror::String* s) {
- auto it = table->find(GcRoot<mirror::String>(s));
- DCHECK(it != table->end());
- table->erase(it);
+ weak_interns_.Remove(s);
}
// Insert/remove methods used to undo changes made during an aborted transaction.
@@ -165,11 +148,39 @@
RemoveWeak(s);
}
-static mirror::String* LookupStringFromImage(mirror::String* s)
+void InternTable::AddImageStringsToTable(gc::space::ImageSpace* image_space) {
+ MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
+ if (!image_added_to_intern_table_) {
+ mirror::Object* root = image_space->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
+ mirror::ObjectArray<mirror::DexCache>* dex_caches = root->AsObjectArray<mirror::DexCache>();
+ for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
+ mirror::DexCache* dex_cache = dex_caches->Get(i);
+ const DexFile* dex_file = dex_cache->GetDexFile();
+ const size_t num_strings = dex_file->NumStringIds();
+ for (size_t j = 0; j < num_strings; ++j) {
+ mirror::String* image_string = dex_cache->GetResolvedString(j);
+ if (image_string != nullptr) {
+ mirror::String* found = LookupStrong(image_string);
+ if (found == nullptr) {
+ InsertStrong(image_string);
+ } else {
+ DCHECK_EQ(found, image_string);
+ }
+ }
+ }
+ }
+ image_added_to_intern_table_ = true;
+ }
+}
+
+mirror::String* InternTable::LookupStringFromImage(mirror::String* s)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (image_added_to_intern_table_) {
+ return nullptr;
+ }
gc::space::ImageSpace* image = Runtime::Current()->GetHeap()->GetImageSpace();
- if (image == NULL) {
- return NULL; // No image present.
+ if (image == nullptr) {
+ return nullptr; // No image present.
}
mirror::Object* root = image->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
mirror::ObjectArray<mirror::DexCache>* dex_caches = root->AsObjectArray<mirror::DexCache>();
@@ -181,9 +192,9 @@
const DexFile::StringId* string_id = dex_file->FindStringId(utf8.c_str());
if (string_id != NULL) {
uint32_t string_idx = dex_file->GetIndexForStringId(*string_id);
- mirror::String* image = dex_cache->GetResolvedString(string_idx);
- if (image != NULL) {
- return image;
+ mirror::String* image_string = dex_cache->GetResolvedString(string_idx);
+ if (image_string != NULL) {
+ return image_string;
}
}
}
@@ -285,24 +296,12 @@
bool InternTable::ContainsWeak(mirror::String* s) {
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
- const mirror::String* found = LookupWeak(s);
- return found == s;
+ return LookupWeak(s) == s;
}
void InternTable::SweepInternTableWeaks(IsMarkedCallback* callback, void* arg) {
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
- for (auto it = weak_interns_.begin(), end = weak_interns_.end(); it != end;) {
- // This does not need a read barrier because this is called by GC.
- GcRoot<mirror::String>& root = const_cast<GcRoot<mirror::String>&>(*it);
- mirror::Object* object = root.Read<kWithoutReadBarrier>();
- mirror::Object* new_object = callback(object, arg);
- if (new_object == nullptr) {
- it = weak_interns_.erase(it);
- } else {
- root = GcRoot<mirror::String>(down_cast<mirror::String*>(new_object));
- ++it;
- }
- }
+ weak_interns_.SweepWeaks(callback, arg);
}
std::size_t InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& root) {
@@ -321,4 +320,69 @@
const_cast<GcRoot<mirror::String>&>(b).Read());
}
+void InternTable::Table::Remove(mirror::String* s) {
+ auto it = post_zygote_table_.find(GcRoot<mirror::String>(s));
+ if (it != post_zygote_table_.end()) {
+ post_zygote_table_.erase(it);
+ } else {
+ it = pre_zygote_table_.find(GcRoot<mirror::String>(s));
+ DCHECK(it != pre_zygote_table_.end());
+ pre_zygote_table_.erase(it);
+ }
+}
+
+mirror::String* InternTable::Table::Find(mirror::String* s) {
+ Locks::intern_table_lock_->AssertHeld(Thread::Current());
+ auto it = pre_zygote_table_.find(GcRoot<mirror::String>(s));
+ if (LIKELY(it != pre_zygote_table_.end())) {
+ return const_cast<GcRoot<mirror::String>&>(*it).Read();
+ }
+ it = post_zygote_table_.find(GcRoot<mirror::String>(s));
+ if (LIKELY(it != post_zygote_table_.end())) {
+ return const_cast<GcRoot<mirror::String>&>(*it).Read();
+ }
+ return nullptr;
+}
+
+void InternTable::Table::SwapPostZygoteWithPreZygote() {
+ CHECK(pre_zygote_table_.empty());
+ std::swap(pre_zygote_table_, post_zygote_table_);
+}
+
+void InternTable::Table::Insert(mirror::String* s) {
+ // Always insert into the post zygote table; it gets swapped to become the pre zygote table
+ // when we create the zygote.
+ post_zygote_table_.insert(GcRoot<mirror::String>(s));
+}
+
+void InternTable::Table::VisitRoots(RootCallback* callback, void* arg,
+ VisitRootFlags flags ATTRIBUTE_UNUSED) {
+ for (auto& intern : pre_zygote_table_) {
+ const_cast<GcRoot<mirror::String>&>(intern).VisitRoot(callback, arg, 0, kRootInternedString);
+ }
+ for (auto& intern : post_zygote_table_) {
+ const_cast<GcRoot<mirror::String>&>(intern).VisitRoot(callback, arg, 0, kRootInternedString);
+ }
+}
+
+void InternTable::Table::SweepWeaks(IsMarkedCallback* callback, void* arg) {
+ SweepWeaks(&pre_zygote_table_, callback, arg);
+ SweepWeaks(&post_zygote_table_, callback, arg);
+}
+
+void InternTable::Table::SweepWeaks(UnorderedSet* set, IsMarkedCallback* callback, void* arg) {
+ for (auto it = set->begin(), end = set->end(); it != end;) {
+ // This does not need a read barrier because this is called by GC.
+ GcRoot<mirror::String>& root = const_cast<GcRoot<mirror::String>&>(*it);
+ mirror::Object* object = root.Read<kWithoutReadBarrier>();
+ mirror::Object* new_object = callback(object, arg);
+ if (new_object == nullptr) {
+ it = set->erase(it);
+ } else {
+ root = GcRoot<mirror::String>(new_object->AsString());
+ ++it;
+ }
+ }
+}
+
} // namespace art
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index e3223c8..0bff7b9 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -26,6 +26,12 @@
namespace art {
+namespace gc {
+namespace space {
+class ImageSpace;
+} // namespace space
+} // namespace gc
+
enum VisitRootFlags : uint8_t;
namespace mirror {
@@ -66,9 +72,12 @@
bool ContainsWeak(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- size_t Size() const;
- size_t StrongSize() const;
- size_t WeakSize() const;
+ // Total number of interned strings.
+ size_t Size() const LOCKS_EXCLUDED(Locks::intern_table_lock_);
+ // Total number of strongly live interned strings.
+ size_t StrongSize() const LOCKS_EXCLUDED(Locks::intern_table_lock_);
+ // Total number of weakly live interned strings.
+ size_t WeakSize() const LOCKS_EXCLUDED(Locks::intern_table_lock_);
void VisitRoots(RootCallback* callback, void* arg, VisitRootFlags flags)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -78,6 +87,14 @@
void DisallowNewInterns() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
void AllowNewInterns() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Adds all of the resolved image strings from the image space into the intern table. The
+ // advantage of doing this is preventing expensive DexFile::FindStringId calls.
+ void AddImageStringsToTable(gc::space::ImageSpace* image_space)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::intern_table_lock_);
+ // Swap the post zygote tables into the pre zygote tables to save memory by preventing dirty pages.
+ void SwapPostZygoteWithPreZygote()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::intern_table_lock_);
+
private:
class StringHashEquals {
public:
@@ -85,22 +102,60 @@
bool operator()(const GcRoot<mirror::String>& a, const GcRoot<mirror::String>& b)
NO_THREAD_SAFETY_ANALYSIS;
};
- typedef std::unordered_set<GcRoot<mirror::String>, StringHashEquals, StringHashEquals,
- TrackingAllocator<GcRoot<mirror::String>, kAllocatorTagInternTable>> Table;
+
+ // Table which holds pre zygote and post zygote interned strings. There is one instance each
+ // for weak interns and strong interns.
+ class Table {
+ public:
+ mirror::String* Find(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ void Insert(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ void Remove(mirror::String* s)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ void VisitRoots(RootCallback* callback, void* arg, VisitRootFlags flags)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ void SweepWeaks(IsMarkedCallback* callback, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ void SwapPostZygoteWithPreZygote() EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ size_t Size() const EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_) {
+ return pre_zygote_table_.size() + post_zygote_table_.size();
+ }
+
+ private:
+ typedef std::unordered_set<GcRoot<mirror::String>, StringHashEquals, StringHashEquals,
+ TrackingAllocator<GcRoot<mirror::String>, kAllocatorTagInternTable>> UnorderedSet;
+
+ void SweepWeaks(UnorderedSet* set, IsMarkedCallback* callback, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+
+ // We call SwapPostZygoteWithPreZygote when we create the zygote to reduce private dirty pages
+ // caused by modifying the zygote intern table hash table. The pre zygote table holds the
+ // strings which were interned before we created the zygote space; the post zygote table holds
+ // those interned afterwards.
+ UnorderedSet pre_zygote_table_;
+ UnorderedSet post_zygote_table_;
+ };
mirror::String* Insert(mirror::String* s, bool is_strong)
LOCKS_EXCLUDED(Locks::intern_table_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::String* LookupStrong(mirror::String* s)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
mirror::String* LookupWeak(mirror::String* s)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::String* Lookup(Table* table, mirror::String* s)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
mirror::String* InsertStrong(mirror::String* s)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
mirror::String* InsertWeak(mirror::String* s)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
void RemoveStrong(mirror::String* s)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
@@ -108,14 +163,16 @@
void RemoveWeak(mirror::String* s)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
- void Remove(Table* table, mirror::String* s)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
// Transaction rollback access.
+ mirror::String* LookupStringFromImage(mirror::String* s)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
mirror::String* InsertStrongFromTransaction(mirror::String* s)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
mirror::String* InsertWeakFromTransaction(mirror::String* s)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
void RemoveStrongFromTransaction(mirror::String* s)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
@@ -125,6 +182,7 @@
EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
friend class Transaction;
+ bool image_added_to_intern_table_ GUARDED_BY(Locks::intern_table_lock_);
bool log_new_roots_ GUARDED_BY(Locks::intern_table_lock_);
bool allow_new_interns_ GUARDED_BY(Locks::intern_table_lock_);
ConditionVariable new_intern_condition_ GUARDED_BY(Locks::intern_table_lock_);
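The Table comment above describes the pre/post zygote split: every insert goes into the post zygote set, and the two sets are swapped exactly once when the zygote is created, so later inserts never dirty the pages that back the zygote-shared set. A minimal stand-alone sketch of that idea (hypothetical types and names, not ART code):

#include <string>
#include <unordered_set>
#include <utility>

class SplitInternSet {
 public:
  // New strings always land in the post-zygote set.
  void Insert(const std::string& s) { post_zygote_.insert(s); }

  bool Contains(const std::string& s) const {
    return pre_zygote_.count(s) != 0 || post_zygote_.count(s) != 0;
  }

  // Called once when the zygote space is created; afterwards the pre-zygote
  // set is never modified, so its pages stay clean and shareable.
  void SwapPostZygoteWithPreZygote() {
    if (pre_zygote_.empty()) {
      std::swap(pre_zygote_, post_zygote_);
    }
  }

 private:
  std::unordered_set<std::string> pre_zygote_;   // Frozen at zygote creation.
  std::unordered_set<std::string> post_zygote_;  // Receives all new inserts.
};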
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 9de12f2..18de133 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -315,6 +315,10 @@
kSwitchImpl, // Switch-based interpreter implementation.
kComputedGotoImplKind // Computed-goto-based interpreter implementation.
};
+static std::ostream& operator<<(std::ostream& os, const InterpreterImplKind& rhs) {
+ os << ((rhs == kSwitchImpl) ? "Switch-based interpreter" : "Computed-goto-based interpreter");
+ return os;
+}
#if !defined(__clang__)
static constexpr InterpreterImplKind kInterpreterImplKind = kComputedGotoImplKind;
@@ -322,8 +326,7 @@
// Clang 3.4 fails to build the goto interpreter implementation.
static constexpr InterpreterImplKind kInterpreterImplKind = kSwitchImpl;
template<bool do_access_check, bool transaction_active>
-JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register) {
+JValue ExecuteGotoImpl(Thread*, MethodHelper&, const DexFile::CodeItem*, ShadowFrame&, JValue) {
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
}
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 5c77b96..eb80c30 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -855,12 +855,12 @@
// Special managed code cut-out to allow field lookup in a un-started runtime that'd fail
// going the reflective Dex way.
Class* klass = shadow_frame->GetVRegReference(arg_offset)->AsClass();
- String* name = shadow_frame->GetVRegReference(arg_offset + 1)->AsString();
+ String* name2 = shadow_frame->GetVRegReference(arg_offset + 1)->AsString();
ArtField* found = NULL;
ObjectArray<ArtField>* fields = klass->GetIFields();
for (int32_t i = 0; i < fields->GetLength() && found == NULL; ++i) {
ArtField* f = fields->Get(i);
- if (name->Equals(f->GetName())) {
+ if (name2->Equals(f->GetName())) {
found = f;
}
}
@@ -868,14 +868,14 @@
fields = klass->GetSFields();
for (int32_t i = 0; i < fields->GetLength() && found == NULL; ++i) {
ArtField* f = fields->Get(i);
- if (name->Equals(f->GetName())) {
+ if (name2->Equals(f->GetName())) {
found = f;
}
}
}
CHECK(found != NULL)
<< "Failed to find field in Class.getDeclaredField in un-started runtime. name="
- << name->ToModifiedUtf8() << " class=" << PrettyDescriptor(klass);
+ << name2->ToModifiedUtf8() << " class=" << PrettyDescriptor(klass);
// TODO: getDeclaredField calls GetType once the field is found to ensure a
// NoClassDefFoundError is thrown if the field's type cannot be resolved.
Class* jlr_Field = self->DecodeJObject(WellKnownClasses::java_lang_reflect_Field)->AsClass();
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 6a0aaf2..7f6303a 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -216,7 +216,7 @@
static inline bool DoIntDivide(ShadowFrame& shadow_frame, size_t result_reg,
int32_t dividend, int32_t divisor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- const int32_t kMinInt = std::numeric_limits<int32_t>::min();
+ constexpr int32_t kMinInt = std::numeric_limits<int32_t>::min();
if (UNLIKELY(divisor == 0)) {
ThrowArithmeticExceptionDivideByZero();
return false;
@@ -234,7 +234,7 @@
static inline bool DoIntRemainder(ShadowFrame& shadow_frame, size_t result_reg,
int32_t dividend, int32_t divisor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- const int32_t kMinInt = std::numeric_limits<int32_t>::min();
+ constexpr int32_t kMinInt = std::numeric_limits<int32_t>::min();
if (UNLIKELY(divisor == 0)) {
ThrowArithmeticExceptionDivideByZero();
return false;
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index 3a177eb..6350c56 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -15,6 +15,7 @@
*/
#include "interpreter_common.h"
+#include "safe_math.h"
namespace art {
namespace interpreter {
@@ -1634,22 +1635,22 @@
HANDLE_INSTRUCTION_START(ADD_INT)
shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
- shadow_frame.GetVReg(inst->VRegB_23x()) +
- shadow_frame.GetVReg(inst->VRegC_23x()));
+ SafeAdd(shadow_frame.GetVReg(inst->VRegB_23x()),
+ shadow_frame.GetVReg(inst->VRegC_23x())));
ADVANCE(2);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SUB_INT)
shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
- shadow_frame.GetVReg(inst->VRegB_23x()) -
- shadow_frame.GetVReg(inst->VRegC_23x()));
+ SafeSub(shadow_frame.GetVReg(inst->VRegB_23x()),
+ shadow_frame.GetVReg(inst->VRegC_23x())));
ADVANCE(2);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(MUL_INT)
shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
- shadow_frame.GetVReg(inst->VRegB_23x()) *
- shadow_frame.GetVReg(inst->VRegC_23x()));
+ SafeMul(shadow_frame.GetVReg(inst->VRegB_23x()),
+ shadow_frame.GetVReg(inst->VRegC_23x())));
ADVANCE(2);
HANDLE_INSTRUCTION_END();
@@ -1713,22 +1714,22 @@
HANDLE_INSTRUCTION_START(ADD_LONG)
shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
- shadow_frame.GetVRegLong(inst->VRegB_23x()) +
- shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ SafeAdd(shadow_frame.GetVRegLong(inst->VRegB_23x()),
+ shadow_frame.GetVRegLong(inst->VRegC_23x())));
ADVANCE(2);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SUB_LONG)
shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
- shadow_frame.GetVRegLong(inst->VRegB_23x()) -
- shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ SafeSub(shadow_frame.GetVRegLong(inst->VRegB_23x()),
+ shadow_frame.GetVRegLong(inst->VRegC_23x())));
ADVANCE(2);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(MUL_LONG)
shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
- shadow_frame.GetVRegLong(inst->VRegB_23x()) *
- shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ SafeMul(shadow_frame.GetVRegLong(inst->VRegB_23x()),
+ shadow_frame.GetVRegLong(inst->VRegC_23x())));
ADVANCE(2);
HANDLE_INSTRUCTION_END();
@@ -1863,8 +1864,8 @@
HANDLE_INSTRUCTION_START(ADD_INT_2ADDR) {
uint32_t vregA = inst->VRegA_12x(inst_data);
shadow_frame.SetVReg(vregA,
- shadow_frame.GetVReg(vregA) +
- shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ SafeAdd(shadow_frame.GetVReg(vregA),
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
ADVANCE(1);
}
HANDLE_INSTRUCTION_END();
@@ -1872,8 +1873,8 @@
HANDLE_INSTRUCTION_START(SUB_INT_2ADDR) {
uint32_t vregA = inst->VRegA_12x(inst_data);
shadow_frame.SetVReg(vregA,
- shadow_frame.GetVReg(vregA) -
- shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ SafeSub(shadow_frame.GetVReg(vregA),
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
ADVANCE(1);
}
HANDLE_INSTRUCTION_END();
@@ -1881,8 +1882,8 @@
HANDLE_INSTRUCTION_START(MUL_INT_2ADDR) {
uint32_t vregA = inst->VRegA_12x(inst_data);
shadow_frame.SetVReg(vregA,
- shadow_frame.GetVReg(vregA) *
- shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ SafeMul(shadow_frame.GetVReg(vregA),
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
ADVANCE(1);
}
HANDLE_INSTRUCTION_END();
@@ -1960,8 +1961,8 @@
HANDLE_INSTRUCTION_START(ADD_LONG_2ADDR) {
uint32_t vregA = inst->VRegA_12x(inst_data);
shadow_frame.SetVRegLong(vregA,
- shadow_frame.GetVRegLong(vregA) +
- shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ SafeAdd(shadow_frame.GetVRegLong(vregA),
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data))));
ADVANCE(1);
}
HANDLE_INSTRUCTION_END();
@@ -1969,8 +1970,8 @@
HANDLE_INSTRUCTION_START(SUB_LONG_2ADDR) {
uint32_t vregA = inst->VRegA_12x(inst_data);
shadow_frame.SetVRegLong(vregA,
- shadow_frame.GetVRegLong(vregA) -
- shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ SafeSub(shadow_frame.GetVRegLong(vregA),
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data))));
ADVANCE(1);
}
HANDLE_INSTRUCTION_END();
@@ -1978,8 +1979,8 @@
HANDLE_INSTRUCTION_START(MUL_LONG_2ADDR) {
uint32_t vregA = inst->VRegA_12x(inst_data);
shadow_frame.SetVRegLong(vregA,
- shadow_frame.GetVRegLong(vregA) *
- shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ SafeMul(shadow_frame.GetVRegLong(vregA),
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data))));
ADVANCE(1);
}
HANDLE_INSTRUCTION_END();
@@ -2146,22 +2147,22 @@
HANDLE_INSTRUCTION_START(ADD_INT_LIT16)
shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
- shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) +
- inst->VRegC_22s());
+ SafeAdd(shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
+ inst->VRegC_22s()));
ADVANCE(2);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(RSUB_INT)
shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
- inst->VRegC_22s() -
- shadow_frame.GetVReg(inst->VRegB_22s(inst_data)));
+ SafeSub(inst->VRegC_22s(),
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data))));
ADVANCE(2);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(MUL_INT_LIT16)
shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
- shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) *
- inst->VRegC_22s());
+ SafeMul(shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
+ inst->VRegC_22s()));
ADVANCE(2);
HANDLE_INSTRUCTION_END();
@@ -2202,22 +2203,22 @@
HANDLE_INSTRUCTION_START(ADD_INT_LIT8)
shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
- shadow_frame.GetVReg(inst->VRegB_22b()) +
- inst->VRegC_22b());
+ SafeAdd(shadow_frame.GetVReg(inst->VRegB_22b()),
+ inst->VRegC_22b()));
ADVANCE(2);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(RSUB_INT_LIT8)
shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
- inst->VRegC_22b() -
- shadow_frame.GetVReg(inst->VRegB_22b()));
+ SafeSub(inst->VRegC_22b(),
+ shadow_frame.GetVReg(inst->VRegB_22b())));
ADVANCE(2);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(MUL_INT_LIT8)
shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
- shadow_frame.GetVReg(inst->VRegB_22b()) *
- inst->VRegC_22b());
+ SafeMul(shadow_frame.GetVReg(inst->VRegB_22b()),
+ inst->VRegC_22b()));
ADVANCE(2);
HANDLE_INSTRUCTION_END();
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index 9fb90f1..1b6f53e 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -15,6 +15,7 @@
*/
#include "interpreter_common.h"
+#include "safe_math.h"
namespace art {
namespace interpreter {
@@ -1495,25 +1496,26 @@
static_cast<int16_t>(shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
inst = inst->Next_1xx();
break;
- case Instruction::ADD_INT:
+ case Instruction::ADD_INT: {
PREAMBLE();
shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
- shadow_frame.GetVReg(inst->VRegB_23x()) +
- shadow_frame.GetVReg(inst->VRegC_23x()));
+ SafeAdd(shadow_frame.GetVReg(inst->VRegB_23x()),
+ shadow_frame.GetVReg(inst->VRegC_23x())));
inst = inst->Next_2xx();
break;
+ }
case Instruction::SUB_INT:
PREAMBLE();
shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
- shadow_frame.GetVReg(inst->VRegB_23x()) -
- shadow_frame.GetVReg(inst->VRegC_23x()));
+ SafeSub(shadow_frame.GetVReg(inst->VRegB_23x()),
+ shadow_frame.GetVReg(inst->VRegC_23x())));
inst = inst->Next_2xx();
break;
case Instruction::MUL_INT:
PREAMBLE();
shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
- shadow_frame.GetVReg(inst->VRegB_23x()) *
- shadow_frame.GetVReg(inst->VRegC_23x()));
+ SafeMul(shadow_frame.GetVReg(inst->VRegB_23x()),
+ shadow_frame.GetVReg(inst->VRegC_23x())));
inst = inst->Next_2xx();
break;
case Instruction::DIV_INT: {
@@ -1577,29 +1579,29 @@
case Instruction::ADD_LONG:
PREAMBLE();
shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
- shadow_frame.GetVRegLong(inst->VRegB_23x()) +
- shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ SafeAdd(shadow_frame.GetVRegLong(inst->VRegB_23x()),
+ shadow_frame.GetVRegLong(inst->VRegC_23x())));
inst = inst->Next_2xx();
break;
case Instruction::SUB_LONG:
PREAMBLE();
shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
- shadow_frame.GetVRegLong(inst->VRegB_23x()) -
- shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ SafeSub(shadow_frame.GetVRegLong(inst->VRegB_23x()),
+ shadow_frame.GetVRegLong(inst->VRegC_23x())));
inst = inst->Next_2xx();
break;
case Instruction::MUL_LONG:
PREAMBLE();
shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
- shadow_frame.GetVRegLong(inst->VRegB_23x()) *
- shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ SafeMul(shadow_frame.GetVRegLong(inst->VRegB_23x()),
+ shadow_frame.GetVRegLong(inst->VRegC_23x())));
inst = inst->Next_2xx();
break;
case Instruction::DIV_LONG:
PREAMBLE();
DoLongDivide(shadow_frame, inst->VRegA_23x(inst_data),
shadow_frame.GetVRegLong(inst->VRegB_23x()),
- shadow_frame.GetVRegLong(inst->VRegC_23x()));
+ shadow_frame.GetVRegLong(inst->VRegC_23x()));
POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_2xx);
break;
case Instruction::REM_LONG:
@@ -1724,9 +1726,8 @@
case Instruction::ADD_INT_2ADDR: {
PREAMBLE();
uint4_t vregA = inst->VRegA_12x(inst_data);
- shadow_frame.SetVReg(vregA,
- shadow_frame.GetVReg(vregA) +
- shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVReg(vregA, SafeAdd(shadow_frame.GetVReg(vregA),
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
inst = inst->Next_1xx();
break;
}
@@ -1734,8 +1735,8 @@
PREAMBLE();
uint4_t vregA = inst->VRegA_12x(inst_data);
shadow_frame.SetVReg(vregA,
- shadow_frame.GetVReg(vregA) -
- shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ SafeSub(shadow_frame.GetVReg(vregA),
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
inst = inst->Next_1xx();
break;
}
@@ -1743,8 +1744,8 @@
PREAMBLE();
uint4_t vregA = inst->VRegA_12x(inst_data);
shadow_frame.SetVReg(vregA,
- shadow_frame.GetVReg(vregA) *
- shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ SafeMul(shadow_frame.GetVReg(vregA),
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
inst = inst->Next_1xx();
break;
}
@@ -1822,8 +1823,8 @@
PREAMBLE();
uint4_t vregA = inst->VRegA_12x(inst_data);
shadow_frame.SetVRegLong(vregA,
- shadow_frame.GetVRegLong(vregA) +
- shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ SafeAdd(shadow_frame.GetVRegLong(vregA),
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data))));
inst = inst->Next_1xx();
break;
}
@@ -1831,8 +1832,8 @@
PREAMBLE();
uint4_t vregA = inst->VRegA_12x(inst_data);
shadow_frame.SetVRegLong(vregA,
- shadow_frame.GetVRegLong(vregA) -
- shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ SafeSub(shadow_frame.GetVRegLong(vregA),
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data))));
inst = inst->Next_1xx();
break;
}
@@ -1840,8 +1841,8 @@
PREAMBLE();
uint4_t vregA = inst->VRegA_12x(inst_data);
shadow_frame.SetVRegLong(vregA,
- shadow_frame.GetVRegLong(vregA) *
- shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ SafeMul(shadow_frame.GetVRegLong(vregA),
+ shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data))));
inst = inst->Next_1xx();
break;
}
@@ -2008,22 +2009,22 @@
case Instruction::ADD_INT_LIT16:
PREAMBLE();
shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
- shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) +
- inst->VRegC_22s());
+ SafeAdd(shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
+ inst->VRegC_22s()));
inst = inst->Next_2xx();
break;
- case Instruction::RSUB_INT:
+ case Instruction::RSUB_INT_LIT16:
PREAMBLE();
shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
- inst->VRegC_22s() -
- shadow_frame.GetVReg(inst->VRegB_22s(inst_data)));
+ SafeSub(inst->VRegC_22s(),
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data))));
inst = inst->Next_2xx();
break;
case Instruction::MUL_INT_LIT16:
PREAMBLE();
shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
- shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) *
- inst->VRegC_22s());
+ SafeMul(shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
+ inst->VRegC_22s()));
inst = inst->Next_2xx();
break;
case Instruction::DIV_INT_LIT16: {
@@ -2064,22 +2065,19 @@
case Instruction::ADD_INT_LIT8:
PREAMBLE();
shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
- shadow_frame.GetVReg(inst->VRegB_22b()) +
- inst->VRegC_22b());
+ SafeAdd(shadow_frame.GetVReg(inst->VRegB_22b()), inst->VRegC_22b()));
inst = inst->Next_2xx();
break;
case Instruction::RSUB_INT_LIT8:
PREAMBLE();
shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
- inst->VRegC_22b() -
- shadow_frame.GetVReg(inst->VRegB_22b()));
+ SafeSub(inst->VRegC_22b(), shadow_frame.GetVReg(inst->VRegB_22b())));
inst = inst->Next_2xx();
break;
case Instruction::MUL_INT_LIT8:
PREAMBLE();
shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
- shadow_frame.GetVReg(inst->VRegB_22b()) *
- inst->VRegC_22b());
+ SafeMul(shadow_frame.GetVReg(inst->VRegB_22b()), inst->VRegC_22b()));
inst = inst->Next_2xx();
break;
case Instruction::DIV_INT_LIT8: {
diff --git a/runtime/interpreter/safe_math.h b/runtime/interpreter/safe_math.h
new file mode 100644
index 0000000..78b3539
--- /dev/null
+++ b/runtime/interpreter/safe_math.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_INTERPRETER_SAFE_MATH_H_
+#define ART_RUNTIME_INTERPRETER_SAFE_MATH_H_
+
+#include <functional>
+#include <type_traits>
+
+namespace art {
+namespace interpreter {
+
+// Declares a type which is the larger in bit size of the two template parameters.
+template <typename T1, typename T2>
+struct select_bigger {
+ typedef typename std::conditional<sizeof(T1) >= sizeof(T2), T1, T2>::type type;
+};
+
+// Perform signed arithmetic Op on 'a' and 'b' with defined wrapping behavior.
+template<template <typename OpT> class Op, typename T1, typename T2>
+static inline typename select_bigger<T1, T2>::type SafeMath(T1 a, T2 b) {
+ typedef typename select_bigger<T1, T2>::type biggest_T;
+ typedef typename std::make_unsigned<biggest_T>::type unsigned_biggest_T;
+ static_assert(std::is_signed<T1>::value, "Expected T1 to be signed");
+ static_assert(std::is_signed<T2>::value, "Expected T2 to be signed");
+ unsigned_biggest_T val1 = static_cast<unsigned_biggest_T>(static_cast<biggest_T>(a));
+ unsigned_biggest_T val2 = static_cast<unsigned_biggest_T>(b);
+ return static_cast<biggest_T>(Op<unsigned_biggest_T>()(val1, val2));
+}
+
+// Perform a signed add on 'a' and 'b' with defined wrapping behavior.
+template<typename T1, typename T2>
+static inline typename select_bigger<T1, T2>::type SafeAdd(T1 a, T2 b) {
+ return SafeMath<std::plus>(a, b);
+}
+
+// Perform a signed subtract on 'a' and 'b' with defined wrapping behavior.
+template<typename T1, typename T2>
+static inline typename select_bigger<T1, T2>::type SafeSub(T1 a, T2 b) {
+ return SafeMath<std::minus>(a, b);
+}
+
+// Perform a signed multiply on 'a' and 'b' with defined wrapping behavior.
+template<typename T1, typename T2>
+static inline typename select_bigger<T1, T2>::type SafeMul(T1 a, T2 b) {
+ return SafeMath<std::multiplies>(a, b);
+}
+
+} // namespace interpreter
+} // namespace art
+
+#endif // ART_RUNTIME_INTERPRETER_SAFE_MATH_H_
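The header above gets defined wrapping for signed add, subtract and multiply by widening both operands to the larger signed type, doing the operation in the matching unsigned type (where wraparound is well defined), and casting back. A stand-alone sketch of the same trick for 32-bit add, independent of safe_math.h (the function name and the demo are hypothetical):

#include <cstdint>
#include <iostream>
#include <limits>

int32_t WrappingAdd32(int32_t a, int32_t b) {
  // Unsigned overflow wraps modulo 2^32, so the addition itself is never UB;
  // the cast back to int32_t reinterprets the bits as two's complement.
  uint32_t ua = static_cast<uint32_t>(a);
  uint32_t ub = static_cast<uint32_t>(b);
  return static_cast<int32_t>(ua + ub);
}

int main() {
  int32_t max = std::numeric_limits<int32_t>::max();
  // 0x7fffffff + 1 wraps to 0x80000000, i.e. INT32_MIN.
  std::cout << WrappingAdd32(max, 1) << "\n";  // Prints -2147483648.
  return 0;
}

The safe_math_test.cc cases that follow exercise exactly this behavior on the real SafeAdd/SafeSub/SafeMul helpers.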
diff --git a/runtime/interpreter/safe_math_test.cc b/runtime/interpreter/safe_math_test.cc
new file mode 100644
index 0000000..28087a3
--- /dev/null
+++ b/runtime/interpreter/safe_math_test.cc
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "safe_math.h"
+
+#include <limits>
+
+#include "gtest/gtest.h"
+
+namespace art {
+namespace interpreter {
+
+TEST(SafeMath, Add) {
+ // Adding 1 overflows 0x7ff... to 0x800... aka max and min.
+ EXPECT_EQ(SafeAdd(std::numeric_limits<int32_t>::max(), 1),
+ std::numeric_limits<int32_t>::min());
+ EXPECT_EQ(SafeAdd(std::numeric_limits<int64_t>::max(), 1),
+ std::numeric_limits<int64_t>::min());
+
+ // Vanilla arithmetic should work too.
+ EXPECT_EQ(SafeAdd(std::numeric_limits<int32_t>::max() - 1, 1),
+ std::numeric_limits<int32_t>::max());
+ EXPECT_EQ(SafeAdd(std::numeric_limits<int64_t>::max() - 1, 1),
+ std::numeric_limits<int64_t>::max());
+
+ EXPECT_EQ(SafeAdd(std::numeric_limits<int32_t>::min() + 1, -1),
+ std::numeric_limits<int32_t>::min());
+ EXPECT_EQ(SafeAdd(std::numeric_limits<int64_t>::min() + 1, -1),
+ std::numeric_limits<int64_t>::min());
+
+ EXPECT_EQ(SafeAdd(int32_t(-1), -1), -2);
+ EXPECT_EQ(SafeAdd(int64_t(-1), -1), -2);
+
+ EXPECT_EQ(SafeAdd(int32_t(1), 1), 2);
+ EXPECT_EQ(SafeAdd(int64_t(1), 1), 2);
+
+ EXPECT_EQ(SafeAdd(int32_t(-1), 1), 0);
+ EXPECT_EQ(SafeAdd(int64_t(-1), 1), 0);
+
+ EXPECT_EQ(SafeAdd(int32_t(1), -1), 0);
+ EXPECT_EQ(SafeAdd(int64_t(1), -1), 0);
+
+ // Test sign extension of smaller operand sizes.
+ EXPECT_EQ(SafeAdd(int32_t(1), int8_t(-1)), 0);
+ EXPECT_EQ(SafeAdd(int64_t(1), int8_t(-1)), 0);
+}
+
+TEST(SafeMath, Sub) {
+ // Subtracting 1 underflows 0x800... to 0x7ff... aka min and max.
+ EXPECT_EQ(SafeSub(std::numeric_limits<int32_t>::min(), 1),
+ std::numeric_limits<int32_t>::max());
+ EXPECT_EQ(SafeSub(std::numeric_limits<int64_t>::min(), 1),
+ std::numeric_limits<int64_t>::max());
+
+ // Vanilla arithmetic should work too.
+ EXPECT_EQ(SafeSub(std::numeric_limits<int32_t>::max() - 1, -1),
+ std::numeric_limits<int32_t>::max());
+ EXPECT_EQ(SafeSub(std::numeric_limits<int64_t>::max() - 1, -1),
+ std::numeric_limits<int64_t>::max());
+
+ EXPECT_EQ(SafeSub(std::numeric_limits<int32_t>::min() + 1, 1),
+ std::numeric_limits<int32_t>::min());
+ EXPECT_EQ(SafeSub(std::numeric_limits<int64_t>::min() + 1, 1),
+ std::numeric_limits<int64_t>::min());
+
+ EXPECT_EQ(SafeSub(int32_t(-1), -1), 0);
+ EXPECT_EQ(SafeSub(int64_t(-1), -1), 0);
+
+ EXPECT_EQ(SafeSub(int32_t(1), 1), 0);
+ EXPECT_EQ(SafeSub(int64_t(1), 1), 0);
+
+ EXPECT_EQ(SafeSub(int32_t(-1), 1), -2);
+ EXPECT_EQ(SafeSub(int64_t(-1), 1), -2);
+
+ EXPECT_EQ(SafeSub(int32_t(1), -1), 2);
+ EXPECT_EQ(SafeSub(int64_t(1), -1), 2);
+
+ // Test sign extension of smaller operand sizes.
+ EXPECT_EQ(SafeSub(int32_t(1), int8_t(-1)), 2);
+ EXPECT_EQ(SafeSub(int64_t(1), int8_t(-1)), 2);
+}
+
+TEST(SafeMath, Mul) {
+ // Multiplying by 2 overflows 0x7ff...f to 0xfff...e aka max and -2.
+ EXPECT_EQ(SafeMul(std::numeric_limits<int32_t>::max(), 2),
+ -2);
+ EXPECT_EQ(SafeMul(std::numeric_limits<int64_t>::max(), 2),
+ -2);
+
+ // Vanilla arithmetic should work too.
+ EXPECT_EQ(SafeMul(std::numeric_limits<int32_t>::max() / 2, 2),
+ std::numeric_limits<int32_t>::max() - 1); // -1 as LSB is lost by division.
+ EXPECT_EQ(SafeMul(std::numeric_limits<int64_t>::max() / 2, 2),
+ std::numeric_limits<int64_t>::max() - 1); // -1 as LSB is lost by division.
+
+ EXPECT_EQ(SafeMul(std::numeric_limits<int32_t>::min() / 2, 2),
+ std::numeric_limits<int32_t>::min());
+ EXPECT_EQ(SafeMul(std::numeric_limits<int64_t>::min() / 2, 2),
+ std::numeric_limits<int64_t>::min());
+
+ EXPECT_EQ(SafeMul(int32_t(-1), -1), 1);
+ EXPECT_EQ(SafeMul(int64_t(-1), -1), 1);
+
+ EXPECT_EQ(SafeMul(int32_t(1), 1), 1);
+ EXPECT_EQ(SafeMul(int64_t(1), 1), 1);
+
+ EXPECT_EQ(SafeMul(int32_t(-1), 1), -1);
+ EXPECT_EQ(SafeMul(int64_t(-1), 1), -1);
+
+ EXPECT_EQ(SafeMul(int32_t(1), -1), -1);
+ EXPECT_EQ(SafeMul(int64_t(1), -1), -1);
+
+ // Test sign extension of smaller operand sizes.
+ EXPECT_EQ(SafeMul(int32_t(1), int8_t(-1)), -1);
+ EXPECT_EQ(SafeMul(int64_t(1), int8_t(-1)), -1);
+}
+
+} // namespace interpreter
+} // namespace art
diff --git a/runtime/jdwp/jdwp_adb.cc b/runtime/jdwp/jdwp_adb.cc
index fe91bb6..df7d068 100644
--- a/runtime/jdwp/jdwp_adb.cc
+++ b/runtime/jdwp/jdwp_adb.cc
@@ -84,13 +84,13 @@
shutting_down_ = true;
int control_sock = this->control_sock_;
- int clientSock = this->clientSock;
+ int local_clientSock = this->clientSock;
/* clear these out so it doesn't wake up and try to reuse them */
this->control_sock_ = this->clientSock = -1;
- if (clientSock != -1) {
- shutdown(clientSock, SHUT_RDWR);
+ if (local_clientSock != -1) {
+ shutdown(local_clientSock, SHUT_RDWR);
}
if (control_sock != -1) {
diff --git a/runtime/jdwp/jdwp_bits.h b/runtime/jdwp/jdwp_bits.h
index 9f80cbe..f9cf9ca 100644
--- a/runtime/jdwp/jdwp_bits.h
+++ b/runtime/jdwp/jdwp_bits.h
@@ -68,7 +68,7 @@
// @deprecated
static inline void Set1(uint8_t* buf, uint8_t val) {
- *buf = (uint8_t)(val);
+ *buf = val;
}
// @deprecated
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index d1229b2..44f713c 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -138,7 +138,7 @@
}
}
-uint32_t GetInstrumentationEventFor(JdwpEventKind eventKind) {
+static uint32_t GetInstrumentationEventFor(JdwpEventKind eventKind) {
switch (eventKind) {
case EK_BREAKPOINT:
case EK_SINGLE_STEP:
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index 7fdc18e..be34bd3 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -1126,6 +1126,7 @@
static JdwpError TR_Interrupt(JdwpState*, Request* request, ExpandBuf* reply)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UNUSED(reply);
ObjectId thread_id = request->ReadThreadId();
return Dbg::Interrupt(thread_id);
}
diff --git a/runtime/jdwp/jdwp_socket.cc b/runtime/jdwp/jdwp_socket.cc
index 4a80957..e8c0856 100644
--- a/runtime/jdwp/jdwp_socket.cc
+++ b/runtime/jdwp/jdwp_socket.cc
@@ -170,20 +170,20 @@
* for an open port.)
*/
void JdwpSocketState::Shutdown() {
- int listenSock = this->listenSock;
- int clientSock = this->clientSock;
+ int local_listenSock = this->listenSock;
+ int local_clientSock = this->clientSock;
/* clear these out so it doesn't wake up and try to reuse them */
this->listenSock = this->clientSock = -1;
/* "shutdown" dislodges blocking read() and accept() calls */
- if (listenSock != -1) {
- shutdown(listenSock, SHUT_RDWR);
- close(listenSock);
+ if (local_listenSock != -1) {
+ shutdown(local_listenSock, SHUT_RDWR);
+ close(local_listenSock);
}
- if (clientSock != -1) {
- shutdown(clientSock, SHUT_RDWR);
- close(clientSock);
+ if (local_clientSock != -1) {
+ shutdown(local_clientSock, SHUT_RDWR);
+ close(local_clientSock);
}
WakePipe();
diff --git a/runtime/jdwp/object_registry.cc b/runtime/jdwp/object_registry.cc
index 4f34896..9123994 100644
--- a/runtime/jdwp/object_registry.cc
+++ b/runtime/jdwp/object_registry.cc
@@ -17,6 +17,7 @@
#include "object_registry.h"
#include "handle_scope-inl.h"
+#include "jni_internal.h"
#include "mirror/class.h"
#include "scoped_thread_state_change.h"
@@ -213,10 +214,10 @@
// Erase the object from the maps. Note object may be null if it's
// a weak ref and the GC has cleared it.
int32_t hash_code = entry->identity_hash_code;
- for (auto it = object_to_entry_.lower_bound(hash_code), end = object_to_entry_.end();
- it != end && it->first == hash_code; ++it) {
- if (entry == it->second) {
- object_to_entry_.erase(it);
+ for (auto inner_it = object_to_entry_.lower_bound(hash_code), end = object_to_entry_.end();
+ inner_it != end && inner_it->first == hash_code; ++inner_it) {
+ if (entry == inner_it->second) {
+ object_to_entry_.erase(inner_it);
break;
}
}
diff --git a/runtime/jni_env_ext.cc b/runtime/jni_env_ext.cc
index 180e3d7..b2d3835 100644
--- a/runtime/jni_env_ext.cc
+++ b/runtime/jni_env_ext.cc
@@ -28,9 +28,9 @@
static constexpr size_t kLocalsInitial = 64; // Arbitrary.
-JNIEnvExt::JNIEnvExt(Thread* self, JavaVMExt* vm)
- : self(self),
- vm(vm),
+JNIEnvExt::JNIEnvExt(Thread* self_in, JavaVMExt* vm_in)
+ : self(self_in),
+ vm(vm_in),
local_ref_cookie(IRT_FIRST_SEGMENT),
locals(kLocalsInitial, kLocalsMax, kLocal),
check_jni(false),
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index ad06b85..67e52cb 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -196,8 +196,8 @@
// Failed to find type from the signature of the field.
DCHECK(soa.Self()->IsExceptionPending());
ThrowLocation throw_location;
- StackHandleScope<1> hs(soa.Self());
- Handle<mirror::Throwable> cause(hs.NewHandle(soa.Self()->GetException(&throw_location)));
+ StackHandleScope<1> hs2(soa.Self());
+ Handle<mirror::Throwable> cause(hs2.NewHandle(soa.Self()->GetException(&throw_location)));
soa.Self()->ClearException();
std::string temp;
soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/NoSuchFieldError;",
@@ -1749,6 +1749,7 @@
}
static void ReleaseStringCritical(JNIEnv* env, jstring java_string, const jchar* chars) {
+ UNUSED(chars);
CHECK_NON_NULL_ARGUMENT_RETURN_VOID(java_string);
ScopedObjectAccess soa(env);
gc::Heap* heap = Runtime::Current()->GetHeap();
@@ -1777,7 +1778,7 @@
return bytes;
}
- static void ReleaseStringUTFChars(JNIEnv* env, jstring, const char* chars) {
+ static void ReleaseStringUTFChars(JNIEnv*, jstring, const char* chars) {
delete[] chars;
}
diff --git a/runtime/lock_word-inl.h b/runtime/lock_word-inl.h
index cf6f83c..c52578f 100644
--- a/runtime/lock_word-inl.h
+++ b/runtime/lock_word-inl.h
@@ -34,13 +34,13 @@
inline Monitor* LockWord::FatLockMonitor() const {
DCHECK_EQ(GetState(), kFatLocked);
- MonitorId mon_id = static_cast<MonitorId>(value_ & ~(kStateMask << kStateShift));
+ MonitorId mon_id = value_ & ~(kStateMask << kStateShift);
return MonitorPool::MonitorFromMonitorId(mon_id);
}
inline size_t LockWord::ForwardingAddress() const {
DCHECK_EQ(GetState(), kForwardingAddress);
- return static_cast<size_t>(value_ << kStateSize);
+ return value_ << kStateSize;
}
inline LockWord::LockWord() : value_(0) {
diff --git a/runtime/lock_word.h b/runtime/lock_word.h
index 13cc3b0..2d5c71b 100644
--- a/runtime/lock_word.h
+++ b/runtime/lock_word.h
@@ -52,7 +52,7 @@
*/
class LockWord {
public:
- enum {
+ enum SizeShiftsAndMasks { // Named to prevent generate-operator-out.py from processing it.
// Number of bits to encode the state, currently just fat or thin/unlocked or hash code.
kStateSize = 2,
// Number of bits to encode the thin lock owner.
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index c118471..8303f84 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -28,7 +28,12 @@
#endif
#include "base/stringprintf.h"
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
#include "ScopedFd.h"
+#pragma GCC diagnostic pop
+
#include "thread-inl.h"
#include "utils.h"
@@ -238,6 +243,9 @@
MemMap* MemMap::MapAnonymous(const char* name, uint8_t* expected_ptr, size_t byte_count, int prot,
bool low_4gb, std::string* error_msg) {
+#ifndef __LP64__
+ UNUSED(low_4gb);
+#endif
if (byte_count == 0) {
return new MemMap(name, nullptr, 0, nullptr, 0, prot, false);
}
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index df1222c..9b003aa 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -175,6 +175,7 @@
friend class MemMapTest; // To allow access to base_begin_ and base_size_.
};
std::ostream& operator<<(std::ostream& os, const MemMap& mem_map);
+std::ostream& operator<<(std::ostream& os, const MemMap::Maps& mem_maps);
} // namespace art
diff --git a/runtime/memory_region.h b/runtime/memory_region.h
index 4eb6d47..b3820be 100644
--- a/runtime/memory_region.h
+++ b/runtime/memory_region.h
@@ -32,7 +32,7 @@
class MemoryRegion FINAL : public ValueObject {
public:
MemoryRegion() : pointer_(nullptr), size_(0) {}
- MemoryRegion(void* pointer, uintptr_t size) : pointer_(pointer), size_(size) {}
+ MemoryRegion(void* pointer_in, uintptr_t size_in) : pointer_(pointer_in), size_(size_in) {}
void* pointer() const { return pointer_; }
size_t size() const { return size_; }
@@ -78,10 +78,10 @@
void CopyFrom(size_t offset, const MemoryRegion& from) const;
// Compute a sub memory region based on an existing one.
- MemoryRegion Subregion(uintptr_t offset, uintptr_t size) const {
- CHECK_GE(this->size(), size);
- CHECK_LE(offset, this->size() - size);
- return MemoryRegion(reinterpret_cast<void*>(start() + offset), size);
+ MemoryRegion Subregion(uintptr_t offset, uintptr_t size_in) const {
+ CHECK_GE(this->size(), size_in);
+ CHECK_LE(offset, this->size() - size_in);
+ return MemoryRegion(reinterpret_cast<void*>(start() + offset), size_in);
}
// Compute an extended memory region based on an existing one.
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 7e1ad78..13f881d 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -81,6 +81,7 @@
// 64-bit. No overflow as component_count is 32-bit and the maximum
// component size is 8.
DCHECK_LE((1U << component_size_shift), 8U);
+ UNUSED(self);
#else
// 32-bit.
DCHECK_NE(header_size, 0U);
diff --git a/runtime/mirror/array.cc b/runtime/mirror/array.cc
index 636be33..b92f017 100644
--- a/runtime/mirror/array.cc
+++ b/runtime/mirror/array.cc
@@ -58,8 +58,8 @@
if (current_dimension + 1 < dimensions->GetLength()) {
// Create a new sub-array in every element of the array.
for (int32_t i = 0; i < array_length; i++) {
- StackHandleScope<1> hs(self);
- Handle<mirror::Class> h_component_type(hs.NewHandle(array_class->GetComponentType()));
+ StackHandleScope<1> hs2(self);
+ Handle<mirror::Class> h_component_type(hs2.NewHandle(array_class->GetComponentType()));
Array* sub_array = RecursiveCreateMultiArray(self, h_component_type,
current_dimension + 1, dimensions);
if (UNLIKELY(sub_array == nullptr)) {
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index ca361f8..494fa2f 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -68,6 +68,10 @@
return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, method_index_));
}
+inline uint16_t ArtMethod::GetMethodIndexDuringLinking() {
+ return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, method_index_));
+}
+
inline uint32_t ArtMethod::GetDexMethodIndex() {
#ifdef ART_SEA_IR_MODE
// TODO: Re-add this check for (PORTABLE + SMALL + ) SEA IR when PORTABLE IS fixed!
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index 6927f1d..08c0996 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -178,6 +178,9 @@
uint16_t GetMethodIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Doesn't do erroneous / unresolved class checks.
+ uint16_t GetMethodIndexDuringLinking() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
size_t GetVtableIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return GetMethodIndex();
}
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 892bf44..5f72dbe 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -279,7 +279,7 @@
template <bool throw_on_failure, bool use_referrers_cache, InvokeType throw_invoke_type>
inline bool Class::ResolvedMethodAccessTest(Class* access_to, ArtMethod* method,
uint32_t method_idx, DexCache* dex_cache) {
- COMPILE_ASSERT(throw_on_failure || throw_invoke_type == kStatic, non_default_throw_invoke_type);
+ static_assert(throw_on_failure || throw_invoke_type == kStatic, "Non-default throw invoke type");
DCHECK_EQ(use_referrers_cache, dex_cache == nullptr);
if (UNLIKELY(!this->CanAccess(access_to))) {
// The referrer class can't access the method's declaring class but may still be able
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 8eafd6f..5665059 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -117,7 +117,7 @@
self->SetException(gc_safe_throw_location, old_exception.Get());
self->SetExceptionReportedToInstrumentation(is_exception_reported);
}
- COMPILE_ASSERT(sizeof(Status) == sizeof(uint32_t), size_of_status_not_uint32);
+ static_assert(sizeof(Status) == sizeof(uint32_t), "Size of status not equal to uint32");
if (Runtime::Current()->IsActiveTransaction()) {
SetField32Volatile<true>(OFFSET_OF_OBJECT_MEMBER(Class, status_), new_status);
} else {
@@ -489,7 +489,10 @@
if (GetDexCache() == dex_cache) {
for (size_t i = 0; i < NumVirtualMethods(); ++i) {
ArtMethod* method = GetVirtualMethod(i);
- if (method->GetDexMethodIndex() == dex_method_idx) {
+ if (method->GetDexMethodIndex() == dex_method_idx &&
+ // A miranda method may have a different DexCache and is always created by linking,
+ // never *declared* in the class.
+ !method->IsMiranda()) {
return method;
}
}
@@ -625,8 +628,8 @@
HandleWrapper<mirror::Class> h_k(hs.NewHandleWrapper(&k));
// Is this field in any of this class' interfaces?
for (uint32_t i = 0; i < h_k->NumDirectInterfaces(); ++i) {
- StackHandleScope<1> hs(self);
- Handle<mirror::Class> interface(hs.NewHandle(GetDirectInterface(self, h_k, i)));
+ StackHandleScope<1> hs2(self);
+ Handle<mirror::Class> interface(hs2.NewHandle(GetDirectInterface(self, h_k, i)));
f = FindStaticField(self, interface, name, type);
if (f != nullptr) {
return f;
@@ -649,8 +652,8 @@
HandleWrapper<mirror::Class> h_k(hs.NewHandleWrapper(&k));
// Is this field in any of this class' interfaces?
for (uint32_t i = 0; i < h_k->NumDirectInterfaces(); ++i) {
- StackHandleScope<1> hs(self);
- Handle<mirror::Class> interface(hs.NewHandle(GetDirectInterface(self, h_k, i)));
+ StackHandleScope<1> hs2(self);
+ Handle<mirror::Class> interface(hs2.NewHandle(GetDirectInterface(self, h_k, i)));
f = FindStaticField(self, interface, dex_cache, dex_field_idx);
if (f != nullptr) {
return f;
@@ -677,8 +680,8 @@
StackHandleScope<1> hs(self);
HandleWrapper<mirror::Class> h_k(hs.NewHandleWrapper(&k));
for (uint32_t i = 0; i < h_k->NumDirectInterfaces(); ++i) {
- StackHandleScope<1> hs(self);
- Handle<mirror::Class> interface(hs.NewHandle(GetDirectInterface(self, h_k, i)));
+ StackHandleScope<1> hs2(self);
+ Handle<mirror::Class> interface(hs2.NewHandle(GetDirectInterface(self, h_k, i)));
f = interface->FindStaticField(self, interface, name, type);
if (f != nullptr) {
return f;
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index d9094fc..21cf53f 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -131,7 +131,7 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
Status GetStatus() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- COMPILE_ASSERT(sizeof(Status) == sizeof(uint32_t), size_of_status_not_uint32);
+ static_assert(sizeof(Status) == sizeof(uint32_t), "Size of status not equal to uint32");
return static_cast<Status>(
GetField32Volatile<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, status_)));
}
@@ -839,7 +839,7 @@
// Returns the number of static fields containing reference types.
uint32_t NumReferenceStaticFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(IsResolved() || IsErroneous()) << PrettyClass(this) << " status=" << GetStatus();
+ DCHECK(IsResolved() || IsErroneous());
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_));
}
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index c9e60bc..c451764 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -135,6 +135,7 @@
#else
LOG(FATAL) << "Unreachable";
UNREACHABLE();
+ UNUSED(rb_ptr);
#endif
}
@@ -156,6 +157,7 @@
DCHECK_EQ(new_ref.reference_, atomic_rb_ptr->LoadRelaxed());
return true;
#else
+ UNUSED(expected_rb_ptr, rb_ptr);
LOG(FATAL) << "Unreachable";
UNREACHABLE();
#endif
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 9578c97..4227723 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -135,11 +135,11 @@
return copy;
}
-int32_t Object::GenerateIdentityHashCode() {
- static AtomicInteger seed(987654321 + std::time(nullptr));
- int32_t expected_value, new_value;
+uint32_t Object::GenerateIdentityHashCode() {
+ static Atomic<uint32_t> seed(987654321U + std::time(nullptr));
+ uint32_t expected_value, new_value;
do {
- expected_value = static_cast<uint32_t>(seed.LoadRelaxed());
+ expected_value = seed.LoadRelaxed();
new_value = expected_value * 1103515245 + 12345;
} while ((expected_value & LockWord::kHashMask) == 0 ||
!seed.CompareExchangeWeakRelaxed(expected_value, new_value));
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index b2b2420..0ce5231 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -451,7 +451,7 @@
}
// Generate an identity hash code.
- static int32_t GenerateIdentityHashCode();
+ static uint32_t GenerateIdentityHashCode();
// A utility function that copies an object in a read barrier and
// write barrier-aware way. This is internally used by Clone() and
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 5020ced..0439428 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -543,7 +543,7 @@
thread->SetWaitNext(nullptr);
// Check to see if the thread is still waiting.
- MutexLock mu(self, *thread->GetWaitMutex());
+ MutexLock wait_mu(self, *thread->GetWaitMutex());
if (thread->GetWaitMonitor() != nullptr) {
thread->GetWaitConditionVariable()->Signal(self);
return;
@@ -992,12 +992,12 @@
for (size_t i = 0; i < monitor_enter_dex_pcs.size(); ++i) {
// The verifier works in terms of the dex pcs of the monitor-enter instructions.
// We want the registers used by those instructions (so we can read the values out of them).
- uint32_t dex_pc = monitor_enter_dex_pcs[i];
- uint16_t monitor_enter_instruction = code_item->insns_[dex_pc];
+ uint32_t monitor_dex_pc = monitor_enter_dex_pcs[i];
+ uint16_t monitor_enter_instruction = code_item->insns_[monitor_dex_pc];
// Quick sanity check.
if ((monitor_enter_instruction & 0xff) != Instruction::MONITOR_ENTER) {
- LOG(FATAL) << "expected monitor-enter @" << dex_pc << "; was "
+ LOG(FATAL) << "expected monitor-enter @" << monitor_dex_pc << "; was "
<< reinterpret_cast<void*>(monitor_enter_instruction);
}
diff --git a/runtime/monitor_pool.h b/runtime/monitor_pool.h
index 5b92093..27678dc 100644
--- a/runtime/monitor_pool.h
+++ b/runtime/monitor_pool.h
@@ -53,6 +53,7 @@
static void ReleaseMonitor(Thread* self, Monitor* monitor) {
#ifndef __LP64__
+ UNUSED(self);
delete monitor;
#else
GetMonitorPool()->ReleaseMonitorToPool(self, monitor);
@@ -61,6 +62,7 @@
static void ReleaseMonitors(Thread* self, MonitorList::Monitors* monitors) {
#ifndef __LP64__
+ UNUSED(self);
STLDeleteElements(monitors);
#else
GetMonitorPool()->ReleaseMonitorsToPool(self, monitors);
@@ -85,6 +87,7 @@
static MonitorId ComputeMonitorId(Monitor* mon, Thread* self) {
#ifndef __LP64__
+ UNUSED(self);
return MonitorIdFromMonitor(mon);
#else
return GetMonitorPool()->ComputeMonitorIdInPool(mon, self);
diff --git a/runtime/monitor_test.cc b/runtime/monitor_test.cc
index 704e041..adc7848 100644
--- a/runtime/monitor_test.cc
+++ b/runtime/monitor_test.cc
@@ -341,8 +341,7 @@
// Wake the watchdog.
{
- Thread* self = Thread::Current();
- ScopedObjectAccess soa(self);
+ ScopedObjectAccess soa(Thread::Current());
test->watchdog_object_.Get()->MonitorEnter(self); // Lock the object.
test->watchdog_object_.Get()->NotifyAll(self); // Wake up waiting parties.
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 2d038cf..012e03e 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "dalvik_system_DexFile.h"
+
#include <algorithm>
#include <set>
#include <fcntl.h>
@@ -43,13 +45,17 @@
#include "profiler.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
-#include "ScopedFd.h"
#include "ScopedLocalRef.h"
#include "ScopedUtfChars.h"
#include "utils.h"
#include "well_known_classes.h"
#include "zip_archive.h"
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+#include "ScopedFd.h"
+#pragma GCC diagnostic pop
+
namespace art {
// A smart pointer that provides read-only access to a Java string's UTF chars.
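The relocated include above is wrapped in push/ignored/pop so -Wshadow stays enabled for ART's own code but is silenced only for a header that cannot be edited. The generic shape of the pattern (the header name is a made-up example; Clang honours the "GCC diagnostic" spelling as well):

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
#include "legacy/ScopedThing.h"  // Hypothetical third-party header that shadows a name.
#pragma GCC diagnostic pop
// From here on, -Wshadow applies again exactly as before the push.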
diff --git a/runtime/native/dalvik_system_DexFile.h b/runtime/native/dalvik_system_DexFile.h
new file mode 100644
index 0000000..487df05
--- /dev/null
+++ b/runtime/native/dalvik_system_DexFile.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_DALVIK_SYSTEM_DEXFILE_H_
+#define ART_RUNTIME_NATIVE_DALVIK_SYSTEM_DEXFILE_H_
+
+#include <jni.h>
+
+namespace art {
+
+void register_dalvik_system_DexFile(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_DALVIK_SYSTEM_DEXFILE_H_
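Each of the new headers in this change only declares a register_* entry point; the registration bodies live in the corresponding .cc files and are not part of these hunks. As a rough, generic sketch of what such a function conventionally does (the class, method, and signature below are invented for illustration):

#include <jni.h>

static jboolean Example_isLoaded(JNIEnv*, jclass) {
  return JNI_TRUE;
}

static const JNINativeMethod gExampleMethods[] = {
  {"isLoaded", "()Z", reinterpret_cast<void*>(Example_isLoaded)},
};

void register_com_example_NativeHooks(JNIEnv* env) {
  jclass klass = env->FindClass("com/example/NativeHooks");  // Hypothetical class.
  if (klass != nullptr) {
    env->RegisterNatives(klass, gExampleMethods,
                         sizeof(gExampleMethods) / sizeof(gExampleMethods[0]));
  }
}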
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index ceff206..6c82eb2 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "dalvik_system_VMDebug.h"
+
#include <string.h>
#include <unistd.h>
diff --git a/runtime/native/dalvik_system_VMDebug.h b/runtime/native/dalvik_system_VMDebug.h
new file mode 100644
index 0000000..b7eb8a8
--- /dev/null
+++ b/runtime/native/dalvik_system_VMDebug.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_DALVIK_SYSTEM_VMDEBUG_H_
+#define ART_RUNTIME_NATIVE_DALVIK_SYSTEM_VMDEBUG_H_
+
+#include <jni.h>
+
+namespace art {
+
+void register_dalvik_system_VMDebug(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_DALVIK_SYSTEM_VMDEBUG_H_
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index c35bb30..fdba43e 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "dalvik_system_VMRuntime.h"
+
#include <limits.h>
#include "ScopedUtfChars.h"
@@ -38,7 +40,11 @@
#include "scoped_thread_state_change.h"
#include "thread.h"
#include "thread_list.h"
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
#include "toStringArray.h"
+#pragma GCC diagnostic pop
namespace art {
@@ -166,13 +172,13 @@
return env->NewStringUTF(isa_string);
}
-static jboolean VMRuntime_is64Bit(JNIEnv* env, jobject) {
+static jboolean VMRuntime_is64Bit(JNIEnv*, jobject) {
bool is64BitMode = (sizeof(void*) == sizeof(uint64_t));
return is64BitMode ? JNI_TRUE : JNI_FALSE;
}
static jboolean VMRuntime_isCheckJniEnabled(JNIEnv* env, jobject) {
- return Runtime::Current()->GetJavaVM()->IsCheckJniEnabled() ? JNI_TRUE : JNI_FALSE;
+ return down_cast<JNIEnvExt*>(env)->vm->IsCheckJniEnabled() ? JNI_TRUE : JNI_FALSE;
}
static void VMRuntime_setTargetSdkVersionNative(JNIEnv*, jobject, jint target_sdk_version) {
@@ -201,9 +207,10 @@
Runtime::Current()->GetHeap()->RegisterNativeFree(env, static_cast<size_t>(bytes));
}
-static void VMRuntime_updateProcessState(JNIEnv* env, jobject, jint process_state) {
- Runtime::Current()->GetHeap()->UpdateProcessState(static_cast<gc::ProcessState>(process_state));
- Runtime::Current()->UpdateProfilerState(process_state);
+static void VMRuntime_updateProcessState(JNIEnv*, jobject, jint process_state) {
+ Runtime* runtime = Runtime::Current();
+ runtime->GetHeap()->UpdateProcessState(static_cast<gc::ProcessState>(process_state));
+ runtime->UpdateProfilerState(process_state);
}
static void VMRuntime_trimHeap(JNIEnv*, jobject) {
@@ -385,26 +392,26 @@
const DexFile* dex_file = boot_class_path[i];
CHECK(dex_file != NULL);
mirror::DexCache* dex_cache = linker->FindDexCache(*dex_file);
- for (size_t i = 0; i < dex_cache->NumStrings(); i++) {
- mirror::String* string = dex_cache->GetResolvedString(i);
+ for (size_t j = 0; j < dex_cache->NumStrings(); j++) {
+ mirror::String* string = dex_cache->GetResolvedString(j);
if (string != NULL) {
filled->num_strings++;
}
}
- for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) {
- mirror::Class* klass = dex_cache->GetResolvedType(i);
+ for (size_t j = 0; j < dex_cache->NumResolvedTypes(); j++) {
+ mirror::Class* klass = dex_cache->GetResolvedType(j);
if (klass != NULL) {
filled->num_types++;
}
}
- for (size_t i = 0; i < dex_cache->NumResolvedFields(); i++) {
- mirror::ArtField* field = dex_cache->GetResolvedField(i);
+ for (size_t j = 0; j < dex_cache->NumResolvedFields(); j++) {
+ mirror::ArtField* field = dex_cache->GetResolvedField(j);
if (field != NULL) {
filled->num_fields++;
}
}
- for (size_t i = 0; i < dex_cache->NumResolvedMethods(); i++) {
- mirror::ArtMethod* method = dex_cache->GetResolvedMethod(i);
+ for (size_t j = 0; j < dex_cache->NumResolvedMethods(); j++) {
+ mirror::ArtMethod* method = dex_cache->GetResolvedMethod(j);
if (method != NULL) {
filled->num_methods++;
}
@@ -449,14 +456,14 @@
Handle<mirror::DexCache> dex_cache(hs.NewHandle(linker->FindDexCache(*dex_file)));
if (kPreloadDexCachesStrings) {
- for (size_t i = 0; i < dex_cache->NumStrings(); i++) {
- PreloadDexCachesResolveString(dex_cache, i, strings);
+ for (size_t j = 0; j < dex_cache->NumStrings(); j++) {
+ PreloadDexCachesResolveString(dex_cache, j, strings);
}
}
if (kPreloadDexCachesTypes) {
- for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) {
- PreloadDexCachesResolveType(soa.Self(), dex_cache.Get(), i);
+ for (size_t j = 0; j < dex_cache->NumResolvedTypes(); j++) {
+ PreloadDexCachesResolveType(soa.Self(), dex_cache.Get(), j);
}
}
@@ -514,8 +521,9 @@
* for ART.
*/
static void VMRuntime_registerAppInfo(JNIEnv* env, jclass, jstring pkgName,
- jstring appDir, jstring procName) {
- const char *pkgNameChars = env->GetStringUTFChars(pkgName, NULL);
+ jstring appDir ATTRIBUTE_UNUSED,
+ jstring procName ATTRIBUTE_UNUSED) {
+ const char *pkgNameChars = env->GetStringUTFChars(pkgName, nullptr);
std::string profileFile = StringPrintf("/data/dalvik-cache/profiles/%s", pkgNameChars);
Runtime::Current()->StartProfiler(profileFile.c_str());
diff --git a/runtime/native/dalvik_system_VMRuntime.h b/runtime/native/dalvik_system_VMRuntime.h
new file mode 100644
index 0000000..795caa5
--- /dev/null
+++ b/runtime/native/dalvik_system_VMRuntime.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_DALVIK_SYSTEM_VMRUNTIME_H_
+#define ART_RUNTIME_NATIVE_DALVIK_SYSTEM_VMRUNTIME_H_
+
+#include <jni.h>
+
+namespace art {
+
+void register_dalvik_system_VMRuntime(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_DALVIK_SYSTEM_VMRUNTIME_H_
diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc
index eef1c46..e396dad 100644
--- a/runtime/native/dalvik_system_VMStack.cc
+++ b/runtime/native/dalvik_system_VMStack.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "dalvik_system_VMStack.h"
+
#include "jni_internal.h"
#include "nth_caller_visitor.h"
#include "mirror/art_method-inl.h"
@@ -87,8 +89,10 @@
static jobject VMStack_getClosestUserClassLoader(JNIEnv* env, jclass, jobject javaBootstrap,
jobject javaSystem) {
struct ClosestUserClassLoaderVisitor : public StackVisitor {
- ClosestUserClassLoaderVisitor(Thread* thread, mirror::Object* bootstrap, mirror::Object* system)
- : StackVisitor(thread, NULL), bootstrap(bootstrap), system(system), class_loader(NULL) {}
+ ClosestUserClassLoaderVisitor(Thread* thread, mirror::Object* bootstrap_in,
+ mirror::Object* system_in)
+ : StackVisitor(thread, NULL), bootstrap(bootstrap_in), system(system_in),
+ class_loader(NULL) {}
bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(class_loader == NULL);
diff --git a/runtime/native/dalvik_system_VMStack.h b/runtime/native/dalvik_system_VMStack.h
new file mode 100644
index 0000000..5638f99
--- /dev/null
+++ b/runtime/native/dalvik_system_VMStack.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_DALVIK_SYSTEM_VMSTACK_H_
+#define ART_RUNTIME_NATIVE_DALVIK_SYSTEM_VMSTACK_H_
+
+#include <jni.h>
+
+namespace art {
+
+void register_dalvik_system_VMStack(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_DALVIK_SYSTEM_VMSTACK_H_
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index e469126..0966954 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "dalvik_system_ZygoteHooks.h"
+
#include <stdlib.h>
#include "debugger.h"
@@ -100,8 +102,7 @@
runtime->PreZygoteFork();
// Grab thread before fork potentially makes Thread::pthread_key_self_ unusable.
- Thread* self = Thread::Current();
- return reinterpret_cast<jlong>(self);
+ return reinterpret_cast<jlong>(ThreadForEnv(env));
}
static void ZygoteHooks_nativePostForkChild(JNIEnv* env, jclass, jlong token, jint debug_flags,
diff --git a/runtime/native/dalvik_system_ZygoteHooks.h b/runtime/native/dalvik_system_ZygoteHooks.h
new file mode 100644
index 0000000..ca0658d
--- /dev/null
+++ b/runtime/native/dalvik_system_ZygoteHooks.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_DALVIK_SYSTEM_ZYGOTEHOOKS_H_
+#define ART_RUNTIME_NATIVE_DALVIK_SYSTEM_ZYGOTEHOOKS_H_
+
+#include <jni.h>
+
+namespace art {
+
+void register_dalvik_system_ZygoteHooks(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_DALVIK_SYSTEM_ZYGOTEHOOKS_H_
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index b11cbdf..1ea75f3 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "java_lang_Class.h"
+
#include "class_linker.h"
#include "dex_file-inl.h"
#include "jni_internal.h"
diff --git a/runtime/native/java_lang_Class.h b/runtime/native/java_lang_Class.h
new file mode 100644
index 0000000..8f769c3
--- /dev/null
+++ b/runtime/native/java_lang_Class.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_CLASS_H_
+#define ART_RUNTIME_NATIVE_JAVA_LANG_CLASS_H_
+
+#include <jni.h>
+
+namespace art {
+
+void register_java_lang_Class(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_JAVA_LANG_CLASS_H_
diff --git a/runtime/native/java_lang_DexCache.cc b/runtime/native/java_lang_DexCache.cc
index c1c6c26..27eae46 100644
--- a/runtime/native/java_lang_DexCache.cc
+++ b/runtime/native/java_lang_DexCache.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "java_lang_DexCache.h"
+
#include "dex_file.h"
#include "jni_internal.h"
#include "mirror/dex_cache.h"
diff --git a/runtime/native/java_lang_DexCache.h b/runtime/native/java_lang_DexCache.h
new file mode 100644
index 0000000..b1c1f5e
--- /dev/null
+++ b/runtime/native/java_lang_DexCache.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_DEXCACHE_H_
+#define ART_RUNTIME_NATIVE_JAVA_LANG_DEXCACHE_H_
+
+#include <jni.h>
+
+namespace art {
+
+void register_java_lang_DexCache(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_JAVA_LANG_DEXCACHE_H_
diff --git a/runtime/native/java_lang_Object.cc b/runtime/native/java_lang_Object.cc
index 4768f48..49cacdf 100644
--- a/runtime/native/java_lang_Object.cc
+++ b/runtime/native/java_lang_Object.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "java_lang_Object.h"
+
#include "jni_internal.h"
#include "mirror/object-inl.h"
#include "scoped_fast_native_object_access.h"
diff --git a/runtime/native/java_lang_Object.h b/runtime/native/java_lang_Object.h
new file mode 100644
index 0000000..c860571
--- /dev/null
+++ b/runtime/native/java_lang_Object.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_OBJECT_H_
+#define ART_RUNTIME_NATIVE_JAVA_LANG_OBJECT_H_
+
+#include <jni.h>
+
+namespace art {
+
+void register_java_lang_Object(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_JAVA_LANG_OBJECT_H_
diff --git a/runtime/native/java_lang_Runtime.cc b/runtime/native/java_lang_Runtime.cc
index f9a1cee..dc0cb7b 100644
--- a/runtime/native/java_lang_Runtime.cc
+++ b/runtime/native/java_lang_Runtime.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "java_lang_Runtime.h"
+
#include <dlfcn.h>
#include <limits.h>
#include <unistd.h>
diff --git a/runtime/native/java_lang_Runtime.h b/runtime/native/java_lang_Runtime.h
new file mode 100644
index 0000000..ceda06b
--- /dev/null
+++ b/runtime/native/java_lang_Runtime.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_RUNTIME_H_
+#define ART_RUNTIME_NATIVE_JAVA_LANG_RUNTIME_H_
+
+#include <jni.h>
+
+namespace art {
+
+void register_java_lang_Runtime(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_JAVA_LANG_RUNTIME_H_
diff --git a/runtime/native/java_lang_String.cc b/runtime/native/java_lang_String.cc
index d6b47eb..4ea2546 100644
--- a/runtime/native/java_lang_String.cc
+++ b/runtime/native/java_lang_String.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "java_lang_String.h"
+
#include "common_throws.h"
#include "jni_internal.h"
#include "mirror/string-inl.h"
diff --git a/runtime/native/java_lang_String.h b/runtime/native/java_lang_String.h
new file mode 100644
index 0000000..357eb3d
--- /dev/null
+++ b/runtime/native/java_lang_String.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_STRING_H_
+#define ART_RUNTIME_NATIVE_JAVA_LANG_STRING_H_
+
+#include <jni.h>
+
+namespace art {
+
+void register_java_lang_String(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_JAVA_LANG_STRING_H_
diff --git a/runtime/native/java_lang_System.cc b/runtime/native/java_lang_System.cc
index 43681a7..f79be56 100644
--- a/runtime/native/java_lang_System.cc
+++ b/runtime/native/java_lang_System.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "java_lang_System.h"
+
#include "common_throws.h"
#include "gc/accounting/card_table-inl.h"
#include "jni_internal.h"
diff --git a/runtime/native/java_lang_System.h b/runtime/native/java_lang_System.h
new file mode 100644
index 0000000..e371fa5
--- /dev/null
+++ b/runtime/native/java_lang_System.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_SYSTEM_H_
+#define ART_RUNTIME_NATIVE_JAVA_LANG_SYSTEM_H_
+
+#include <jni.h>
+
+namespace art {
+
+void register_java_lang_System(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_JAVA_LANG_SYSTEM_H_
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index c0c7265..0722a24 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "java_lang_Thread.h"
+
#include "common_throws.h"
#include "debugger.h"
#include "jni_internal.h"
diff --git a/runtime/native/java_lang_Thread.h b/runtime/native/java_lang_Thread.h
new file mode 100644
index 0000000..7700ce2
--- /dev/null
+++ b/runtime/native/java_lang_Thread.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_THREAD_H_
+#define ART_RUNTIME_NATIVE_JAVA_LANG_THREAD_H_
+
+#include <jni.h>
+
+namespace art {
+
+void register_java_lang_Thread(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_JAVA_LANG_THREAD_H_
diff --git a/runtime/native/java_lang_Throwable.cc b/runtime/native/java_lang_Throwable.cc
index 3ed4cfe..cb8a869 100644
--- a/runtime/native/java_lang_Throwable.cc
+++ b/runtime/native/java_lang_Throwable.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "java_lang_Throwable.h"
+
#include "jni_internal.h"
#include "scoped_fast_native_object_access.h"
#include "thread.h"
diff --git a/runtime/native/java_lang_Throwable.h b/runtime/native/java_lang_Throwable.h
new file mode 100644
index 0000000..f9aea84
--- /dev/null
+++ b/runtime/native/java_lang_Throwable.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_THROWABLE_H_
+#define ART_RUNTIME_NATIVE_JAVA_LANG_THROWABLE_H_
+
+#include <jni.h>
+
+namespace art {
+
+void register_java_lang_Throwable(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_JAVA_LANG_THROWABLE_H_
diff --git a/runtime/native/java_lang_VMClassLoader.cc b/runtime/native/java_lang_VMClassLoader.cc
index f6a46bd..45563d2 100644
--- a/runtime/native/java_lang_VMClassLoader.cc
+++ b/runtime/native/java_lang_VMClassLoader.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "java_lang_VMClassLoader.h"
+
#include "class_linker.h"
#include "jni_internal.h"
#include "mirror/class_loader.h"
diff --git a/runtime/native/java_lang_VMClassLoader.h b/runtime/native/java_lang_VMClassLoader.h
new file mode 100644
index 0000000..bf8d94f
--- /dev/null
+++ b/runtime/native/java_lang_VMClassLoader.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_VMCLASSLOADER_H_
+#define ART_RUNTIME_NATIVE_JAVA_LANG_VMCLASSLOADER_H_
+
+#include <jni.h>
+
+namespace art {
+
+void register_java_lang_VMClassLoader(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_JAVA_LANG_VMCLASSLOADER_H_
diff --git a/runtime/native/java_lang_ref_FinalizerReference.cc b/runtime/native/java_lang_ref_FinalizerReference.cc
index ad48ec0..0532c35 100644
--- a/runtime/native/java_lang_ref_FinalizerReference.cc
+++ b/runtime/native/java_lang_ref_FinalizerReference.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "java_lang_ref_FinalizerReference.h"
+
#include "gc/heap.h"
#include "gc/reference_processor.h"
#include "jni_internal.h"
diff --git a/runtime/native/java_lang_ref_FinalizerReference.h b/runtime/native/java_lang_ref_FinalizerReference.h
new file mode 100644
index 0000000..848a7ad
--- /dev/null
+++ b/runtime/native/java_lang_ref_FinalizerReference.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_REF_FINALIZERREFERENCE_H_
+#define ART_RUNTIME_NATIVE_JAVA_LANG_REF_FINALIZERREFERENCE_H_
+
+#include <jni.h>
+
+namespace art {
+
+void register_java_lang_ref_FinalizerReference(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_JAVA_LANG_REF_FINALIZERREFERENCE_H_
diff --git a/runtime/native/java_lang_ref_Reference.cc b/runtime/native/java_lang_ref_Reference.cc
index 4f04d60..d232059 100644
--- a/runtime/native/java_lang_ref_Reference.cc
+++ b/runtime/native/java_lang_ref_Reference.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "java_lang_ref_Reference.h"
+
#include "gc/heap.h"
#include "gc/reference_processor.h"
#include "jni_internal.h"
diff --git a/runtime/native/java_lang_ref_Reference.h b/runtime/native/java_lang_ref_Reference.h
new file mode 100644
index 0000000..0cbf116
--- /dev/null
+++ b/runtime/native/java_lang_ref_Reference.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_REF_REFERENCE_H_
+#define ART_RUNTIME_NATIVE_JAVA_LANG_REF_REFERENCE_H_
+
+#include <jni.h>
+
+namespace art {
+
+void register_java_lang_ref_Reference(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_JAVA_LANG_REF_REFERENCE_H_
diff --git a/runtime/native/java_lang_reflect_Array.cc b/runtime/native/java_lang_reflect_Array.cc
index 763a664..1ffcbdf 100644
--- a/runtime/native/java_lang_reflect_Array.cc
+++ b/runtime/native/java_lang_reflect_Array.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "java_lang_reflect_Array.h"
+
#include "class_linker-inl.h"
#include "common_throws.h"
#include "dex_file-inl.h"
diff --git a/runtime/native/java_lang_reflect_Array.h b/runtime/native/java_lang_reflect_Array.h
new file mode 100644
index 0000000..805bf79
--- /dev/null
+++ b/runtime/native/java_lang_reflect_Array.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_ARRAY_H_
+#define ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_ARRAY_H_
+
+#include <jni.h>
+
+namespace art {
+
+void register_java_lang_reflect_Array(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_ARRAY_H_
diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc
index 0542aeb..3121a90 100644
--- a/runtime/native/java_lang_reflect_Constructor.cc
+++ b/runtime/native/java_lang_reflect_Constructor.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "java_lang_reflect_Constructor.h"
+
#include "class_linker.h"
#include "jni_internal.h"
#include "mirror/art_method.h"
diff --git a/runtime/native/java_lang_reflect_Constructor.h b/runtime/native/java_lang_reflect_Constructor.h
new file mode 100644
index 0000000..7baae97
--- /dev/null
+++ b/runtime/native/java_lang_reflect_Constructor.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_CONSTRUCTOR_H_
+#define ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_CONSTRUCTOR_H_
+
+#include <jni.h>
+
+namespace art {
+
+void register_java_lang_reflect_Constructor(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_CONSTRUCTOR_H_
diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc
index 7f5a611..a042620 100644
--- a/runtime/native/java_lang_reflect_Field.cc
+++ b/runtime/native/java_lang_reflect_Field.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "java_lang_reflect_Field.h"
+
#include "class_linker.h"
#include "class_linker-inl.h"
#include "common_throws.h"
@@ -57,9 +59,8 @@
}
template<bool kAllowReferences>
-ALWAYS_INLINE inline static bool GetFieldValue(
- const ScopedFastNativeObjectAccess& soa, mirror::Object* o, mirror::ArtField* f,
- Primitive::Type field_type, JValue* value)
+ALWAYS_INLINE inline static bool GetFieldValue(mirror::Object* o, mirror::ArtField* f,
+ Primitive::Type field_type, JValue* value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK_EQ(value->GetJ(), INT64_C(0));
switch (field_type) {
@@ -148,7 +149,7 @@
// Get the field's value, boxing if necessary.
Primitive::Type field_type = f->GetTypeAsPrimitiveType();
JValue value;
- if (!GetFieldValue<true>(soa, o, f, field_type, &value)) {
+ if (!GetFieldValue<true>(o, f, field_type, &value)) {
DCHECK(soa.Self()->IsExceptionPending());
return nullptr;
}
@@ -178,13 +179,13 @@
JValue field_value;
if (field_type == kPrimitiveType) {
// This if statement should get optimized out since we only pass in valid primitive types.
- if (UNLIKELY(!GetFieldValue<false>(soa, o, f, kPrimitiveType, &field_value))) {
+ if (UNLIKELY(!GetFieldValue<false>(o, f, kPrimitiveType, &field_value))) {
DCHECK(soa.Self()->IsExceptionPending());
return JValue();
}
return field_value;
}
- if (!GetFieldValue<false>(soa, o, f, field_type, &field_value)) {
+ if (!GetFieldValue<false>(o, f, field_type, &field_value)) {
DCHECK(soa.Self()->IsExceptionPending());
return JValue();
}
@@ -232,9 +233,8 @@
return GetPrimitiveField<Primitive::kPrimShort>(env, javaField, javaObj, accessible).GetS();
}
-static void SetFieldValue(ScopedFastNativeObjectAccess& soa, mirror::Object* o,
- mirror::ArtField* f, Primitive::Type field_type, bool allow_references,
- const JValue& new_value)
+static void SetFieldValue(mirror::Object* o, mirror::ArtField* f, Primitive::Type field_type,
+ bool allow_references, const JValue& new_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(f->GetDeclaringClass()->IsInitialized());
switch (field_type) {
@@ -317,7 +317,7 @@
DCHECK(soa.Self()->IsExceptionPending());
return;
}
- SetFieldValue(soa, o, f, field_prim_type, true, unboxed_value);
+ SetFieldValue(o, f, field_prim_type, true, unboxed_value);
}
template<Primitive::Type kPrimitiveType>
@@ -350,7 +350,7 @@
}
// Write the value.
- SetFieldValue(soa, o, f, field_type, false, wide_value);
+ SetFieldValue(o, f, field_type, false, wide_value);
}
static void Field_setBoolean(JNIEnv* env, jobject javaField, jobject javaObj, jboolean z,
diff --git a/runtime/native/java_lang_reflect_Field.h b/runtime/native/java_lang_reflect_Field.h
new file mode 100644
index 0000000..1739711
--- /dev/null
+++ b/runtime/native/java_lang_reflect_Field.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_FIELD_H_
+#define ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_FIELD_H_
+
+#include <jni.h>
+
+namespace art {
+
+void register_java_lang_reflect_Field(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_FIELD_H_
diff --git a/runtime/native/java_lang_reflect_Method.cc b/runtime/native/java_lang_reflect_Method.cc
index f029b16..9859746 100644
--- a/runtime/native/java_lang_reflect_Method.cc
+++ b/runtime/native/java_lang_reflect_Method.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "java_lang_reflect_Method.h"
+
#include "class_linker.h"
#include "jni_internal.h"
#include "mirror/art_method.h"
diff --git a/runtime/native/java_lang_reflect_Method.h b/runtime/native/java_lang_reflect_Method.h
new file mode 100644
index 0000000..3a93cd0
--- /dev/null
+++ b/runtime/native/java_lang_reflect_Method.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_METHOD_H_
+#define ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_METHOD_H_
+
+#include <jni.h>
+
+namespace art {
+
+void register_java_lang_reflect_Method(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_METHOD_H_
diff --git a/runtime/native/java_lang_reflect_Proxy.cc b/runtime/native/java_lang_reflect_Proxy.cc
index 07d670d..baf8b24 100644
--- a/runtime/native/java_lang_reflect_Proxy.cc
+++ b/runtime/native/java_lang_reflect_Proxy.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "java_lang_reflect_Proxy.h"
+
#include "class_linker.h"
#include "jni_internal.h"
#include "mirror/class_loader.h"
diff --git a/runtime/native/java_lang_reflect_Proxy.h b/runtime/native/java_lang_reflect_Proxy.h
new file mode 100644
index 0000000..e25f0f7
--- /dev/null
+++ b/runtime/native/java_lang_reflect_Proxy.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_PROXY_H_
+#define ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_PROXY_H_
+
+#include <jni.h>
+
+namespace art {
+
+void register_java_lang_reflect_Proxy(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_PROXY_H_
diff --git a/runtime/native/java_util_concurrent_atomic_AtomicLong.cc b/runtime/native/java_util_concurrent_atomic_AtomicLong.cc
index bf92e12..04f0ba0 100644
--- a/runtime/native/java_util_concurrent_atomic_AtomicLong.cc
+++ b/runtime/native/java_util_concurrent_atomic_AtomicLong.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "java_util_concurrent_atomic_AtomicLong.h"
+
#include "atomic.h"
#include "jni_internal.h"
diff --git a/runtime/native/java_util_concurrent_atomic_AtomicLong.h b/runtime/native/java_util_concurrent_atomic_AtomicLong.h
new file mode 100644
index 0000000..990dc86
--- /dev/null
+++ b/runtime/native/java_util_concurrent_atomic_AtomicLong.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_JAVA_UTIL_CONCURRENT_ATOMIC_ATOMICLONG_H_
+#define ART_RUNTIME_NATIVE_JAVA_UTIL_CONCURRENT_ATOMIC_ATOMICLONG_H_
+
+#include <jni.h>
+
+namespace art {
+
+void register_java_util_concurrent_atomic_AtomicLong(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_JAVA_UTIL_CONCURRENT_ATOMIC_ATOMICLONG_H_
diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc
index 8b2aecb..0ab2979 100644
--- a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc
+++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "org_apache_harmony_dalvik_ddmc_DdmServer.h"
+
#include "base/logging.h"
#include "debugger.h"
#include "jni_internal.h"
diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.h b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.h
new file mode 100644
index 0000000..9a4645c
--- /dev/null
+++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_ORG_APACHE_HARMONY_DALVIK_DDMC_DDMSERVER_H_
+#define ART_RUNTIME_NATIVE_ORG_APACHE_HARMONY_DALVIK_DDMC_DDMSERVER_H_
+
+#include <jni.h>
+
+namespace art {
+
+void register_org_apache_harmony_dalvik_ddmc_DdmServer(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_ORG_APACHE_HARMONY_DALVIK_DDMC_DDMSERVER_H_
diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
index 45ef9ae..b74430f 100644
--- a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
+++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "org_apache_harmony_dalvik_ddmc_DdmVmInternal.h"
+
#include "base/logging.h"
#include "base/mutex.h"
#include "debugger.h"
diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.h b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.h
new file mode 100644
index 0000000..736e4c8
--- /dev/null
+++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_ORG_APACHE_HARMONY_DALVIK_DDMC_DDMVMINTERNAL_H_
+#define ART_RUNTIME_NATIVE_ORG_APACHE_HARMONY_DALVIK_DDMC_DDMVMINTERNAL_H_
+
+#include <jni.h>
+
+namespace art {
+
+void register_org_apache_harmony_dalvik_ddmc_DdmVmInternal(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_ORG_APACHE_HARMONY_DALVIK_DDMC_DDMVMINTERNAL_H_
diff --git a/runtime/native/sun_misc_Unsafe.cc b/runtime/native/sun_misc_Unsafe.cc
index 65dece0..17ebdff 100644
--- a/runtime/native/sun_misc_Unsafe.cc
+++ b/runtime/native/sun_misc_Unsafe.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "sun_misc_Unsafe.h"
+
#include "gc/accounting/card_table-inl.h"
#include "jni_internal.h"
#include "mirror/array.h"
diff --git a/runtime/native/sun_misc_Unsafe.h b/runtime/native/sun_misc_Unsafe.h
new file mode 100644
index 0000000..93194f4
--- /dev/null
+++ b/runtime/native/sun_misc_Unsafe.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_SUN_MISC_UNSAFE_H_
+#define ART_RUNTIME_NATIVE_SUN_MISC_UNSAFE_H_
+
+#include <jni.h>
+
+namespace art {
+
+void register_sun_misc_Unsafe(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_SUN_MISC_UNSAFE_H_
diff --git a/runtime/native_bridge_art_interface.cc b/runtime/native_bridge_art_interface.cc
index f598e27..c2c6b12 100644
--- a/runtime/native_bridge_art_interface.cc
+++ b/runtime/native_bridge_art_interface.cc
@@ -19,13 +19,14 @@
#include "nativebridge/native_bridge.h"
#include "base/logging.h"
+#include "base/macros.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "scoped_thread_state_change.h"
namespace art {
-const char* GetMethodShorty(JNIEnv* env, jmethodID mid) {
+static const char* GetMethodShorty(JNIEnv* env, jmethodID mid) {
ScopedObjectAccess soa(env);
StackHandleScope<1> scope(soa.Self());
mirror::ArtMethod* m = soa.DecodeMethod(mid);
@@ -33,7 +34,7 @@
return mh.GetShorty();
}
-uint32_t GetNativeMethodCount(JNIEnv* env, jclass clazz) {
+static uint32_t GetNativeMethodCount(JNIEnv* env, jclass clazz) {
if (clazz == nullptr)
return 0;
@@ -56,8 +57,8 @@
return native_method_count;
}
-uint32_t GetNativeMethods(JNIEnv* env, jclass clazz, JNINativeMethod* methods,
- uint32_t method_count) {
+static uint32_t GetNativeMethods(JNIEnv* env, jclass clazz, JNINativeMethod* methods,
+ uint32_t method_count) {
if ((clazz == nullptr) || (methods == nullptr)) {
return 0;
}
@@ -121,6 +122,8 @@
LOG(WARNING) << "Could not create mount namespace.";
}
android::PreInitializeNativeBridge(dir.c_str(), GetInstructionSetString(kRuntimeISA));
+#else
+ UNUSED(dir);
#endif
}
diff --git a/runtime/noop_compiler_callbacks.h b/runtime/noop_compiler_callbacks.h
index e9ad353..300abc9 100644
--- a/runtime/noop_compiler_callbacks.h
+++ b/runtime/noop_compiler_callbacks.h
@@ -26,11 +26,11 @@
NoopCompilerCallbacks() {}
~NoopCompilerCallbacks() {}
- bool MethodVerified(verifier::MethodVerifier* verifier) OVERRIDE {
+ bool MethodVerified(verifier::MethodVerifier* verifier ATTRIBUTE_UNUSED) OVERRIDE {
return true;
}
- void ClassRejected(ClassReference ref) OVERRIDE {}
+ void ClassRejected(ClassReference ref ATTRIBUTE_UNUSED) OVERRIDE {}
// This is only used by compilers which need to be able to run without relocation even when it
// would normally be enabled. For example the patchoat executable, and dex2oat --image, both need
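The overrides here are annotated with ATTRIBUTE_UNUSED rather than dropping the parameter names, which keeps the signatures self-documenting while satisfying -Wunused-parameter. A self-contained sketch of the idea (assumption: ART's real macro is defined in base/macros.h):

#if defined(__GNUC__) || defined(__clang__)
#define ATTRIBUTE_UNUSED __attribute__((__unused__))
#else
#define ATTRIBUTE_UNUSED
#endif

struct MethodVerifier;  // Opaque here; only a pointer is passed around.

struct Callbacks {
  virtual ~Callbacks() {}
  virtual bool MethodVerified(MethodVerifier* verifier) = 0;
};

struct NoopCallbacks : Callbacks {
  // The no-op implementation deliberately ignores its argument but keeps the name.
  bool MethodVerified(MethodVerifier* verifier ATTRIBUTE_UNUSED) override { return true; }
};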
diff --git a/runtime/nth_caller_visitor.h b/runtime/nth_caller_visitor.h
index 374a80e..a851f21 100644
--- a/runtime/nth_caller_visitor.h
+++ b/runtime/nth_caller_visitor.h
@@ -26,9 +26,9 @@
// Walks up the stack 'n' callers, when used with Thread::WalkStack.
struct NthCallerVisitor : public StackVisitor {
- NthCallerVisitor(Thread* thread, size_t n, bool include_runtime_and_upcalls = false)
- : StackVisitor(thread, NULL), n(n), include_runtime_and_upcalls_(include_runtime_and_upcalls),
- count(0), caller(NULL) {}
+ NthCallerVisitor(Thread* thread, size_t n_in, bool include_runtime_and_upcalls = false)
+ : StackVisitor(thread, NULL), n(n_in),
+ include_runtime_and_upcalls_(include_runtime_and_upcalls), count(0), caller(NULL) {}
bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtMethod* m = GetMethod();
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 6b64c25..1a97c35 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -504,35 +504,35 @@
} else if (StartsWith(option, "-verbose:")) {
std::vector<std::string> verbose_options;
Split(option.substr(strlen("-verbose:")), ',', &verbose_options);
- for (size_t i = 0; i < verbose_options.size(); ++i) {
- if (verbose_options[i] == "class") {
+ for (size_t j = 0; j < verbose_options.size(); ++j) {
+ if (verbose_options[j] == "class") {
gLogVerbosity.class_linker = true;
- } else if (verbose_options[i] == "compiler") {
+ } else if (verbose_options[j] == "compiler") {
gLogVerbosity.compiler = true;
- } else if (verbose_options[i] == "gc") {
+ } else if (verbose_options[j] == "gc") {
gLogVerbosity.gc = true;
- } else if (verbose_options[i] == "heap") {
+ } else if (verbose_options[j] == "heap") {
gLogVerbosity.heap = true;
- } else if (verbose_options[i] == "jdwp") {
+ } else if (verbose_options[j] == "jdwp") {
gLogVerbosity.jdwp = true;
- } else if (verbose_options[i] == "jni") {
+ } else if (verbose_options[j] == "jni") {
gLogVerbosity.jni = true;
- } else if (verbose_options[i] == "monitor") {
+ } else if (verbose_options[j] == "monitor") {
gLogVerbosity.monitor = true;
- } else if (verbose_options[i] == "profiler") {
+ } else if (verbose_options[j] == "profiler") {
gLogVerbosity.profiler = true;
- } else if (verbose_options[i] == "signals") {
+ } else if (verbose_options[j] == "signals") {
gLogVerbosity.signals = true;
- } else if (verbose_options[i] == "startup") {
+ } else if (verbose_options[j] == "startup") {
gLogVerbosity.startup = true;
- } else if (verbose_options[i] == "third-party-jni") {
+ } else if (verbose_options[j] == "third-party-jni") {
gLogVerbosity.third_party_jni = true;
- } else if (verbose_options[i] == "threads") {
+ } else if (verbose_options[j] == "threads") {
gLogVerbosity.threads = true;
- } else if (verbose_options[i] == "verifier") {
+ } else if (verbose_options[j] == "verifier") {
gLogVerbosity.verifier = true;
} else {
- Usage("Unknown -verbose option %s\n", verbose_options[i].c_str());
+ Usage("Unknown -verbose option %s\n", verbose_options[j].c_str());
return false;
}
}
@@ -741,7 +741,7 @@
}
void ParsedOptions::UsageMessageV(FILE* stream, const char* fmt, va_list ap) {
- hook_vfprintf_(stderr, fmt, ap);
+ hook_vfprintf_(stream, fmt, ap);
}
void ParsedOptions::UsageMessage(FILE* stream, const char* fmt, ...) {
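The one-line fix above forwards the caller's stream instead of hard-coding stderr, so a UsageMessage(stdout, ...) call actually reaches stdout. A minimal sketch of the forwarding pair, assuming nothing beyond <cstdio> and <cstdarg>:

#include <cstdarg>
#include <cstdio>

static void UsageMessageV(FILE* stream, const char* fmt, va_list ap) {
  std::vfprintf(stream, fmt, ap);  // The stream parameter must be used here, not stderr.
}

static void UsageMessage(FILE* stream, const char* fmt, ...) {
  va_list ap;
  va_start(ap, fmt);
  UsageMessageV(stream, fmt, ap);
  va_end(ap);
}

int main() {
  UsageMessage(stdout, "Unknown -verbose option %s\n", "bogus");
  return 0;
}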
diff --git a/runtime/profiler_options.h b/runtime/profiler_options.h
index e3ef697..1db2f05 100644
--- a/runtime/profiler_options.h
+++ b/runtime/profiler_options.h
@@ -26,6 +26,7 @@
kProfilerMethod, // Method only
kProfilerBoundedStack, // Methods with Dex PC on top of the stack
};
+std::ostream& operator<<(std::ostream& os, const ProfileDataType& rhs);
class ProfilerOptions {
public:
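Declaring operator<< for the enum lets LOG() and other std::ostream users print ProfileDataType values; the matching definition is expected to live elsewhere in the runtime. A self-contained illustrative version with an invented enum:

#include <ostream>

enum class SampleKind {  // Stand-in for ProfileDataType.
  kMethod,
  kBoundedStack,
};

inline std::ostream& operator<<(std::ostream& os, const SampleKind& rhs) {
  switch (rhs) {
    case SampleKind::kMethod:       return os << "Method";
    case SampleKind::kBoundedStack: return os << "BoundedStack";
  }
  return os << "Unknown";  // Defensive default for out-of-range values.
}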
diff --git a/runtime/quick/inline_method_analyser.cc b/runtime/quick/inline_method_analyser.cc
index d8fc277..3415e8f 100644
--- a/runtime/quick/inline_method_analyser.cc
+++ b/runtime/quick/inline_method_analyser.cc
@@ -35,50 +35,38 @@
namespace art {
-COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET),
- check_iget_type);
-COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_WIDE),
- check_iget_wide_type);
-COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_OBJECT),
- check_iget_object_type);
-COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_BOOLEAN),
- check_iget_boolean_type);
-COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_BYTE),
- check_iget_byte_type);
-COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_CHAR),
- check_iget_char_type);
-COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_SHORT),
- check_iget_short_type);
-
-COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT),
- check_iput_type);
-COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_WIDE),
- check_iput_wide_type);
-COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_OBJECT),
- check_iput_object_type);
-COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_BOOLEAN),
- check_iput_boolean_type);
-COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_BYTE),
- check_iput_byte_type);
-COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_CHAR),
- check_iput_char_type);
-COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_SHORT),
- check_iput_short_type);
-
-COMPILE_ASSERT(InlineMethodAnalyser::IGetVariant(Instruction::IGET) ==
- InlineMethodAnalyser::IPutVariant(Instruction::IPUT), check_iget_iput_variant);
-COMPILE_ASSERT(InlineMethodAnalyser::IGetVariant(Instruction::IGET_WIDE) ==
- InlineMethodAnalyser::IPutVariant(Instruction::IPUT_WIDE), check_iget_iput_wide_variant);
-COMPILE_ASSERT(InlineMethodAnalyser::IGetVariant(Instruction::IGET_OBJECT) ==
- InlineMethodAnalyser::IPutVariant(Instruction::IPUT_OBJECT), check_iget_iput_object_variant);
-COMPILE_ASSERT(InlineMethodAnalyser::IGetVariant(Instruction::IGET_BOOLEAN) ==
- InlineMethodAnalyser::IPutVariant(Instruction::IPUT_BOOLEAN), check_iget_iput_boolean_variant);
-COMPILE_ASSERT(InlineMethodAnalyser::IGetVariant(Instruction::IGET_BYTE) ==
- InlineMethodAnalyser::IPutVariant(Instruction::IPUT_BYTE), check_iget_iput_byte_variant);
-COMPILE_ASSERT(InlineMethodAnalyser::IGetVariant(Instruction::IGET_CHAR) ==
- InlineMethodAnalyser::IPutVariant(Instruction::IPUT_CHAR), check_iget_iput_char_variant);
-COMPILE_ASSERT(InlineMethodAnalyser::IGetVariant(Instruction::IGET_SHORT) ==
- InlineMethodAnalyser::IPutVariant(Instruction::IPUT_SHORT), check_iget_iput_short_variant);
+static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET), "iget type");
+static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_WIDE), "iget_wide type");
+static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_OBJECT),
+ "iget_object type");
+static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_BOOLEAN),
+ "iget_boolean type");
+static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_BYTE), "iget_byte type");
+static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_CHAR), "iget_char type");
+static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_SHORT), "iget_short type");
+static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT), "iput type");
+static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_WIDE), "iput_wide type");
+static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_OBJECT),
+ "iput_object type");
+static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_BOOLEAN),
+ "iput_boolean type");
+static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_BYTE), "iput_byte type");
+static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_CHAR), "iput_char type");
+static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_SHORT), "iput_short type");
+static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET) ==
+ InlineMethodAnalyser::IPutVariant(Instruction::IPUT), "iget/iput variant");
+static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET_WIDE) ==
+ InlineMethodAnalyser::IPutVariant(Instruction::IPUT_WIDE), "iget/iput_wide variant");
+static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET_OBJECT) ==
+ InlineMethodAnalyser::IPutVariant(Instruction::IPUT_OBJECT), "iget/iput_object variant");
+static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET_BOOLEAN) ==
+ InlineMethodAnalyser::IPutVariant(Instruction::IPUT_BOOLEAN), "iget/iput_boolean variant");
+static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET_BYTE) ==
+ InlineMethodAnalyser::IPutVariant(Instruction::IPUT_BYTE), "iget/iput_byte variant");
+static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET_CHAR) ==
+ InlineMethodAnalyser::IPutVariant(Instruction::IPUT_CHAR), "iget/iput_char variant");
+static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET_SHORT) ==
+ InlineMethodAnalyser::IPutVariant(Instruction::IPUT_SHORT), "iget/iput_short variant");
// This is used by compiler and debugger. We look into the dex cache for resolved methods and
// fields. However, in the context of the debugger, not all methods and fields are resolved. Since
diff --git a/runtime/quick/inline_method_analyser.h b/runtime/quick/inline_method_analyser.h
index a2ae397..a8d4308 100644
--- a/runtime/quick/inline_method_analyser.h
+++ b/runtime/quick/inline_method_analyser.h
@@ -118,7 +118,7 @@
uint32_t is_volatile : 1;
uint32_t field_offset : 31;
};
-COMPILE_ASSERT(sizeof(InlineIGetIPutData) == sizeof(uint64_t), InvalidSizeOfInlineIGetIPutData);
+static_assert(sizeof(InlineIGetIPutData) == sizeof(uint64_t), "Invalid size of InlineIGetIPutData");
struct InlineReturnArgData {
uint16_t arg;
@@ -127,7 +127,8 @@
uint16_t reserved : 14;
uint32_t reserved2;
};
-COMPILE_ASSERT(sizeof(InlineReturnArgData) == sizeof(uint64_t), InvalidSizeOfInlineReturnArgData);
+static_assert(sizeof(InlineReturnArgData) == sizeof(uint64_t),
+ "Invalid size of InlineReturnArgData");
struct InlineMethod {
InlineMethodOpcode opcode;
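
The conversion above replaces the old COMPILE_ASSERT macro, which forced a compile error through a typedef trick, with C++11 static_assert, which takes a proper string message. A minimal standalone sketch of the pattern, assuming a hypothetical Example struct rather than the real ART types:

```cpp
#include <cstdint>

struct Example {
  uint32_t is_volatile : 1;    // 1 bit
  uint32_t field_offset : 31;  // 31 bits -> first 32-bit word
  uint32_t reserved;           // second 32-bit word
};

// Pre-C++11 macro style: an array with negative size triggers the error, and the
// "message" has to be encoded as an identifier (check_example_size).
// typedef char check_example_size[(sizeof(Example) == sizeof(uint64_t)) ? 1 : -1];

// C++11 style used throughout this change: condition plus a readable message.
static_assert(sizeof(Example) == sizeof(uint64_t), "Invalid size of Example");

int main() { return 0; }
```
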
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index c58735a..90c9fe7 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -297,7 +297,7 @@
// Unwinds all instrumentation stack frame prior to catch handler or upcall.
class InstrumentationStackVisitor : public StackVisitor {
public:
- InstrumentationStackVisitor(Thread* self, bool is_deoptimization, size_t frame_depth)
+ InstrumentationStackVisitor(Thread* self, size_t frame_depth)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
: StackVisitor(self, nullptr),
frame_depth_(frame_depth),
@@ -332,7 +332,7 @@
void QuickExceptionHandler::UpdateInstrumentationStack() {
if (method_tracing_active_) {
- InstrumentationStackVisitor visitor(self_, is_deoptimization_, handler_frame_depth_);
+ InstrumentationStackVisitor visitor(self_, handler_frame_depth_);
visitor.WalkStack(true);
size_t instrumentation_frames_to_pop = visitor.GetInstrumentationFramesToPop();
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index fd43d78..0dc31e7 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -27,9 +27,7 @@
inline MirrorType* ReadBarrier::Barrier(
mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr) {
// Unused for now.
- UNUSED(obj);
- UNUSED(offset);
- UNUSED(ref_addr);
+ UNUSED(obj, offset, ref_addr);
const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
if (with_read_barrier && kUseBakerReadBarrier) {
// To be implemented.
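
The read barrier change above folds three UNUSED(x) calls into a single variadic call. One plausible way such a helper can be written is a variadic template that swallows its arguments; this is an illustrative sketch, not ART's actual definition in base/macros.h:

```cpp
// Variadic "sink": accepts any arguments and does nothing, so parameters that are
// intentionally unused do not trigger -Wunused-parameter style warnings.
template <typename... T>
void UNUSED(const T&...) {}

int Barrier(int obj, int offset, int* ref_addr) {
  UNUSED(obj, offset, ref_addr);  // one call instead of three separate UNUSED() calls
  return 0;
}

int main() {
  int slot = 0;
  return Barrier(1, 2, &slot);
}
```
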
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 228d200..44d1bc4 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -219,8 +219,7 @@
PrettyDescriptor(found_descriptor).c_str()).c_str());
}
- bool BuildArgArrayFromObjectArray(const ScopedObjectAccessAlreadyRunnable& soa,
- mirror::Object* receiver,
+ bool BuildArgArrayFromObjectArray(mirror::Object* receiver,
mirror::ObjectArray<mirror::Object>* args, MethodHelper& mh)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
const DexFile::TypeList* classes = mh.GetMethod()->GetParameterTypeList();
@@ -613,7 +612,7 @@
ArgArray arg_array(shorty, shorty_len);
StackHandleScope<1> hs(soa.Self());
MethodHelper mh(hs.NewHandle(m));
- if (!arg_array.BuildArgArrayFromObjectArray(soa, receiver, objects, mh)) {
+ if (!arg_array.BuildArgArrayFromObjectArray(receiver, objects, mh)) {
CHECK(soa.Self()->IsExceptionPending());
return nullptr;
}
diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc
index f8e0f47..eca1800 100644
--- a/runtime/reflection_test.cc
+++ b/runtime/reflection_test.cc
@@ -115,8 +115,8 @@
*receiver = nullptr;
} else {
// Ensure class is initialized before allocating object
- StackHandleScope<1> hs(self);
- Handle<mirror::Class> h_class(hs.NewHandle(c));
+ StackHandleScope<1> hs2(self);
+ Handle<mirror::Class> h_class(hs2.NewHandle(c));
bool initialized = class_linker_->EnsureInitialized(self, h_class, true, true);
CHECK(initialized);
*receiver = c->AllocObject(self);
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index db7936c..1cda29b 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -69,6 +69,31 @@
#include "mirror/throwable.h"
#include "monitor.h"
#include "native_bridge_art_interface.h"
+#include "native/dalvik_system_DexFile.h"
+#include "native/dalvik_system_VMDebug.h"
+#include "native/dalvik_system_VMRuntime.h"
+#include "native/dalvik_system_VMStack.h"
+#include "native/dalvik_system_ZygoteHooks.h"
+#include "native/java_lang_Class.h"
+#include "native/java_lang_DexCache.h"
+#include "native/java_lang_Object.h"
+#include "native/java_lang_ref_FinalizerReference.h"
+#include "native/java_lang_reflect_Array.h"
+#include "native/java_lang_reflect_Constructor.h"
+#include "native/java_lang_reflect_Field.h"
+#include "native/java_lang_reflect_Method.h"
+#include "native/java_lang_reflect_Proxy.h"
+#include "native/java_lang_ref_Reference.h"
+#include "native/java_lang_Runtime.h"
+#include "native/java_lang_String.h"
+#include "native/java_lang_System.h"
+#include "native/java_lang_Thread.h"
+#include "native/java_lang_Throwable.h"
+#include "native/java_lang_VMClassLoader.h"
+#include "native/java_util_concurrent_atomic_AtomicLong.h"
+#include "native/org_apache_harmony_dalvik_ddmc_DdmServer.h"
+#include "native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.h"
+#include "native/sun_misc_Unsafe.h"
#include "parsed_options.h"
#include "oat_file.h"
#include "os.h"
@@ -344,7 +369,7 @@
return true;
}
-jobject CreateSystemClassLoader() {
+static jobject CreateSystemClassLoader() {
if (Runtime::Current()->UseCompileTimeClassPath()) {
return NULL;
}
@@ -388,9 +413,9 @@
if (!patchoat_executable_.empty()) {
return patchoat_executable_;
}
- std::string patchoat_executable_(GetAndroidRoot());
- patchoat_executable_ += (kIsDebugBuild ? "/bin/patchoatd" : "/bin/patchoat");
- return patchoat_executable_;
+ std::string patchoat_executable(GetAndroidRoot());
+ patchoat_executable += (kIsDebugBuild ? "/bin/patchoatd" : "/bin/patchoat");
+ return patchoat_executable;
}
std::string Runtime::GetCompilerExecutable() const {
@@ -412,8 +437,13 @@
started_ = true;
+ if (IsZygote()) {
+ ScopedObjectAccess soa(self);
+ Runtime::Current()->GetInternTable()->AddImageStringsToTable(heap_->GetImageSpace());
+ }
+
if (!IsImageDex2OatEnabled() || !Runtime::Current()->GetHeap()->HasImageSpace()) {
- ScopedObjectAccess soa(Thread::Current());
+ ScopedObjectAccess soa(self);
StackHandleScope<1> hs(soa.Self());
auto klass(hs.NewHandle<mirror::Class>(mirror::Class::GetJavaLangClass()));
class_linker_->EnsureInitialized(soa.Self(), klass, true, true);
@@ -571,8 +601,7 @@
VLOG(startup) << "Runtime::StartDaemonThreads exiting";
}
-static bool OpenDexFilesFromImage(const std::vector<std::string>& dex_filenames,
- const std::string& image_location,
+static bool OpenDexFilesFromImage(const std::string& image_location,
std::vector<const DexFile*>& dex_files,
size_t* failures) {
std::string system_filename;
@@ -634,8 +663,7 @@
const std::string& image_location,
std::vector<const DexFile*>& dex_files) {
size_t failure_count = 0;
- if (!image_location.empty() && OpenDexFilesFromImage(dex_filenames, image_location, dex_files,
- &failure_count)) {
+ if (!image_location.empty() && OpenDexFilesFromImage(image_location, dex_files, &failure_count)) {
return failure_count;
}
failure_count = 0;
@@ -823,7 +851,7 @@
for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
if (!HasCalleeSaveMethod(type)) {
- SetCalleeSaveMethod(CreateCalleeSaveMethod(type), type);
+ SetCalleeSaveMethod(CreateCalleeSaveMethod(), type);
}
}
} else {
@@ -966,34 +994,31 @@
}
void Runtime::RegisterRuntimeNativeMethods(JNIEnv* env) {
-#define REGISTER(FN) extern void FN(JNIEnv*); FN(env)
- // Register Throwable first so that registration of other native methods can throw exceptions
- REGISTER(register_java_lang_Throwable);
- REGISTER(register_dalvik_system_DexFile);
- REGISTER(register_dalvik_system_VMDebug);
- REGISTER(register_dalvik_system_VMRuntime);
- REGISTER(register_dalvik_system_VMStack);
- REGISTER(register_dalvik_system_ZygoteHooks);
- REGISTER(register_java_lang_Class);
- REGISTER(register_java_lang_DexCache);
- REGISTER(register_java_lang_Object);
- REGISTER(register_java_lang_Runtime);
- REGISTER(register_java_lang_String);
- REGISTER(register_java_lang_System);
- REGISTER(register_java_lang_Thread);
- REGISTER(register_java_lang_VMClassLoader);
- REGISTER(register_java_lang_ref_FinalizerReference);
- REGISTER(register_java_lang_ref_Reference);
- REGISTER(register_java_lang_reflect_Array);
- REGISTER(register_java_lang_reflect_Constructor);
- REGISTER(register_java_lang_reflect_Field);
- REGISTER(register_java_lang_reflect_Method);
- REGISTER(register_java_lang_reflect_Proxy);
- REGISTER(register_java_util_concurrent_atomic_AtomicLong);
- REGISTER(register_org_apache_harmony_dalvik_ddmc_DdmServer);
- REGISTER(register_org_apache_harmony_dalvik_ddmc_DdmVmInternal);
- REGISTER(register_sun_misc_Unsafe);
-#undef REGISTER
+ register_dalvik_system_DexFile(env);
+ register_dalvik_system_VMDebug(env);
+ register_dalvik_system_VMRuntime(env);
+ register_dalvik_system_VMStack(env);
+ register_dalvik_system_ZygoteHooks(env);
+ register_java_lang_Class(env);
+ register_java_lang_DexCache(env);
+ register_java_lang_Object(env);
+ register_java_lang_ref_FinalizerReference(env);
+ register_java_lang_reflect_Array(env);
+ register_java_lang_reflect_Constructor(env);
+ register_java_lang_reflect_Field(env);
+ register_java_lang_reflect_Method(env);
+ register_java_lang_reflect_Proxy(env);
+ register_java_lang_ref_Reference(env);
+ register_java_lang_Runtime(env);
+ register_java_lang_String(env);
+ register_java_lang_System(env);
+ register_java_lang_Thread(env);
+ register_java_lang_Throwable(env);
+ register_java_lang_VMClassLoader(env);
+ register_java_util_concurrent_atomic_AtomicLong(env);
+ register_org_apache_harmony_dalvik_ddmc_DdmServer(env);
+ register_org_apache_harmony_dalvik_ddmc_DdmVmInternal(env);
+ register_sun_misc_Unsafe(env);
}
void Runtime::DumpForSigQuit(std::ostream& os) {
@@ -1255,7 +1280,7 @@
return method.Get();
}
-mirror::ArtMethod* Runtime::CreateCalleeSaveMethod(CalleeSaveType type) {
+mirror::ArtMethod* Runtime::CreateCalleeSaveMethod() {
Thread* self = Thread::Current();
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 11db613..3cbe1e5 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -55,8 +55,8 @@
class Throwable;
} // namespace mirror
namespace verifier {
-class MethodVerifier;
-}
+ class MethodVerifier;
+} // namespace verifier
class ClassLinker;
class DexFile;
class InternTable;
@@ -379,8 +379,7 @@
void SetCalleeSaveMethod(mirror::ArtMethod* method, CalleeSaveType type);
- mirror::ArtMethod* CreateCalleeSaveMethod(CalleeSaveType type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::ArtMethod* CreateCalleeSaveMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
int32_t GetStat(int kind);
@@ -655,6 +654,7 @@
DISALLOW_COPY_AND_ASSIGN(Runtime);
};
+std::ostream& operator<<(std::ostream& os, const Runtime::CalleeSaveType& rhs);
} // namespace art
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 0adf031..4408609 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -409,8 +409,8 @@
size_t StackVisitor::ComputeNumFrames(Thread* thread) {
struct NumFramesVisitor : public StackVisitor {
- explicit NumFramesVisitor(Thread* thread)
- : StackVisitor(thread, NULL), frames(0) {}
+ explicit NumFramesVisitor(Thread* thread_in)
+ : StackVisitor(thread_in, NULL), frames(0) {}
bool VisitFrame() OVERRIDE {
frames++;
@@ -461,8 +461,8 @@
void StackVisitor::DescribeStack(Thread* thread) {
struct DescribeStackVisitor : public StackVisitor {
- explicit DescribeStackVisitor(Thread* thread)
- : StackVisitor(thread, NULL) {}
+ explicit DescribeStackVisitor(Thread* thread_in)
+ : StackVisitor(thread_in, NULL) {}
bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
LOG(INFO) << "Frame Id=" << GetFrameId() << " " << DescribeLocation();
diff --git a/runtime/stack.h b/runtime/stack.h
index 2f8df61..66c840d 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -53,6 +53,7 @@
kImpreciseConstant,
kUndefined,
};
+std::ostream& operator<<(std::ostream& os, const VRegKind& rhs);
// A reference from the shadow stack to a MirrorType object within the Java heap.
template<class MirrorType>
@@ -336,9 +337,7 @@
}
#if defined(ART_USE_PORTABLE_COMPILER)
- enum ShadowFrameFlag {
- kHasReferenceArray = 1ul << 31
- };
+ constexpr uint32_t kHasReferenceArray = 1ul << 31;
// TODO: make const in the portable case.
uint32_t number_of_vregs_;
#else
@@ -633,6 +632,7 @@
}
static int GetOutVROffset(uint16_t out_num, InstructionSet isa) {
+ UNUSED(isa);
// According to stack model, the first out is above the Method reference.
return sizeof(StackReference<mirror::ArtMethod>) + (out_num * sizeof(uint32_t));
}
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index e1b5b91..94f7585 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -78,7 +78,9 @@
inline void Thread::AssertThreadSuspensionIsAllowable(bool check_locks) const {
if (kIsDebugBuild) {
- CHECK_EQ(0u, tls32_.no_thread_suspension) << tlsPtr_.last_no_thread_suspension_cause;
+ if (gAborting == 0) {
+ CHECK_EQ(0u, tls32_.no_thread_suspension) << tlsPtr_.last_no_thread_suspension_cause;
+ }
if (check_locks) {
bool bad_mutexes_held = false;
for (int i = kLockLevelCount - 1; i >= 0; --i) {
@@ -92,7 +94,9 @@
}
}
}
- CHECK(!bad_mutexes_held);
+ if (gAborting == 0) {
+ CHECK(!bad_mutexes_held);
+ }
}
}
}
diff --git a/runtime/thread.cc b/runtime/thread.cc
index da82c76..7d24562 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -837,10 +837,11 @@
}
struct StackDumpVisitor : public StackVisitor {
- StackDumpVisitor(std::ostream& os, Thread* thread, Context* context, bool can_allocate)
+ StackDumpVisitor(std::ostream& os_in, Thread* thread_in, Context* context, bool can_allocate_in)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(thread, context), os(os), thread(thread), can_allocate(can_allocate),
- last_method(nullptr), last_line_number(0), repetition_count(0), frame_count(0) {
+ : StackVisitor(thread_in, context), os(os_in), thread(thread_in),
+ can_allocate(can_allocate_in), last_method(nullptr), last_line_number(0),
+ repetition_count(0), frame_count(0) {
}
virtual ~StackDumpVisitor() {
@@ -2151,7 +2152,6 @@
const VmapTable vmap_table(m->GetVmapTable(code_pointer));
QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
// For all dex registers in the bitmap
- StackReference<mirror::ArtMethod>* cur_quick_frame = GetCurrentQuickFrame();
DCHECK(cur_quick_frame != nullptr);
for (size_t reg = 0; reg < num_regs; ++reg) {
// Does this register hold a reference?
diff --git a/runtime/thread.h b/runtime/thread.h
index 694dbda..89aee04 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -927,7 +927,7 @@
// See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=47409
DISALLOW_COPY_AND_ASSIGN(StateAndFlags);
};
- COMPILE_ASSERT(sizeof(StateAndFlags) == sizeof(int32_t), weird_state_and_flags_size);
+ static_assert(sizeof(StateAndFlags) == sizeof(int32_t), "Weird state_and_flags size");
static void ThreadExitCallback(void* arg);
@@ -963,8 +963,8 @@
}
union StateAndFlags state_and_flags;
- COMPILE_ASSERT(sizeof(union StateAndFlags) == sizeof(int32_t),
- sizeof_state_and_flags_and_int32_are_different);
+ static_assert(sizeof(union StateAndFlags) == sizeof(int32_t),
+ "Size of state_and_flags and int32 are different");
// A non-zero value is used to tell the current thread to enter a safe point
// at the next poll.
@@ -1193,7 +1193,6 @@
};
std::ostream& operator<<(std::ostream& os, const Thread& thread);
-std::ostream& operator<<(std::ostream& os, const ThreadState& state);
} // namespace art
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index f8c8fdb..675ce9a 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -220,7 +220,7 @@
// individual thread requires polling. delay_us is the requested sleep and total_delay_us
// accumulates the total time spent sleeping for timeouts. The first sleep is just a yield,
// subsequent sleeps increase delay_us from 1ms to 500ms by doubling.
-static void ThreadSuspendSleep(Thread* self, useconds_t* delay_us, useconds_t* total_delay_us) {
+static void ThreadSuspendSleep(useconds_t* delay_us, useconds_t* total_delay_us) {
useconds_t new_delay_us = (*delay_us) * 2;
CHECK_GE(new_delay_us, *delay_us);
if (new_delay_us < 500000) { // Don't allow sleeping to be more than 0.5s.
@@ -241,7 +241,7 @@
Locks::mutator_lock_->AssertNotExclusiveHeld(self);
Locks::thread_list_lock_->AssertNotHeld(self);
Locks::thread_suspend_count_lock_->AssertNotHeld(self);
- if (kDebugLocking) {
+ if (kDebugLocking && gAborting == 0) {
CHECK_NE(self->GetState(), kRunnable);
}
@@ -285,7 +285,7 @@
useconds_t total_delay_us = 0;
do {
useconds_t delay_us = 100;
- ThreadSuspendSleep(self, &delay_us, &total_delay_us);
+ ThreadSuspendSleep(&delay_us, &total_delay_us);
} while (!thread->IsSuspended());
// Shouldn't need to wait for longer than 1000 microseconds.
constexpr useconds_t kLongWaitThresholdUS = 1000;
@@ -515,7 +515,7 @@
// than request thread suspension, to avoid potential cycles in threads requesting each other
// suspend.
ScopedObjectAccess soa(self);
- MutexLock mu(self, *Locks::thread_list_lock_);
+ MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
thread = Thread::FromManagedThread(soa, peer);
if (thread == nullptr) {
ThreadSuspendByPeerWarning(self, WARNING, "No such thread for suspend", peer);
@@ -528,7 +528,7 @@
}
VLOG(threads) << "SuspendThreadByPeer found thread: " << *thread;
{
- MutexLock mu(self, *Locks::thread_suspend_count_lock_);
+ MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
if (request_suspension) {
thread->ModifySuspendCount(self, +1, debug_suspension);
request_suspension = false;
@@ -561,7 +561,7 @@
// Release locks and come out of runnable state.
}
VLOG(threads) << "SuspendThreadByPeer sleeping to allow thread chance to suspend";
- ThreadSuspendSleep(self, &delay_us, &total_delay_us);
+ ThreadSuspendSleep(&delay_us, &total_delay_us);
}
}
@@ -588,7 +588,7 @@
// than request thread suspension, to avoid potential cycles in threads requesting each other
// suspend.
ScopedObjectAccess soa(self);
- MutexLock mu(self, *Locks::thread_list_lock_);
+ MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
Thread* thread = nullptr;
for (const auto& it : list_) {
if (it->GetThreadId() == thread_id) {
@@ -606,7 +606,7 @@
VLOG(threads) << "SuspendThreadByThreadId found thread: " << *thread;
DCHECK(Contains(thread));
{
- MutexLock mu(self, *Locks::thread_suspend_count_lock_);
+ MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
if (suspended_thread == nullptr) {
thread->ModifySuspendCount(self, +1, debug_suspension);
suspended_thread = thread;
@@ -639,7 +639,7 @@
// Release locks and come out of runnable state.
}
VLOG(threads) << "SuspendThreadByThreadId sleeping to allow thread chance to suspend";
- ThreadSuspendSleep(self, &delay_us, &total_delay_us);
+ ThreadSuspendSleep(&delay_us, &total_delay_us);
}
}
@@ -662,9 +662,9 @@
VLOG(threads) << *self << " SuspendAllForDebugger starting...";
{
- MutexLock mu(self, *Locks::thread_list_lock_);
+ MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
{
- MutexLock mu(self, *Locks::thread_suspend_count_lock_);
+ MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
// Update global suspend all state for attaching threads.
DCHECK_GE(suspend_all_count_, debug_suspend_all_count_);
++suspend_all_count_;
@@ -769,9 +769,9 @@
Locks::mutator_lock_->AssertNotExclusiveHeld(self);
{
- MutexLock mu(self, *Locks::thread_list_lock_);
+ MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
{
- MutexLock mu(self, *Locks::thread_suspend_count_lock_);
+ MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
// Update global suspend all state for attaching threads.
DCHECK_GE(suspend_all_count_, debug_suspend_all_count_);
needs_resume = (debug_suspend_all_count_ > 0);
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index e8c9ff8..587eb32 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -42,14 +42,14 @@
}
ThreadPoolWorker::~ThreadPoolWorker() {
- CHECK_PTHREAD_CALL(pthread_join, (pthread_, NULL), "thread pool worker shutdown");
+ CHECK_PTHREAD_CALL(pthread_join, (pthread_, nullptr), "thread pool worker shutdown");
}
void ThreadPoolWorker::Run() {
Thread* self = Thread::Current();
- Task* task = NULL;
+ Task* task = nullptr;
thread_pool_->creation_barier_.Wait(self);
- while ((task = thread_pool_->GetTask(self)) != NULL) {
+ while ((task = thread_pool_->GetTask(self)) != nullptr) {
task->Run(self);
task->Finalize();
}
@@ -58,11 +58,11 @@
void* ThreadPoolWorker::Callback(void* arg) {
ThreadPoolWorker* worker = reinterpret_cast<ThreadPoolWorker*>(arg);
Runtime* runtime = Runtime::Current();
- CHECK(runtime->AttachCurrentThread(worker->name_.c_str(), true, NULL, false));
+ CHECK(runtime->AttachCurrentThread(worker->name_.c_str(), true, nullptr, false));
// Do work until it's time to shut down.
worker->Run();
runtime->DetachCurrentThread();
- return NULL;
+ return nullptr;
}
void ThreadPool::AddTask(Thread* self, Task* task) {
@@ -89,8 +89,9 @@
max_active_workers_(num_threads) {
Thread* self = Thread::Current();
while (GetThreadCount() < num_threads) {
- const std::string name = StringPrintf("%s worker thread %zu", name_.c_str(), GetThreadCount());
- threads_.push_back(new ThreadPoolWorker(this, name, ThreadPoolWorker::kDefaultStackSize));
+ const std::string worker_name = StringPrintf("%s worker thread %zu", name_.c_str(),
+ GetThreadCount());
+ threads_.push_back(new ThreadPoolWorker(this, worker_name, ThreadPoolWorker::kDefaultStackSize));
}
// Wait for all of the threads to attach.
creation_barier_.Wait(self);
@@ -137,8 +138,8 @@
const size_t active_threads = thread_count - waiting_count_;
// <= since self is considered an active worker.
if (active_threads <= max_active_workers_) {
- Task* task = TryGetTaskLocked(self);
- if (task != NULL) {
+ Task* task = TryGetTaskLocked();
+ if (task != nullptr) {
return task;
}
}
@@ -157,28 +158,28 @@
--waiting_count_;
}
- // We are shutting down, return NULL to tell the worker thread to stop looping.
- return NULL;
+ // We are shutting down, return nullptr to tell the worker thread to stop looping.
+ return nullptr;
}
Task* ThreadPool::TryGetTask(Thread* self) {
MutexLock mu(self, task_queue_lock_);
- return TryGetTaskLocked(self);
+ return TryGetTaskLocked();
}
-Task* ThreadPool::TryGetTaskLocked(Thread* self) {
+Task* ThreadPool::TryGetTaskLocked() {
if (started_ && !tasks_.empty()) {
Task* task = tasks_.front();
tasks_.pop_front();
return task;
}
- return NULL;
+ return nullptr;
}
void ThreadPool::Wait(Thread* self, bool do_work, bool may_hold_locks) {
if (do_work) {
- Task* task = NULL;
- while ((task = TryGetTask(self)) != NULL) {
+ Task* task = nullptr;
+ while ((task = TryGetTask(self)) != nullptr) {
task->Run(self);
task->Finalize();
}
@@ -201,17 +202,17 @@
WorkStealingWorker::WorkStealingWorker(ThreadPool* thread_pool, const std::string& name,
size_t stack_size)
- : ThreadPoolWorker(thread_pool, name, stack_size), task_(NULL) {}
+ : ThreadPoolWorker(thread_pool, name, stack_size), task_(nullptr) {}
void WorkStealingWorker::Run() {
Thread* self = Thread::Current();
- Task* task = NULL;
+ Task* task = nullptr;
WorkStealingThreadPool* thread_pool = down_cast<WorkStealingThreadPool*>(thread_pool_);
- while ((task = thread_pool_->GetTask(self)) != NULL) {
+ while ((task = thread_pool_->GetTask(self)) != nullptr) {
WorkStealingTask* stealing_task = down_cast<WorkStealingTask*>(task);
{
- CHECK(task_ == NULL);
+ CHECK(task_ == nullptr);
MutexLock mu(self, thread_pool->work_steal_lock_);
// Register that we are running the task
++stealing_task->ref_count_;
@@ -221,7 +222,7 @@
// Mark ourselves as not running a task so that nobody tries to steal from us.
// There is a race condition that someone starts stealing from us at this point. This is okay
// due to the reference counting.
- task_ = NULL;
+ task_ = nullptr;
bool finalize;
@@ -229,13 +230,13 @@
// all that happens when the race occurs is that we steal some work instead of processing a
// task from the queue.
while (thread_pool->GetTaskCount(self) == 0) {
- WorkStealingTask* steal_from_task = NULL;
+ WorkStealingTask* steal_from_task = nullptr;
{
MutexLock mu(self, thread_pool->work_steal_lock_);
// Try finding a task to steal from.
- steal_from_task = thread_pool->FindTaskToStealFrom(self);
- if (steal_from_task != NULL) {
+ steal_from_task = thread_pool->FindTaskToStealFrom();
+ if (steal_from_task != nullptr) {
CHECK_NE(stealing_task, steal_from_task)
<< "Attempting to steal from completed self task";
steal_from_task->ref_count_++;
@@ -244,7 +245,7 @@
}
}
- if (steal_from_task != NULL) {
+ if (steal_from_task != nullptr) {
// Task which completed earlier is going to steal some work.
stealing_task->StealFrom(self, steal_from_task);
@@ -279,12 +280,13 @@
work_steal_lock_("work stealing lock"),
steal_index_(0) {
while (GetThreadCount() < num_threads) {
- const std::string name = StringPrintf("Work stealing worker %zu", GetThreadCount());
- threads_.push_back(new WorkStealingWorker(this, name, ThreadPoolWorker::kDefaultStackSize));
+ const std::string worker_name = StringPrintf("Work stealing worker %zu", GetThreadCount());
+ threads_.push_back(new WorkStealingWorker(this, worker_name,
+ ThreadPoolWorker::kDefaultStackSize));
}
}
-WorkStealingTask* WorkStealingThreadPool::FindTaskToStealFrom(Thread* self) {
+WorkStealingTask* WorkStealingThreadPool::FindTaskToStealFrom() {
const size_t thread_count = GetThreadCount();
for (size_t i = 0; i < thread_count; ++i) {
// TODO: Use CAS instead of lock.
@@ -301,7 +303,7 @@
}
}
// Couldn't find something to steal.
- return NULL;
+ return nullptr;
}
WorkStealingThreadPool::~WorkStealingThreadPool() {}
diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h
index c816c84..d6330c8 100644
--- a/runtime/thread_pool.h
+++ b/runtime/thread_pool.h
@@ -101,7 +101,7 @@
// Try to get a task, returning NULL if there is none available.
Task* TryGetTask(Thread* self);
- Task* TryGetTaskLocked(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(task_queue_lock_);
+ Task* TryGetTaskLocked() EXCLUSIVE_LOCKS_REQUIRED(task_queue_lock_);
// Are we shutting down?
bool IsShuttingDown() const EXCLUSIVE_LOCKS_REQUIRED(task_queue_lock_) {
@@ -178,7 +178,7 @@
size_t steal_index_;
// Find a task to steal from
- WorkStealingTask* FindTaskToStealFrom(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(work_steal_lock_);
+ WorkStealingTask* FindTaskToStealFrom() EXCLUSIVE_LOCKS_REQUIRED(work_steal_lock_);
friend class WorkStealingWorker;
};
diff --git a/runtime/thread_state.h b/runtime/thread_state.h
index 0e47d21..6e5deeb 100644
--- a/runtime/thread_state.h
+++ b/runtime/thread_state.h
@@ -17,6 +17,8 @@
#ifndef ART_RUNTIME_THREAD_STATE_H_
#define ART_RUNTIME_THREAD_STATE_H_
+#include <ostream>
+
namespace art {
enum ThreadState {
@@ -43,6 +45,7 @@
kNative, // RUNNABLE TS_RUNNING running in a JNI native method
kSuspended, // RUNNABLE TS_RUNNING suspended by GC or debugger
};
+std::ostream& operator<<(std::ostream& os, const ThreadState& rhs);
} // namespace art
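
Several hunks in this patch only add std::ostream& operator<< declarations for enums (Runtime::CalleeSaveType, VRegKind, ThreadState) so that LOG and CHECK macros can stream them; in ART the definitions live elsewhere. A hypothetical self-contained version of the pattern, with illustrative names:

```cpp
#include <iostream>
#include <ostream>

enum DemoThreadState {  // stand-in for an ART enum such as ThreadState
  kDemoRunnable,
  kDemoSuspended,
};

// Declaring the operator next to the enum lets logging/assertion macros print a
// readable name instead of failing to compile or printing a raw integer.
std::ostream& operator<<(std::ostream& os, const DemoThreadState& rhs) {
  switch (rhs) {
    case kDemoRunnable:  return os << "Runnable";
    case kDemoSuspended: return os << "Suspended";
  }
  return os << "Unknown(" << static_cast<int>(rhs) << ")";
}

int main() {
  std::cout << kDemoSuspended << "\n";  // prints "Suspended"
  return 0;
}
```
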
diff --git a/runtime/trace.cc b/runtime/trace.cc
index b3158a4..29c01e4 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -244,7 +244,8 @@
the_trace->CompareAndUpdateStackTrace(thread, stack_trace);
}
-static void ClearThreadStackTraceAndClockBase(Thread* thread, void* arg) {
+static void ClearThreadStackTraceAndClockBase(Thread* thread,
+                                              void* arg ATTRIBUTE_UNUSED) {
thread->SetTraceClockBase(0);
std::vector<mirror::ArtMethod*>* stack_trace = thread->GetStackTraceSample();
thread->SetStackTraceSample(NULL);
@@ -561,27 +562,30 @@
void Trace::DexPcMoved(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method, uint32_t new_dex_pc) {
+ UNUSED(thread, this_object, method, new_dex_pc);
// We're not recorded to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected dex PC event in tracing " << PrettyMethod(method) << " " << new_dex_pc;
}
-void Trace::FieldRead(Thread* /*thread*/, mirror::Object* this_object,
+void Trace::FieldRead(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method, uint32_t dex_pc, mirror::ArtField* field)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UNUSED(thread, this_object, method, dex_pc, field);
// We're not recorded to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected field read event in tracing " << PrettyMethod(method) << " " << dex_pc;
}
-void Trace::FieldWritten(Thread* /*thread*/, mirror::Object* this_object,
+void Trace::FieldWritten(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method, uint32_t dex_pc, mirror::ArtField* field,
const JValue& field_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UNUSED(thread, this_object, method, dex_pc, field, field_value);
// We're not recorded to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected field write event in tracing " << PrettyMethod(method) << " " << dex_pc;
}
-void Trace::MethodEntered(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc) {
+void Trace::MethodEntered(Thread* thread, mirror::Object* this_object ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method, uint32_t dex_pc ATTRIBUTE_UNUSED) {
uint32_t thread_clock_diff = 0;
uint32_t wall_clock_diff = 0;
ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
@@ -589,10 +593,9 @@
thread_clock_diff, wall_clock_diff);
}
-void Trace::MethodExited(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc,
- const JValue& return_value) {
- UNUSED(return_value);
+void Trace::MethodExited(Thread* thread, mirror::Object* this_object ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method, uint32_t dex_pc ATTRIBUTE_UNUSED,
+ const JValue& return_value ATTRIBUTE_UNUSED) {
uint32_t thread_clock_diff = 0;
uint32_t wall_clock_diff = 0;
ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
@@ -600,8 +603,8 @@
thread_clock_diff, wall_clock_diff);
}
-void Trace::MethodUnwind(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc) {
+void Trace::MethodUnwind(Thread* thread, mirror::Object* this_object ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method, uint32_t dex_pc ATTRIBUTE_UNUSED) {
uint32_t thread_clock_diff = 0;
uint32_t wall_clock_diff = 0;
ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
@@ -613,6 +616,7 @@
mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
mirror::Throwable* exception_object)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UNUSED(thread, throw_location, catch_method, catch_dex_pc, exception_object);
LOG(ERROR) << "Unexpected exception caught event in tracing";
}
diff --git a/runtime/transaction.cc b/runtime/transaction.cc
index b496f25..478066f 100644
--- a/runtime/transaction.cc
+++ b/runtime/transaction.cc
@@ -144,7 +144,7 @@
LogInternedString(log);
}
-void Transaction::LogInternedString(InternStringLog& log) {
+void Transaction::LogInternedString(const InternStringLog& log) {
Locks::intern_table_lock_->AssertExclusiveHeld(Thread::Current());
MutexLock mu(Thread::Current(), log_lock_);
intern_string_logs_.push_front(log);
@@ -384,7 +384,7 @@
}
break;
default:
- LOG(FATAL) << "Unknown value kind " << field_value.kind;
+ LOG(FATAL) << "Unknown value kind " << static_cast<int>(field_value.kind);
break;
}
}
@@ -406,38 +406,38 @@
void Transaction::InternStringLog::Undo(InternTable* intern_table) {
DCHECK(intern_table != nullptr);
switch (string_op_) {
- case InternStringLog::kInsert: {
- switch (string_kind_) {
- case InternStringLog::kStrongString:
- intern_table->RemoveStrongFromTransaction(str_);
- break;
- case InternStringLog::kWeakString:
- intern_table->RemoveWeakFromTransaction(str_);
- break;
- default:
- LOG(FATAL) << "Unknown interned string kind";
- break;
- }
- break;
+ case InternStringLog::kInsert: {
+ switch (string_kind_) {
+ case InternStringLog::kStrongString:
+ intern_table->RemoveStrongFromTransaction(str_);
+ break;
+ case InternStringLog::kWeakString:
+ intern_table->RemoveWeakFromTransaction(str_);
+ break;
+ default:
+ LOG(FATAL) << "Unknown interned string kind";
+ break;
}
- case InternStringLog::kRemove: {
- switch (string_kind_) {
- case InternStringLog::kStrongString:
- intern_table->InsertStrongFromTransaction(str_);
- break;
- case InternStringLog::kWeakString:
- intern_table->InsertWeakFromTransaction(str_);
- break;
- default:
- LOG(FATAL) << "Unknown interned string kind";
- break;
- }
- break;
- }
- default:
- LOG(FATAL) << "Unknown interned string op";
- break;
+ break;
}
+ case InternStringLog::kRemove: {
+ switch (string_kind_) {
+ case InternStringLog::kStrongString:
+ intern_table->InsertStrongFromTransaction(str_);
+ break;
+ case InternStringLog::kWeakString:
+ intern_table->InsertWeakFromTransaction(str_);
+ break;
+ default:
+ LOG(FATAL) << "Unknown interned string kind";
+ break;
+ }
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unknown interned string op";
+ break;
+ }
}
void Transaction::InternStringLog::VisitRoots(RootCallback* callback, void* arg) {
diff --git a/runtime/transaction.h b/runtime/transaction.h
index 21d3c98..566f231 100644
--- a/runtime/transaction.h
+++ b/runtime/transaction.h
@@ -19,6 +19,7 @@
#include "base/macros.h"
#include "base/mutex.h"
+#include "base/value_object.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "primitive.h"
@@ -35,7 +36,7 @@
}
class InternTable;
-class Transaction {
+class Transaction FINAL {
public:
Transaction();
~Transaction();
@@ -92,7 +93,7 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
- class ObjectLog {
+ class ObjectLog : public ValueObject {
public:
void LogBooleanValue(MemberOffset offset, uint8_t value, bool is_volatile);
void LogByteValue(MemberOffset offset, int8_t value, bool is_volatile);
@@ -119,7 +120,7 @@
k64Bits,
kReference
};
- struct FieldValue {
+ struct FieldValue : public ValueObject {
// TODO use JValue instead ?
uint64_t value;
FieldValueKind kind;
@@ -134,7 +135,7 @@
std::map<uint32_t, FieldValue> field_values_;
};
- class ArrayLog {
+ class ArrayLog : public ValueObject {
public:
void LogValue(size_t index, uint64_t value);
@@ -153,7 +154,7 @@
std::map<size_t, uint64_t> array_values_;
};
- class InternStringLog {
+ class InternStringLog : public ValueObject {
public:
enum StringKind {
kStrongString,
@@ -175,11 +176,11 @@
private:
mirror::String* str_;
- StringKind string_kind_;
- StringOp string_op_;
+ const StringKind string_kind_;
+ const StringOp string_op_;
};
- void LogInternedString(InternStringLog& log)
+ void LogInternedString(const InternStringLog& log)
EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
LOCKS_EXCLUDED(log_lock_);
diff --git a/runtime/utf.cc b/runtime/utf.cc
index 02cbe3b..735815d 100644
--- a/runtime/utf.cc
+++ b/runtime/utf.cc
@@ -70,27 +70,27 @@
int32_t ComputeUtf16Hash(mirror::CharArray* chars, int32_t offset,
size_t char_count) {
- int32_t hash = 0;
+ uint32_t hash = 0;
for (size_t i = 0; i < char_count; i++) {
hash = hash * 31 + chars->Get(offset + i);
}
- return hash;
+ return static_cast<int32_t>(hash);
}
int32_t ComputeUtf16Hash(const uint16_t* chars, size_t char_count) {
- int32_t hash = 0;
+ uint32_t hash = 0;
while (char_count--) {
hash = hash * 31 + *chars++;
}
- return hash;
+ return static_cast<int32_t>(hash);
}
int32_t ComputeUtf8Hash(const char* chars) {
- int32_t hash = 0;
+ uint32_t hash = 0;
while (*chars != '\0') {
hash = hash * 31 + GetUtf16FromUtf8(&chars);
}
- return hash;
+ return static_cast<int32_t>(hash);
}
int CompareModifiedUtf8ToUtf16AsCodePointValues(const char* utf8_1, const uint16_t* utf8_2) {
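
The utf.cc hunk above switches the hash accumulator from int32_t to uint32_t: signed overflow is undefined behaviour in C++, while unsigned arithmetic wraps modulo 2^32, so the multiply-by-31 loop runs unsigned and is cast back only at the end. A small sketch of the same idea; HashChars is an illustrative name, not an ART function:

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>

int32_t HashChars(const uint16_t* chars, size_t char_count) {
  uint32_t hash = 0;  // unsigned accumulator: overflow wraps, which is well-defined
  for (size_t i = 0; i < char_count; ++i) {
    hash = hash * 31 + chars[i];
  }
  // Cast back once at the end to keep the existing int32_t signature.
  return static_cast<int32_t>(hash);
}

int main() {
  const uint16_t data[] = { 'a', 'b', 'c' };
  std::cout << HashChars(data, 3) << "\n";
  return 0;
}
```
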
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 9c94f6c..0373708 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -1109,8 +1109,8 @@
Split(cgroup_lines[i], ':', &cgroup_fields);
std::vector<std::string> cgroups;
Split(cgroup_fields[1], ',', &cgroups);
- for (size_t i = 0; i < cgroups.size(); ++i) {
- if (cgroups[i] == "cpu") {
+ for (size_t j = 0; j < cgroups.size(); ++j) {
+ if (cgroups[j] == "cpu") {
return cgroup_fields[2].substr(1); // Skip the leading slash.
}
}
@@ -1164,6 +1164,8 @@
}
os << "\n";
}
+#else
+ UNUSED(os, tid, prefix, current_method);
#endif
}
diff --git a/runtime/utils.h b/runtime/utils.h
index b7daa64..669fe6c 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -84,7 +84,7 @@
template<int n, typename T>
static inline bool IsAligned(T x) {
- COMPILE_ASSERT((n & (n - 1)) == 0, n_not_power_of_two);
+ static_assert((n & (n - 1)) == 0, "n is not a power of two");
return (x & (n - 1)) == 0;
}
@@ -222,7 +222,7 @@
// of V >= size of U (compile-time checked).
template<typename U, typename V>
static inline V bit_cast(U in) {
- COMPILE_ASSERT(sizeof(U) <= sizeof(V), size_of_u_not_le_size_of_v);
+ static_assert(sizeof(U) <= sizeof(V), "Size of U not <= size of V");
union {
U u;
V v;
@@ -467,15 +467,12 @@
template <typename A, typename B>
inline void operator() (A a, B b) const {
- UNUSED(a);
- UNUSED(b);
+ UNUSED(a, b);
}
template <typename A, typename B, typename C>
inline void operator() (A a, B b, C c) const {
- UNUSED(a);
- UNUSED(b);
- UNUSED(c);
+ UNUSED(a, b, c);
}
};
diff --git a/runtime/verifier/instruction_flags.h b/runtime/verifier/instruction_flags.h
index 36a6e55..e67067c 100644
--- a/runtime/verifier/instruction_flags.h
+++ b/runtime/verifier/instruction_flags.h
@@ -130,7 +130,8 @@
uint8_t flags_;
};
-COMPILE_ASSERT(sizeof(InstructionFlags) == sizeof(uint8_t), err);
+static_assert(sizeof(InstructionFlags) == sizeof(uint8_t),
+ "Size of InstructionFlags not equal to uint8_t");
} // namespace verifier
} // namespace art
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 8012451..2be47d1 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -88,6 +88,23 @@
}
}
+// Note: returns true on failure.
+ALWAYS_INLINE static inline bool FailOrAbort(MethodVerifier* verifier, bool condition,
+ const char* error_msg, uint32_t work_insn_idx) {
+ if (kIsDebugBuild) {
+ // In a debug build, abort if the error condition is wrong.
+ DCHECK(condition) << error_msg << work_insn_idx;
+ } else {
+ // In a non-debug build, just fail the class.
+ if (!condition) {
+ verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << error_msg << work_insn_idx;
+ return true;
+ }
+ }
+
+ return false;
+}
+
MethodVerifier::FailureKind MethodVerifier::VerifyClass(Thread* self,
mirror::Class* klass,
bool allow_soft_failures,
@@ -1174,11 +1191,6 @@
return os;
}
-extern "C" void MethodVerifierGdbDump(MethodVerifier* v)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- v->Dump(std::cerr);
-}
-
void MethodVerifier::Dump(std::ostream& os) {
if (code_item_ == nullptr) {
os << "Native method\n";
@@ -2014,7 +2026,11 @@
while (0 != instance_of_idx && !insn_flags_[instance_of_idx].IsOpcode()) {
instance_of_idx--;
}
- CHECK(insn_flags_[instance_of_idx].IsOpcode());
+ if (FailOrAbort(this, insn_flags_[instance_of_idx].IsOpcode(),
+ "Unable to get previous instruction of if-eqz/if-nez for work index ",
+ work_insn_idx_)) {
+ break;
+ }
} else {
break;
}
@@ -2072,7 +2088,11 @@
while (0 != move_idx && !insn_flags_[move_idx].IsOpcode()) {
move_idx--;
}
- CHECK(insn_flags_[move_idx].IsOpcode());
+ if (FailOrAbort(this, insn_flags_[move_idx].IsOpcode(),
+ "Unable to get previous instruction of if-eqz/if-nez for work index ",
+ work_insn_idx_)) {
+ break;
+ }
const Instruction* move_inst = Instruction::At(code_item_->insns_ + move_idx);
switch (move_inst->Opcode()) {
case Instruction::MOVE_OBJECT:
@@ -2625,7 +2645,7 @@
reg_types_.DoubleLo(), reg_types_.DoubleHi());
break;
case Instruction::ADD_INT_LIT16:
- case Instruction::RSUB_INT:
+ case Instruction::RSUB_INT_LIT16:
case Instruction::MUL_INT_LIT16:
case Instruction::DIV_INT_LIT16:
case Instruction::REM_INT_LIT16:
@@ -3035,7 +3055,12 @@
// odd case, but nothing to do
} else {
common_super = &common_super->Merge(exception, ®_types_);
- CHECK(reg_types_.JavaLangThrowable(false).IsAssignableFrom(*common_super));
+ if (FailOrAbort(this,
+ reg_types_.JavaLangThrowable(false).IsAssignableFrom(*common_super),
+ "java.lang.Throwable is not assignable-from common_super at ",
+ work_insn_idx_)) {
+ break;
+ }
}
}
}
@@ -3360,18 +3385,32 @@
if (klass->IsInterface()) {
// Derive Object.class from Class.class.getSuperclass().
mirror::Class* object_klass = klass->GetClass()->GetSuperClass();
- CHECK(object_klass->IsObjectClass());
+ if (FailOrAbort(this, object_klass->IsObjectClass(),
+ "Failed to find Object class in quickened invoke receiver",
+ work_insn_idx_)) {
+ return nullptr;
+ }
dispatch_class = object_klass;
} else {
dispatch_class = klass;
}
- CHECK(dispatch_class->HasVTable()) << PrettyDescriptor(dispatch_class);
+ if (FailOrAbort(this, dispatch_class->HasVTable(),
+ "Receiver class has no vtable for quickened invoke at ",
+ work_insn_idx_)) {
+ return nullptr;
+ }
uint16_t vtable_index = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
- CHECK_LT(static_cast<int32_t>(vtable_index), dispatch_class->GetVTableLength())
- << PrettyDescriptor(klass) << " in method "
- << PrettyMethod(dex_method_idx_, *dex_file_, true);
+ if (FailOrAbort(this, static_cast<int32_t>(vtable_index) < dispatch_class->GetVTableLength(),
+ "Receiver class has not enough vtable slots for quickened invoke at ",
+ work_insn_idx_)) {
+ return nullptr;
+ }
mirror::ArtMethod* res_method = dispatch_class->GetVTableEntry(vtable_index);
- CHECK(!self_->IsExceptionPending());
+ if (FailOrAbort(this, !Thread::Current()->IsExceptionPending(),
+ "Unexpected exception pending for quickened invoke at ",
+ work_insn_idx_)) {
+ return nullptr;
+ }
return res_method;
}
@@ -3384,7 +3423,14 @@
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot infer method from " << inst->Name();
return nullptr;
}
- CHECK(!res_method->IsDirect() && !res_method->IsStatic());
+ if (FailOrAbort(this, !res_method->IsDirect(), "Quick-invoked method is direct at ",
+ work_insn_idx_)) {
+ return nullptr;
+ }
+ if (FailOrAbort(this, !res_method->IsStatic(), "Quick-invoked method is static at ",
+ work_insn_idx_)) {
+ return nullptr;
+ }
// We use vAA as our expected arg count, rather than res_method->insSize, because we need to
// match the call to the signature. Also, we might be calling through an abstract method
diff --git a/sigchainlib/sigchain_dummy.cc b/sigchainlib/sigchain_dummy.cc
index 17bfe8f..76779ab 100644
--- a/sigchainlib/sigchain_dummy.cc
+++ b/sigchainlib/sigchain_dummy.cc
@@ -26,6 +26,8 @@
#include "sigchain.h"
+#define ATTRIBUTE_UNUSED __attribute__((__unused__))
+
static void log(const char* format, ...) {
char buf[256];
va_list ap;
@@ -39,17 +41,23 @@
va_end(ap);
}
-extern "C" void ClaimSignalChain(int signal, struct sigaction* oldaction) {
+namespace art {
+
+
+extern "C" void ClaimSignalChain(int signal ATTRIBUTE_UNUSED,
+ struct sigaction* oldaction ATTRIBUTE_UNUSED) {
log("ClaimSignalChain is not exported by the main executable.");
abort();
}
-extern "C" void UnclaimSignalChain(int signal) {
+extern "C" void UnclaimSignalChain(int signal ATTRIBUTE_UNUSED) {
log("UnclaimSignalChain is not exported by the main executable.");
abort();
}
-extern "C" void InvokeUserSignalHandler(int sig, siginfo_t* info, void* context) {
+extern "C" void InvokeUserSignalHandler(int sig ATTRIBUTE_UNUSED,
+ siginfo_t* info ATTRIBUTE_UNUSED,
+ void* context ATTRIBUTE_UNUSED) {
log("InvokeUserSignalHandler is not exported by the main executable.");
abort();
}
@@ -59,7 +67,10 @@
abort();
}
-extern "C" void EnsureFrontOfChain(int signal, struct sigaction* expected_action) {
+extern "C" void EnsureFrontOfChain(int signal ATTRIBUTE_UNUSED,
+ struct sigaction* expected_action ATTRIBUTE_UNUSED) {
log("EnsureFrontOfChain is not exported by the main executable.");
abort();
}
+
+} // namespace art
diff --git a/test/004-JniTest/jni_test.cc b/test/004-JniTest/jni_test.cc
index 6fc4484..c2877be 100644
--- a/test/004-JniTest/jni_test.cc
+++ b/test/004-JniTest/jni_test.cc
@@ -172,7 +172,7 @@
constexpr size_t kByteReturnSize = 7;
jbyte byte_returns[kByteReturnSize] = { 0, 1, 2, 127, -1, -2, -128 };
-extern "C" jbyte JNICALL Java_Main_byteMethod(JNIEnv* env, jclass klass, jbyte b1, jbyte b2,
+extern "C" jbyte JNICALL Java_Main_byteMethod(JNIEnv*, jclass, jbyte b1, jbyte b2,
jbyte b3, jbyte b4, jbyte b5, jbyte b6,
jbyte b7, jbyte b8, jbyte b9, jbyte b10) {
// We use b1 to drive the output.
@@ -197,7 +197,7 @@
static_cast<jshort>(0x8000) };
// The weird static_cast is because short int is only guaranteed down to -32767, not Java's -32768.
-extern "C" jshort JNICALL Java_Main_shortMethod(JNIEnv* env, jclass klass, jshort s1, jshort s2,
+extern "C" jshort JNICALL Java_Main_shortMethod(JNIEnv*, jclass, jshort s1, jshort s2,
jshort s3, jshort s4, jshort s5, jshort s6,
jshort s7, jshort s8, jshort s9, jshort s10) {
// We use s1 to drive the output.
@@ -217,7 +217,7 @@
return short_returns[s1];
}
-extern "C" jboolean JNICALL Java_Main_booleanMethod(JNIEnv* env, jclass klass, jboolean b1,
+extern "C" jboolean JNICALL Java_Main_booleanMethod(JNIEnv*, jclass, jboolean b1,
jboolean b2, jboolean b3, jboolean b4,
jboolean b5, jboolean b6, jboolean b7,
jboolean b8, jboolean b9, jboolean b10) {
@@ -239,7 +239,7 @@
constexpr size_t kCharReturnSize = 8;
jchar char_returns[kCharReturnSize] = { 0, 1, 2, 127, 255, 256, 15000, 34000 };
-extern "C" jchar JNICALL Java_Main_charMethod(JNIEnv* env, jclass klacc, jchar c1, jchar c2,
+extern "C" jchar JNICALL Java_Main_charMethod(JNIEnv*, jclass, jchar c1, jchar c2,
jchar c3, jchar c4, jchar c5, jchar c6, jchar c7,
jchar c8, jchar c9, jchar c10) {
// We use c1 to drive the output.
@@ -312,7 +312,7 @@
}
// http://b/16867274
-extern "C" JNIEXPORT void JNICALL Java_Main_nativeTestShallowGetCallingClassLoader(JNIEnv* env,
+extern "C" JNIEXPORT void JNICALL Java_Main_nativeTestShallowGetCallingClassLoader(JNIEnv*,
jclass) {
PthreadHelper(&testShallowGetCallingClassLoader);
}
@@ -350,7 +350,7 @@
// ourselves.
}
-extern "C" JNIEXPORT void JNICALL Java_Main_nativeTestShallowGetStackClass2(JNIEnv* env, jclass) {
+extern "C" JNIEXPORT void JNICALL Java_Main_nativeTestShallowGetStackClass2(JNIEnv*, jclass) {
PthreadHelper(&testShallowGetStackClass2);
}
diff --git a/test/004-SignalTest/signaltest.cc b/test/004-SignalTest/signaltest.cc
index a6d9b66..31371f6 100644
--- a/test/004-SignalTest/signaltest.cc
+++ b/test/004-SignalTest/signaltest.cc
@@ -14,14 +14,14 @@
* limitations under the License.
*/
+#include <jni.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
+#include <sys/ucontext.h>
#include <unistd.h>
-#include "jni.h"
-
-#include <sys/ucontext.h>
+#include "base/macros.h"
static int signal_count;
static const int kMaxSignal = 2;
@@ -47,7 +47,8 @@
#endif
#endif
-static void signalhandler(int sig, siginfo_t* info, void* context) {
+static void signalhandler(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
+ void* context) {
printf("signal caught\n");
++signal_count;
if (signal_count > kMaxSignal) {
diff --git a/test/040-miranda/src/Main.java b/test/040-miranda/src/Main.java
index ff5eba0..65f4fb4 100644
--- a/test/040-miranda/src/Main.java
+++ b/test/040-miranda/src/Main.java
@@ -42,8 +42,8 @@
System.out.println("Test getting miranda method via reflection:");
try {
- Class mirandaClass = Class.forName("MirandaAbstract");
- Method mirandaMethod = mirandaClass.getDeclaredMethod("inInterface", (Class[]) null);
+ Class<?> mirandaClass = Class.forName("MirandaAbstract");
+ Method mirandaMethod = mirandaClass.getDeclaredMethod("inInterface");
System.out.println(" did not expect to find miranda method");
} catch (NoSuchMethodException nsme) {
System.out.println(" caught expected NoSuchMethodException");
diff --git a/test/040-miranda/src/MirandaAbstract.java b/test/040-miranda/src/MirandaAbstract.java
index 309ecca..c8cfa34 100644
--- a/test/040-miranda/src/MirandaAbstract.java
+++ b/test/040-miranda/src/MirandaAbstract.java
@@ -21,6 +21,8 @@
{
protected MirandaAbstract() { }
+ // These will be miranda methods, as the interfaces define them, but they are not
+ // implemented in this abstract class:
//public abstract boolean inInterface();
//public abstract int inInterface2();
diff --git a/test/040-miranda/src/MirandaClass.java b/test/040-miranda/src/MirandaClass.java
index 0d942f0..4160992 100644
--- a/test/040-miranda/src/MirandaClass.java
+++ b/test/040-miranda/src/MirandaClass.java
@@ -22,17 +22,14 @@
public MirandaClass() {}
public boolean inInterface() {
- //System.out.println(" MirandaClass inInterface");
return true;
}
public int inInterface2() {
- //System.out.println(" MirandaClass inInterface2");
return 27;
}
public boolean inAbstract() {
- //System.out.println(" MirandaClass inAbstract");
return false;
}
}
diff --git a/test/040-miranda/src/MirandaClass2.java b/test/040-miranda/src/MirandaClass2.java
index e9bdf2b..143eb37 100644
--- a/test/040-miranda/src/MirandaClass2.java
+++ b/test/040-miranda/src/MirandaClass2.java
@@ -1,3 +1,19 @@
+/*
+ * Copyright (C) 2006 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
class MirandaClass2 extends MirandaAbstract {
public boolean inInterface() {
return true;
diff --git a/test/115-native-bridge/nativebridge.cc b/test/115-native-bridge/nativebridge.cc
index 23145e3..6bcc1f5 100644
--- a/test/115-native-bridge/nativebridge.cc
+++ b/test/115-native-bridge/nativebridge.cc
@@ -18,13 +18,14 @@
#include <algorithm>
#include <dlfcn.h>
+#include <jni.h>
#include <vector>
-#include "jni.h"
#include "stdio.h"
#include "unistd.h"
#include "sys/stat.h"
+#include "base/macros.h"
#include "nativebridge/native_bridge.h"
struct NativeBridgeMethod {
@@ -209,7 +210,8 @@
// NativeBridgeCallbacks implementations
extern "C" bool native_bridge_initialize(const android::NativeBridgeRuntimeCallbacks* art_cbs,
- const char* app_code_cache_dir, const char* isa) {
+ const char* app_code_cache_dir,
+ const char* isa ATTRIBUTE_UNUSED) {
struct stat st;
if ((app_code_cache_dir != nullptr)
&& (stat(app_code_cache_dir, &st) == 0)
@@ -248,7 +250,7 @@
}
extern "C" void* native_bridge_getTrampoline(void* handle, const char* name, const char* shorty,
- uint32_t len) {
+ uint32_t len ATTRIBUTE_UNUSED) {
printf("Getting trampoline for %s with shorty %s.\n", name, shorty);
// The name here is actually the JNI name, so we can directly do the lookup.
diff --git a/test/116-nodex2oat/nodex2oat.cc b/test/116-nodex2oat/nodex2oat.cc
index 04cac45..564d58d 100644
--- a/test/116-nodex2oat/nodex2oat.cc
+++ b/test/116-nodex2oat/nodex2oat.cc
@@ -38,7 +38,7 @@
return NoDex2OatTest::hasOat(cls);
}
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_isDex2OatEnabled(JNIEnv*, jclass cls) {
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_isDex2OatEnabled(JNIEnv*, jclass) {
return Runtime::Current()->IsDex2OatEnabled();
}
diff --git a/test/117-nopatchoat/expected.txt b/test/117-nopatchoat/expected.txt
index a1293ae..5cc02d1 100644
--- a/test/117-nopatchoat/expected.txt
+++ b/test/117-nopatchoat/expected.txt
@@ -1,9 +1,9 @@
Run without dex2oat/patchoat
-dex2oat & patchoat are disabled, has oat is true, has executable oat is false.
+dex2oat & patchoat are disabled, has oat is true, has executable oat is expected.
This is a function call
Run with dexoat/patchoat
-dex2oat & patchoat are enabled, has oat is true, has executable oat is true.
+dex2oat & patchoat are enabled, has oat is true, has executable oat is expected.
This is a function call
Run default
-dex2oat & patchoat are enabled, has oat is true, has executable oat is true.
+dex2oat & patchoat are enabled, has oat is true, has executable oat is expected.
This is a function call
diff --git a/test/117-nopatchoat/nopatchoat.cc b/test/117-nopatchoat/nopatchoat.cc
index 5994653..da276f2 100644
--- a/test/117-nopatchoat/nopatchoat.cc
+++ b/test/117-nopatchoat/nopatchoat.cc
@@ -24,18 +24,41 @@
class NoPatchoatTest {
public:
- static bool hasExecutableOat(jclass cls) {
+ static const OatFile::OatDexFile* getOatDexFile(jclass cls) {
ScopedObjectAccess soa(Thread::Current());
mirror::Class* klass = soa.Decode<mirror::Class*>(cls);
const DexFile& dex_file = klass->GetDexFile();
+
const OatFile::OatDexFile* oat_dex_file =
Runtime::Current()->GetClassLinker()->FindOpenedOatDexFileForDexFile(dex_file);
+
+ return oat_dex_file;
+ }
+
+ static bool hasExecutableOat(jclass cls) {
+ const OatFile::OatDexFile* oat_dex_file = getOatDexFile(cls);
+
return oat_dex_file != nullptr && oat_dex_file->GetOatFile()->IsExecutable();
}
+
+ static bool isPic(jclass cls) {
+ const OatFile::OatDexFile* oat_dex_file = getOatDexFile(cls);
+
+ if (oat_dex_file == nullptr) {
+ return false;
+ }
+
+ const OatFile* oat_file = oat_dex_file->GetOatFile();
+ return oat_file->IsPic();
+ }
};
extern "C" JNIEXPORT jboolean JNICALL Java_Main_hasExecutableOat(JNIEnv*, jclass cls) {
return NoPatchoatTest::hasExecutableOat(cls);
}
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_isPic(JNIEnv*, jclass cls) {
+ return NoPatchoatTest::isPic(cls);
+}
+
} // namespace art
diff --git a/test/117-nopatchoat/src/Main.java b/test/117-nopatchoat/src/Main.java
index f3f91ce..7bc9dbb 100644
--- a/test/117-nopatchoat/src/Main.java
+++ b/test/117-nopatchoat/src/Main.java
@@ -16,9 +16,14 @@
public class Main {
public static void main(String[] args) {
+ boolean executable_correct = (isPic() ?
+ hasExecutableOat() == true :
+ hasExecutableOat() == isDex2OatEnabled());
+
System.out.println(
"dex2oat & patchoat are " + ((isDex2OatEnabled()) ? "enabled" : "disabled") +
- ", has oat is " + hasOat() + ", has executable oat is " + hasExecutableOat() + ".");
+ ", has oat is " + hasOat() + ", has executable oat is " + (
+ executable_correct ? "expected" : "not expected") + ".");
if (!hasOat() && isDex2OatEnabled()) {
throw new Error("Application with dex2oat enabled runs without an oat file");
@@ -42,6 +47,8 @@
private native static boolean isDex2OatEnabled();
+ private native static boolean isPic();
+
private native static boolean hasOat();
private native static boolean hasExecutableOat();
diff --git a/test/118-noimage-dex2oat/noimage-dex2oat.cc b/test/118-noimage-dex2oat/noimage-dex2oat.cc
index 7340d9e..c49a13e 100644
--- a/test/118-noimage-dex2oat/noimage-dex2oat.cc
+++ b/test/118-noimage-dex2oat/noimage-dex2oat.cc
@@ -34,11 +34,11 @@
}
};
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_hasImage(JNIEnv*, jclass cls) {
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_hasImage(JNIEnv*, jclass) {
return Runtime::Current()->GetHeap()->HasImageSpace();
}
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_isImageDex2OatEnabled(JNIEnv*, jclass cls) {
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_isImageDex2OatEnabled(JNIEnv*, jclass) {
return Runtime::Current()->IsImageDex2OatEnabled();
}
diff --git a/test/126-miranda-multidex/build b/test/126-miranda-multidex/build
new file mode 100644
index 0000000..4c30f3f
--- /dev/null
+++ b/test/126-miranda-multidex/build
@@ -0,0 +1,32 @@
+#!/bin/bash
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Stop if something fails.
+set -e
+
+mkdir classes
+
+# Everything except MirandaInterface goes into the primary dex file.
+${JAVAC} -d classes `find src -name '*.java'`
+rm classes/MirandaInterface.class
+${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex classes
+
+# Only MirandaInterface goes into the secondary dex file.
+${JAVAC} -d classes `find src -name '*.java'`
+rm classes/Main.class classes/MirandaAbstract.class classes/MirandaClass*.class classes/MirandaInterface2*.class
+${DX} -JXmx256m --debug --dex --dump-to=classes2.lst --output=classes2.dex classes
+
+zip $TEST_NAME.jar classes.dex classes2.dex
diff --git a/test/126-miranda-multidex/expected.txt b/test/126-miranda-multidex/expected.txt
new file mode 100644
index 0000000..dbe3717
--- /dev/null
+++ b/test/126-miranda-multidex/expected.txt
@@ -0,0 +1,32 @@
+MirandaClass:
+ inInterface: true
+ inInterface2: 27
+ inAbstract: false
+MirandaAbstract / MirandaClass:
+ inInterface: true
+ inInterface2: 27
+ inAbstract: false
+true 27
+MirandaAbstract / MirandaClass2:
+ inInterface: true
+ inInterface2: 28
+ inAbstract: true
+true 28
+Test getting miranda method via reflection:
+ caught expected NoSuchMethodException
+MirandaClass:
+ inInterface: true
+ inInterface2: 27
+ inAbstract: false
+MirandaAbstract / MirandaClass:
+ inInterface: true
+ inInterface2: 27
+ inAbstract: false
+true 27
+MirandaAbstract / MirandaClass2:
+ inInterface: true
+ inInterface2: 28
+ inAbstract: true
+true 28
+Test getting miranda method via reflection:
+ caught expected NoSuchMethodException
diff --git a/test/126-miranda-multidex/info.txt b/test/126-miranda-multidex/info.txt
new file mode 100644
index 0000000..ac50e2e
--- /dev/null
+++ b/test/126-miranda-multidex/info.txt
@@ -0,0 +1,2 @@
+This test ensures that cross-dex-file Miranda methods are correctly resolved.
+See b/18193682 for details.
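+A Miranda method is an abstract method that a class inherits implicitly from an
+interface it implements without declaring it itself, so the runtime has to
+synthesize a method table entry for it.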
diff --git a/test/126-miranda-multidex/run b/test/126-miranda-multidex/run
new file mode 100755
index 0000000..23c9935
--- /dev/null
+++ b/test/126-miranda-multidex/run
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+${RUN} $@
+
+# The problem was first exposed in a no-verify setting, as that changes the resolution path
+# taken. Make sure we also test in that environment.
+${RUN} --no-verify ${@}
diff --git a/test/126-miranda-multidex/src/Main.java b/test/126-miranda-multidex/src/Main.java
new file mode 100644
index 0000000..8624378
--- /dev/null
+++ b/test/126-miranda-multidex/src/Main.java
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2006 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+
+/**
+ * Miranda testing.
+ */
+public class Main {
+ public static void main(String[] args) {
+ MirandaClass mir = new MirandaClass();
+ System.out.println("MirandaClass:");
+ System.out.println(" inInterface: " + mir.inInterface());
+ System.out.println(" inInterface2: " + mir.inInterface2());
+ System.out.println(" inAbstract: " + mir.inAbstract());
+
+ /* try again through abstract class; results should be identical */
+ MirandaAbstract mira = mir;
+ System.out.println("MirandaAbstract / MirandaClass:");
+ System.out.println(" inInterface: " + mira.inInterface());
+ System.out.println(" inInterface2: " + mira.inInterface2());
+ System.out.println(" inAbstract: " + mira.inAbstract());
+ mira.callMiranda();
+
+ MirandaAbstract mira2 = new MirandaClass2();
+ System.out.println("MirandaAbstract / MirandaClass2:");
+ System.out.println(" inInterface: " + mira2.inInterface());
+ System.out.println(" inInterface2: " + mira2.inInterface2());
+ System.out.println(" inAbstract: " + mira2.inAbstract());
+ mira2.callMiranda();
+
+ System.out.println("Test getting miranda method via reflection:");
+ try {
+ Class<?> mirandaClass = Class.forName("MirandaAbstract");
+ Method mirandaMethod = mirandaClass.getDeclaredMethod("inInterface");
+ System.out.println(" did not expect to find miranda method");
+ } catch (NoSuchMethodException nsme) {
+ System.out.println(" caught expected NoSuchMethodException");
+ } catch (Exception e) {
+ System.out.println(" caught unexpected exception " + e);
+ }
+ }
+}
diff --git a/test/126-miranda-multidex/src/MirandaAbstract.java b/test/126-miranda-multidex/src/MirandaAbstract.java
new file mode 100644
index 0000000..c09a61f
--- /dev/null
+++ b/test/126-miranda-multidex/src/MirandaAbstract.java
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2006 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Miranda testing.
+ */
+public abstract class MirandaAbstract implements MirandaInterface, MirandaInterface2
+{
+ protected MirandaAbstract() { }
+
+ // These will be miranda methods, as the interfaces define them, but they are not
+ // implemented in this abstract class:
+ //public abstract boolean inInterface();
+ //public abstract int inInterface2();
+
+ public boolean inAbstract() {
+ return true;
+ }
+
+ public void callMiranda() {
+ System.out.println(inInterface() + " " + inInterface2());
+ }
+}
diff --git a/test/126-miranda-multidex/src/MirandaClass.java b/test/126-miranda-multidex/src/MirandaClass.java
new file mode 100644
index 0000000..7bb37e7
--- /dev/null
+++ b/test/126-miranda-multidex/src/MirandaClass.java
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2006 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Miranda testing.
+ */
+public class MirandaClass extends MirandaAbstract {
+
+ public MirandaClass() {}
+
+ public boolean inInterface() {
+ return true;
+ }
+
+ public int inInterface2() {
+ return 27;
+ }
+
+ public boolean inAbstract() {
+ return false;
+ }
+
+ // Better not hit any of these...
+ public void inInterfaceDummy1() {
+ System.out.println("inInterfaceDummy1");
+ }
+ public void inInterfaceDummy2() {
+ System.out.println("inInterfaceDummy2");
+ }
+ public void inInterfaceDummy3() {
+ System.out.println("inInterfaceDummy3");
+ }
+ public void inInterfaceDummy4() {
+ System.out.println("inInterfaceDummy4");
+ }
+ public void inInterfaceDummy5() {
+ System.out.println("inInterfaceDummy5");
+ }
+}
diff --git a/test/126-miranda-multidex/src/MirandaClass2.java b/test/126-miranda-multidex/src/MirandaClass2.java
new file mode 100644
index 0000000..797ead2
--- /dev/null
+++ b/test/126-miranda-multidex/src/MirandaClass2.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2006 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class MirandaClass2 extends MirandaAbstract {
+ public boolean inInterface() {
+ return true;
+ }
+
+ public int inInterface2() {
+ return 28;
+ }
+
+ // Better not hit any of these...
+ public void inInterfaceDummy1() {
+ System.out.println("inInterfaceDummy1");
+ }
+ public void inInterfaceDummy2() {
+ System.out.println("inInterfaceDummy2");
+ }
+ public void inInterfaceDummy3() {
+ System.out.println("inInterfaceDummy3");
+ }
+ public void inInterfaceDummy4() {
+ System.out.println("inInterfaceDummy4");
+ }
+ public void inInterfaceDummy5() {
+ System.out.println("inInterfaceDummy5");
+ }
+}
diff --git a/test/126-miranda-multidex/src/MirandaInterface.java b/test/126-miranda-multidex/src/MirandaInterface.java
new file mode 100644
index 0000000..df12fcc
--- /dev/null
+++ b/test/126-miranda-multidex/src/MirandaInterface.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2006 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Miranda testing.
+ */
+public interface MirandaInterface {
+
+ public boolean inInterface();
+
+ // A couple of dummy methods to fill the method table.
+ public void inInterfaceDummy1();
+ public void inInterfaceDummy2();
+ public void inInterfaceDummy3();
+ public void inInterfaceDummy4();
+ public void inInterfaceDummy5();
+
+}
diff --git a/test/126-miranda-multidex/src/MirandaInterface2.java b/test/126-miranda-multidex/src/MirandaInterface2.java
new file mode 100644
index 0000000..7c93fd0
--- /dev/null
+++ b/test/126-miranda-multidex/src/MirandaInterface2.java
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2006 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Miranda testing.
+ */
+public interface MirandaInterface2 {
+
+ public boolean inInterface();
+
+ public int inInterface2();
+
+}
diff --git a/test/127-secondarydex/build b/test/127-secondarydex/build
new file mode 100755
index 0000000..712774f
--- /dev/null
+++ b/test/127-secondarydex/build
@@ -0,0 +1,31 @@
+#!/bin/bash
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Stop if something fails.
+set -e
+
+mkdir classes
+${JAVAC} -d classes `find src -name '*.java'`
+
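+# Super goes into the secondary dex file (the -ex jar).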
+mkdir classes-ex
+mv classes/Super.class classes-ex
+
+if [ ${NEED_DEX} = "true" ]; then
+ ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex --dump-width=1000 classes
+ zip $TEST_NAME.jar classes.dex
+ ${DX} -JXmx256m --debug --dex --dump-to=classes-ex.lst --output=classes.dex --dump-width=1000 classes-ex
+ zip ${TEST_NAME}-ex.jar classes.dex
+fi
diff --git a/test/127-secondarydex/expected.txt b/test/127-secondarydex/expected.txt
new file mode 100644
index 0000000..29a1411
--- /dev/null
+++ b/test/127-secondarydex/expected.txt
@@ -0,0 +1,3 @@
+testSlowPathDirectInvoke
+Test
+Got null pointer exception
diff --git a/test/127-secondarydex/info.txt b/test/127-secondarydex/info.txt
new file mode 100644
index 0000000..0479d1a
--- /dev/null
+++ b/test/127-secondarydex/info.txt
@@ -0,0 +1,3 @@
+Test features with a secondary dex file.
+
+- Regression test to ensure the slow path of a direct invoke performs a null check.
diff --git a/test/127-secondarydex/run b/test/127-secondarydex/run
new file mode 100755
index 0000000..d8c3c79
--- /dev/null
+++ b/test/127-secondarydex/run
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Use secondary switch to add secondary dex file to class path.
+exec ${RUN} "${@}" --secondary
diff --git a/test/127-secondarydex/src/Main.java b/test/127-secondarydex/src/Main.java
new file mode 100644
index 0000000..c921c5b
--- /dev/null
+++ b/test/127-secondarydex/src/Main.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Method;
+
+/**
+ * Secondary dex file test.
+ */
+public class Main {
+ public static void main(String[] args) {
+ testSlowPathDirectInvoke();
+ }
+
+ public static void testSlowPathDirectInvoke() {
+ System.out.println("testSlowPathDirectInvoke");
+ try {
+ Test t1 = new Test();
+ Test t2 = new Test();
+ Test t3 = null;
+ t1.test(t2);
+ t1.test(t3);
+ } catch (NullPointerException npe) {
+ System.out.println("Got null pointer exception");
+ } catch (Exception e) {
+ System.out.println("Got unexpected exception " + e);
+ }
+ }
+}
diff --git a/test/127-secondarydex/src/Super.java b/test/127-secondarydex/src/Super.java
new file mode 100644
index 0000000..7608d4a
--- /dev/null
+++ b/test/127-secondarydex/src/Super.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Super {
+ private void print() {
+ System.out.println("Super");
+ }
+}
diff --git a/test/127-secondarydex/src/Test.java b/test/127-secondarydex/src/Test.java
new file mode 100644
index 0000000..82cb901
--- /dev/null
+++ b/test/127-secondarydex/src/Test.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Test extends Super {
+ public void test(Test t) {
+ t.print();
+ }
+
+ private void print() {
+ System.out.println("Test");
+ }
+}
diff --git a/test/418-const-string/expected.txt b/test/418-const-string/expected.txt
new file mode 100644
index 0000000..8254f87
--- /dev/null
+++ b/test/418-const-string/expected.txt
@@ -0,0 +1,2 @@
+Hello World
+Hello World
diff --git a/test/418-const-string/info.txt b/test/418-const-string/info.txt
new file mode 100644
index 0000000..b7a468f
--- /dev/null
+++ b/test/418-const-string/info.txt
@@ -0,0 +1 @@
+Small test case for the CONST_STRING opcode.
diff --git a/test/418-const-string/src/Main.java b/test/418-const-string/src/Main.java
new file mode 100644
index 0000000..7c1ffec
--- /dev/null
+++ b/test/418-const-string/src/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ // First call: may go in slow path.
+ System.out.println($opt$ReturnHelloWorld());
+ // Second call: no slow path.
+ System.out.println($opt$ReturnHelloWorld());
+ }
+
+ public static String $opt$ReturnHelloWorld() {
+ return "Hello World";
+ }
+}
diff --git a/test/419-long-parameter/expected.txt b/test/419-long-parameter/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/419-long-parameter/expected.txt
diff --git a/test/419-long-parameter/info.txt b/test/419-long-parameter/info.txt
new file mode 100644
index 0000000..5eac977
--- /dev/null
+++ b/test/419-long-parameter/info.txt
@@ -0,0 +1,3 @@
+Regression test for a long parameter passed both on the stack and in registers
+on 32-bit architectures. The move to the hard-float ABI means that the register
+index no longer necessarily matches the stack index.
diff --git a/test/419-long-parameter/src/Main.java b/test/419-long-parameter/src/Main.java
new file mode 100644
index 0000000..808b7f6
--- /dev/null
+++ b/test/419-long-parameter/src/Main.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ if ($opt$TestCallee(1.0, 2.0, 1L, 2L) != 1L) {
+ throw new Error("Unexpected result");
+ }
+ if ($opt$TestCaller() != 1L) {
+ throw new Error("Unexpected result");
+ }
+ }
+
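+ // With the hard-float ABI the doubles occupy floating point registers, so the
+ // long arguments do not line up the same way in registers and on the stack
+ // (see info.txt).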
+ public static long $opt$TestCallee(double a, double b, long c, long d) {
+ return d - c;
+ }
+
+ public static long $opt$TestCaller() {
+ return $opt$TestCallee(1.0, 2.0, 1L, 2L);
+ }
+}
diff --git a/test/420-const-class/expected.txt b/test/420-const-class/expected.txt
new file mode 100644
index 0000000..3213026
--- /dev/null
+++ b/test/420-const-class/expected.txt
@@ -0,0 +1,16 @@
+class Main
+class Main
+class Main$Other
+class Main$Other
+class java.lang.System
+class java.lang.System
+Hello from OtherWithClinit
+42
+class Main$OtherWithClinit
+42
+class Main$OtherWithClinit
+class Main$OtherWithClinit2
+Hello from OtherWithClinit2
+43
+class Main$OtherWithClinit2
+43
diff --git a/test/420-const-class/info.txt b/test/420-const-class/info.txt
new file mode 100644
index 0000000..81cbac7
--- /dev/null
+++ b/test/420-const-class/info.txt
@@ -0,0 +1 @@
+Test for the CONST_CLASS opcode.
diff --git a/test/420-const-class/src/Main.java b/test/420-const-class/src/Main.java
new file mode 100644
index 0000000..44a7436
--- /dev/null
+++ b/test/420-const-class/src/Main.java
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ static class Other {
+ }
+
+ static class OtherWithClinit {
+ static int a;
+ static {
+ System.out.println("Hello from OtherWithClinit");
+ a = 42;
+ }
+ }
+
+ static class OtherWithClinit2 {
+ static int a;
+ static {
+ System.out.println("Hello from OtherWithClinit2");
+ a = 43;
+ }
+ }
+
+ public static void main(String[] args) {
+ // Call methods twice in case they have a slow path.
+
+ System.out.println($opt$LoadThisClass());
+ System.out.println($opt$LoadThisClass());
+
+ System.out.println($opt$LoadOtherClass());
+ System.out.println($opt$LoadOtherClass());
+
+ System.out.println($opt$LoadSystemClass());
+ System.out.println($opt$LoadSystemClass());
+
+ $opt$ClinitCheckAndLoad();
+ $opt$ClinitCheckAndLoad();
+
+ $opt$LoadAndClinitCheck();
+ $opt$LoadAndClinitCheck();
+ }
+
+ public static Class $opt$LoadThisClass() {
+ return Main.class;
+ }
+
+ public static Class $opt$LoadOtherClass() {
+ return Other.class;
+ }
+
+ public static Class $opt$LoadSystemClass() {
+ return System.class;
+ }
+
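+ // Reads the static field first, forcing the class initialization check, and
+ // then loads the class constant.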
+ public static void $opt$ClinitCheckAndLoad() {
+ System.out.println(OtherWithClinit.a);
+ System.out.println(OtherWithClinit.class);
+ }
+
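+ // Loads the class constant first and only then reads the static field.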
+ public static void $opt$LoadAndClinitCheck() {
+ System.out.println(OtherWithClinit2.class);
+ System.out.println(OtherWithClinit2.a);
+ }
+}
diff --git a/test/421-large-frame/expected.txt b/test/421-large-frame/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/421-large-frame/expected.txt
diff --git a/test/421-large-frame/info.txt b/test/421-large-frame/info.txt
new file mode 100644
index 0000000..d71e7ee
--- /dev/null
+++ b/test/421-large-frame/info.txt
@@ -0,0 +1 @@
+Tests for large stack frames.
diff --git a/test/421-large-frame/src/Main.java b/test/421-large-frame/src/Main.java
new file mode 100644
index 0000000..01b89ba
--- /dev/null
+++ b/test/421-large-frame/src/Main.java
@@ -0,0 +1,2034 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Note that $opt$ is a marker for the optimizing compiler to ensure
+// it does compile the method.
+public class Main {
+
+ public static void assertEquals(long expected, long result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void main(String[] args) {
+ // Sum[i = 0..999](i) = 999 * 1000 / 2 = 499500L.
+ assertEquals(499500L, $opt$LargeFrame());
+ }
+
+ static long $opt$LargeFrame() {
+ long l0 = 0L;
+ long l1 = 1L;
+ long l2 = 2L;
+ long l3 = 3L;
+ long l4 = 4L;
+ long l5 = 5L;
+ long l6 = 6L;
+ long l7 = 7L;
+ long l8 = 8L;
+ long l9 = 9L;
+ long l10 = 10L;
+ long l11 = 11L;
+ long l12 = 12L;
+ long l13 = 13L;
+ long l14 = 14L;
+ long l15 = 15L;
+ long l16 = 16L;
+ long l17 = 17L;
+ long l18 = 18L;
+ long l19 = 19L;
+ long l20 = 20L;
+ long l21 = 21L;
+ long l22 = 22L;
+ long l23 = 23L;
+ long l24 = 24L;
+ long l25 = 25L;
+ long l26 = 26L;
+ long l27 = 27L;
+ long l28 = 28L;
+ long l29 = 29L;
+ long l30 = 30L;
+ long l31 = 31L;
+ long l32 = 32L;
+ long l33 = 33L;
+ long l34 = 34L;
+ long l35 = 35L;
+ long l36 = 36L;
+ long l37 = 37L;
+ long l38 = 38L;
+ long l39 = 39L;
+ long l40 = 40L;
+ long l41 = 41L;
+ long l42 = 42L;
+ long l43 = 43L;
+ long l44 = 44L;
+ long l45 = 45L;
+ long l46 = 46L;
+ long l47 = 47L;
+ long l48 = 48L;
+ long l49 = 49L;
+ long l50 = 50L;
+ long l51 = 51L;
+ long l52 = 52L;
+ long l53 = 53L;
+ long l54 = 54L;
+ long l55 = 55L;
+ long l56 = 56L;
+ long l57 = 57L;
+ long l58 = 58L;
+ long l59 = 59L;
+ long l60 = 60L;
+ long l61 = 61L;
+ long l62 = 62L;
+ long l63 = 63L;
+ long l64 = 64L;
+ long l65 = 65L;
+ long l66 = 66L;
+ long l67 = 67L;
+ long l68 = 68L;
+ long l69 = 69L;
+ long l70 = 70L;
+ long l71 = 71L;
+ long l72 = 72L;
+ long l73 = 73L;
+ long l74 = 74L;
+ long l75 = 75L;
+ long l76 = 76L;
+ long l77 = 77L;
+ long l78 = 78L;
+ long l79 = 79L;
+ long l80 = 80L;
+ long l81 = 81L;
+ long l82 = 82L;
+ long l83 = 83L;
+ long l84 = 84L;
+ long l85 = 85L;
+ long l86 = 86L;
+ long l87 = 87L;
+ long l88 = 88L;
+ long l89 = 89L;
+ long l90 = 90L;
+ long l91 = 91L;
+ long l92 = 92L;
+ long l93 = 93L;
+ long l94 = 94L;
+ long l95 = 95L;
+ long l96 = 96L;
+ long l97 = 97L;
+ long l98 = 98L;
+ long l99 = 99L;
+ long l100 = 100L;
+ long l101 = 101L;
+ long l102 = 102L;
+ long l103 = 103L;
+ long l104 = 104L;
+ long l105 = 105L;
+ long l106 = 106L;
+ long l107 = 107L;
+ long l108 = 108L;
+ long l109 = 109L;
+ long l110 = 110L;
+ long l111 = 111L;
+ long l112 = 112L;
+ long l113 = 113L;
+ long l114 = 114L;
+ long l115 = 115L;
+ long l116 = 116L;
+ long l117 = 117L;
+ long l118 = 118L;
+ long l119 = 119L;
+ long l120 = 120L;
+ long l121 = 121L;
+ long l122 = 122L;
+ long l123 = 123L;
+ long l124 = 124L;
+ long l125 = 125L;
+ long l126 = 126L;
+ long l127 = 127L;
+ long l128 = 128L;
+ long l129 = 129L;
+ long l130 = 130L;
+ long l131 = 131L;
+ long l132 = 132L;
+ long l133 = 133L;
+ long l134 = 134L;
+ long l135 = 135L;
+ long l136 = 136L;
+ long l137 = 137L;
+ long l138 = 138L;
+ long l139 = 139L;
+ long l140 = 140L;
+ long l141 = 141L;
+ long l142 = 142L;
+ long l143 = 143L;
+ long l144 = 144L;
+ long l145 = 145L;
+ long l146 = 146L;
+ long l147 = 147L;
+ long l148 = 148L;
+ long l149 = 149L;
+ long l150 = 150L;
+ long l151 = 151L;
+ long l152 = 152L;
+ long l153 = 153L;
+ long l154 = 154L;
+ long l155 = 155L;
+ long l156 = 156L;
+ long l157 = 157L;
+ long l158 = 158L;
+ long l159 = 159L;
+ long l160 = 160L;
+ long l161 = 161L;
+ long l162 = 162L;
+ long l163 = 163L;
+ long l164 = 164L;
+ long l165 = 165L;
+ long l166 = 166L;
+ long l167 = 167L;
+ long l168 = 168L;
+ long l169 = 169L;
+ long l170 = 170L;
+ long l171 = 171L;
+ long l172 = 172L;
+ long l173 = 173L;
+ long l174 = 174L;
+ long l175 = 175L;
+ long l176 = 176L;
+ long l177 = 177L;
+ long l178 = 178L;
+ long l179 = 179L;
+ long l180 = 180L;
+ long l181 = 181L;
+ long l182 = 182L;
+ long l183 = 183L;
+ long l184 = 184L;
+ long l185 = 185L;
+ long l186 = 186L;
+ long l187 = 187L;
+ long l188 = 188L;
+ long l189 = 189L;
+ long l190 = 190L;
+ long l191 = 191L;
+ long l192 = 192L;
+ long l193 = 193L;
+ long l194 = 194L;
+ long l195 = 195L;
+ long l196 = 196L;
+ long l197 = 197L;
+ long l198 = 198L;
+ long l199 = 199L;
+ long l200 = 200L;
+ long l201 = 201L;
+ long l202 = 202L;
+ long l203 = 203L;
+ long l204 = 204L;
+ long l205 = 205L;
+ long l206 = 206L;
+ long l207 = 207L;
+ long l208 = 208L;
+ long l209 = 209L;
+ long l210 = 210L;
+ long l211 = 211L;
+ long l212 = 212L;
+ long l213 = 213L;
+ long l214 = 214L;
+ long l215 = 215L;
+ long l216 = 216L;
+ long l217 = 217L;
+ long l218 = 218L;
+ long l219 = 219L;
+ long l220 = 220L;
+ long l221 = 221L;
+ long l222 = 222L;
+ long l223 = 223L;
+ long l224 = 224L;
+ long l225 = 225L;
+ long l226 = 226L;
+ long l227 = 227L;
+ long l228 = 228L;
+ long l229 = 229L;
+ long l230 = 230L;
+ long l231 = 231L;
+ long l232 = 232L;
+ long l233 = 233L;
+ long l234 = 234L;
+ long l235 = 235L;
+ long l236 = 236L;
+ long l237 = 237L;
+ long l238 = 238L;
+ long l239 = 239L;
+ long l240 = 240L;
+ long l241 = 241L;
+ long l242 = 242L;
+ long l243 = 243L;
+ long l244 = 244L;
+ long l245 = 245L;
+ long l246 = 246L;
+ long l247 = 247L;
+ long l248 = 248L;
+ long l249 = 249L;
+ long l250 = 250L;
+ long l251 = 251L;
+ long l252 = 252L;
+ long l253 = 253L;
+ long l254 = 254L;
+ long l255 = 255L;
+ long l256 = 256L;
+ long l257 = 257L;
+ long l258 = 258L;
+ long l259 = 259L;
+ long l260 = 260L;
+ long l261 = 261L;
+ long l262 = 262L;
+ long l263 = 263L;
+ long l264 = 264L;
+ long l265 = 265L;
+ long l266 = 266L;
+ long l267 = 267L;
+ long l268 = 268L;
+ long l269 = 269L;
+ long l270 = 270L;
+ long l271 = 271L;
+ long l272 = 272L;
+ long l273 = 273L;
+ long l274 = 274L;
+ long l275 = 275L;
+ long l276 = 276L;
+ long l277 = 277L;
+ long l278 = 278L;
+ long l279 = 279L;
+ long l280 = 280L;
+ long l281 = 281L;
+ long l282 = 282L;
+ long l283 = 283L;
+ long l284 = 284L;
+ long l285 = 285L;
+ long l286 = 286L;
+ long l287 = 287L;
+ long l288 = 288L;
+ long l289 = 289L;
+ long l290 = 290L;
+ long l291 = 291L;
+ long l292 = 292L;
+ long l293 = 293L;
+ long l294 = 294L;
+ long l295 = 295L;
+ long l296 = 296L;
+ long l297 = 297L;
+ long l298 = 298L;
+ long l299 = 299L;
+ long l300 = 300L;
+ long l301 = 301L;
+ long l302 = 302L;
+ long l303 = 303L;
+ long l304 = 304L;
+ long l305 = 305L;
+ long l306 = 306L;
+ long l307 = 307L;
+ long l308 = 308L;
+ long l309 = 309L;
+ long l310 = 310L;
+ long l311 = 311L;
+ long l312 = 312L;
+ long l313 = 313L;
+ long l314 = 314L;
+ long l315 = 315L;
+ long l316 = 316L;
+ long l317 = 317L;
+ long l318 = 318L;
+ long l319 = 319L;
+ long l320 = 320L;
+ long l321 = 321L;
+ long l322 = 322L;
+ long l323 = 323L;
+ long l324 = 324L;
+ long l325 = 325L;
+ long l326 = 326L;
+ long l327 = 327L;
+ long l328 = 328L;
+ long l329 = 329L;
+ long l330 = 330L;
+ long l331 = 331L;
+ long l332 = 332L;
+ long l333 = 333L;
+ long l334 = 334L;
+ long l335 = 335L;
+ long l336 = 336L;
+ long l337 = 337L;
+ long l338 = 338L;
+ long l339 = 339L;
+ long l340 = 340L;
+ long l341 = 341L;
+ long l342 = 342L;
+ long l343 = 343L;
+ long l344 = 344L;
+ long l345 = 345L;
+ long l346 = 346L;
+ long l347 = 347L;
+ long l348 = 348L;
+ long l349 = 349L;
+ long l350 = 350L;
+ long l351 = 351L;
+ long l352 = 352L;
+ long l353 = 353L;
+ long l354 = 354L;
+ long l355 = 355L;
+ long l356 = 356L;
+ long l357 = 357L;
+ long l358 = 358L;
+ long l359 = 359L;
+ long l360 = 360L;
+ long l361 = 361L;
+ long l362 = 362L;
+ long l363 = 363L;
+ long l364 = 364L;
+ long l365 = 365L;
+ long l366 = 366L;
+ long l367 = 367L;
+ long l368 = 368L;
+ long l369 = 369L;
+ long l370 = 370L;
+ long l371 = 371L;
+ long l372 = 372L;
+ long l373 = 373L;
+ long l374 = 374L;
+ long l375 = 375L;
+ long l376 = 376L;
+ long l377 = 377L;
+ long l378 = 378L;
+ long l379 = 379L;
+ long l380 = 380L;
+ long l381 = 381L;
+ long l382 = 382L;
+ long l383 = 383L;
+ long l384 = 384L;
+ long l385 = 385L;
+ long l386 = 386L;
+ long l387 = 387L;
+ long l388 = 388L;
+ long l389 = 389L;
+ long l390 = 390L;
+ long l391 = 391L;
+ long l392 = 392L;
+ long l393 = 393L;
+ long l394 = 394L;
+ long l395 = 395L;
+ long l396 = 396L;
+ long l397 = 397L;
+ long l398 = 398L;
+ long l399 = 399L;
+ long l400 = 400L;
+ long l401 = 401L;
+ long l402 = 402L;
+ long l403 = 403L;
+ long l404 = 404L;
+ long l405 = 405L;
+ long l406 = 406L;
+ long l407 = 407L;
+ long l408 = 408L;
+ long l409 = 409L;
+ long l410 = 410L;
+ long l411 = 411L;
+ long l412 = 412L;
+ long l413 = 413L;
+ long l414 = 414L;
+ long l415 = 415L;
+ long l416 = 416L;
+ long l417 = 417L;
+ long l418 = 418L;
+ long l419 = 419L;
+ long l420 = 420L;
+ long l421 = 421L;
+ long l422 = 422L;
+ long l423 = 423L;
+ long l424 = 424L;
+ long l425 = 425L;
+ long l426 = 426L;
+ long l427 = 427L;
+ long l428 = 428L;
+ long l429 = 429L;
+ long l430 = 430L;
+ long l431 = 431L;
+ long l432 = 432L;
+ long l433 = 433L;
+ long l434 = 434L;
+ long l435 = 435L;
+ long l436 = 436L;
+ long l437 = 437L;
+ long l438 = 438L;
+ long l439 = 439L;
+ long l440 = 440L;
+ long l441 = 441L;
+ long l442 = 442L;
+ long l443 = 443L;
+ long l444 = 444L;
+ long l445 = 445L;
+ long l446 = 446L;
+ long l447 = 447L;
+ long l448 = 448L;
+ long l449 = 449L;
+ long l450 = 450L;
+ long l451 = 451L;
+ long l452 = 452L;
+ long l453 = 453L;
+ long l454 = 454L;
+ long l455 = 455L;
+ long l456 = 456L;
+ long l457 = 457L;
+ long l458 = 458L;
+ long l459 = 459L;
+ long l460 = 460L;
+ long l461 = 461L;
+ long l462 = 462L;
+ long l463 = 463L;
+ long l464 = 464L;
+ long l465 = 465L;
+ long l466 = 466L;
+ long l467 = 467L;
+ long l468 = 468L;
+ long l469 = 469L;
+ long l470 = 470L;
+ long l471 = 471L;
+ long l472 = 472L;
+ long l473 = 473L;
+ long l474 = 474L;
+ long l475 = 475L;
+ long l476 = 476L;
+ long l477 = 477L;
+ long l478 = 478L;
+ long l479 = 479L;
+ long l480 = 480L;
+ long l481 = 481L;
+ long l482 = 482L;
+ long l483 = 483L;
+ long l484 = 484L;
+ long l485 = 485L;
+ long l486 = 486L;
+ long l487 = 487L;
+ long l488 = 488L;
+ long l489 = 489L;
+ long l490 = 490L;
+ long l491 = 491L;
+ long l492 = 492L;
+ long l493 = 493L;
+ long l494 = 494L;
+ long l495 = 495L;
+ long l496 = 496L;
+ long l497 = 497L;
+ long l498 = 498L;
+ long l499 = 499L;
+ long l500 = 500L;
+ long l501 = 501L;
+ long l502 = 502L;
+ long l503 = 503L;
+ long l504 = 504L;
+ long l505 = 505L;
+ long l506 = 506L;
+ long l507 = 507L;
+ long l508 = 508L;
+ long l509 = 509L;
+ long l510 = 510L;
+ long l511 = 511L;
+ long l512 = 512L;
+ long l513 = 513L;
+ long l514 = 514L;
+ long l515 = 515L;
+ long l516 = 516L;
+ long l517 = 517L;
+ long l518 = 518L;
+ long l519 = 519L;
+ long l520 = 520L;
+ long l521 = 521L;
+ long l522 = 522L;
+ long l523 = 523L;
+ long l524 = 524L;
+ long l525 = 525L;
+ long l526 = 526L;
+ long l527 = 527L;
+ long l528 = 528L;
+ long l529 = 529L;
+ long l530 = 530L;
+ long l531 = 531L;
+ long l532 = 532L;
+ long l533 = 533L;
+ long l534 = 534L;
+ long l535 = 535L;
+ long l536 = 536L;
+ long l537 = 537L;
+ long l538 = 538L;
+ long l539 = 539L;
+ long l540 = 540L;
+ long l541 = 541L;
+ long l542 = 542L;
+ long l543 = 543L;
+ long l544 = 544L;
+ long l545 = 545L;
+ long l546 = 546L;
+ long l547 = 547L;
+ long l548 = 548L;
+ long l549 = 549L;
+ long l550 = 550L;
+ long l551 = 551L;
+ long l552 = 552L;
+ long l553 = 553L;
+ long l554 = 554L;
+ long l555 = 555L;
+ long l556 = 556L;
+ long l557 = 557L;
+ long l558 = 558L;
+ long l559 = 559L;
+ long l560 = 560L;
+ long l561 = 561L;
+ long l562 = 562L;
+ long l563 = 563L;
+ long l564 = 564L;
+ long l565 = 565L;
+ long l566 = 566L;
+ long l567 = 567L;
+ long l568 = 568L;
+ long l569 = 569L;
+ long l570 = 570L;
+ long l571 = 571L;
+ long l572 = 572L;
+ long l573 = 573L;
+ long l574 = 574L;
+ long l575 = 575L;
+ long l576 = 576L;
+ long l577 = 577L;
+ long l578 = 578L;
+ long l579 = 579L;
+ long l580 = 580L;
+ long l581 = 581L;
+ long l582 = 582L;
+ long l583 = 583L;
+ long l584 = 584L;
+ long l585 = 585L;
+ long l586 = 586L;
+ long l587 = 587L;
+ long l588 = 588L;
+ long l589 = 589L;
+ long l590 = 590L;
+ long l591 = 591L;
+ long l592 = 592L;
+ long l593 = 593L;
+ long l594 = 594L;
+ long l595 = 595L;
+ long l596 = 596L;
+ long l597 = 597L;
+ long l598 = 598L;
+ long l599 = 599L;
+ long l600 = 600L;
+ long l601 = 601L;
+ long l602 = 602L;
+ long l603 = 603L;
+ long l604 = 604L;
+ long l605 = 605L;
+ long l606 = 606L;
+ long l607 = 607L;
+ long l608 = 608L;
+ long l609 = 609L;
+ long l610 = 610L;
+ long l611 = 611L;
+ long l612 = 612L;
+ long l613 = 613L;
+ long l614 = 614L;
+ long l615 = 615L;
+ long l616 = 616L;
+ long l617 = 617L;
+ long l618 = 618L;
+ long l619 = 619L;
+ long l620 = 620L;
+ long l621 = 621L;
+ long l622 = 622L;
+ long l623 = 623L;
+ long l624 = 624L;
+ long l625 = 625L;
+ long l626 = 626L;
+ long l627 = 627L;
+ long l628 = 628L;
+ long l629 = 629L;
+ long l630 = 630L;
+ long l631 = 631L;
+ long l632 = 632L;
+ long l633 = 633L;
+ long l634 = 634L;
+ long l635 = 635L;
+ long l636 = 636L;
+ long l637 = 637L;
+ long l638 = 638L;
+ long l639 = 639L;
+ long l640 = 640L;
+ long l641 = 641L;
+ long l642 = 642L;
+ long l643 = 643L;
+ long l644 = 644L;
+ long l645 = 645L;
+ long l646 = 646L;
+ long l647 = 647L;
+ long l648 = 648L;
+ long l649 = 649L;
+ long l650 = 650L;
+ long l651 = 651L;
+ long l652 = 652L;
+ long l653 = 653L;
+ long l654 = 654L;
+ long l655 = 655L;
+ long l656 = 656L;
+ long l657 = 657L;
+ long l658 = 658L;
+ long l659 = 659L;
+ long l660 = 660L;
+ long l661 = 661L;
+ long l662 = 662L;
+ long l663 = 663L;
+ long l664 = 664L;
+ long l665 = 665L;
+ long l666 = 666L;
+ long l667 = 667L;
+ long l668 = 668L;
+ long l669 = 669L;
+ long l670 = 670L;
+ long l671 = 671L;
+ long l672 = 672L;
+ long l673 = 673L;
+ long l674 = 674L;
+ long l675 = 675L;
+ long l676 = 676L;
+ long l677 = 677L;
+ long l678 = 678L;
+ long l679 = 679L;
+ long l680 = 680L;
+ long l681 = 681L;
+ long l682 = 682L;
+ long l683 = 683L;
+ long l684 = 684L;
+ long l685 = 685L;
+ long l686 = 686L;
+ long l687 = 687L;
+ long l688 = 688L;
+ long l689 = 689L;
+ long l690 = 690L;
+ long l691 = 691L;
+ long l692 = 692L;
+ long l693 = 693L;
+ long l694 = 694L;
+ long l695 = 695L;
+ long l696 = 696L;
+ long l697 = 697L;
+ long l698 = 698L;
+ long l699 = 699L;
+ long l700 = 700L;
+ long l701 = 701L;
+ long l702 = 702L;
+ long l703 = 703L;
+ long l704 = 704L;
+ long l705 = 705L;
+ long l706 = 706L;
+ long l707 = 707L;
+ long l708 = 708L;
+ long l709 = 709L;
+ long l710 = 710L;
+ long l711 = 711L;
+ long l712 = 712L;
+ long l713 = 713L;
+ long l714 = 714L;
+ long l715 = 715L;
+ long l716 = 716L;
+ long l717 = 717L;
+ long l718 = 718L;
+ long l719 = 719L;
+ long l720 = 720L;
+ long l721 = 721L;
+ long l722 = 722L;
+ long l723 = 723L;
+ long l724 = 724L;
+ long l725 = 725L;
+ long l726 = 726L;
+ long l727 = 727L;
+ long l728 = 728L;
+ long l729 = 729L;
+ long l730 = 730L;
+ long l731 = 731L;
+ long l732 = 732L;
+ long l733 = 733L;
+ long l734 = 734L;
+ long l735 = 735L;
+ long l736 = 736L;
+ long l737 = 737L;
+ long l738 = 738L;
+ long l739 = 739L;
+ long l740 = 740L;
+ long l741 = 741L;
+ long l742 = 742L;
+ long l743 = 743L;
+ long l744 = 744L;
+ long l745 = 745L;
+ long l746 = 746L;
+ long l747 = 747L;
+ long l748 = 748L;
+ long l749 = 749L;
+ long l750 = 750L;
+ long l751 = 751L;
+ long l752 = 752L;
+ long l753 = 753L;
+ long l754 = 754L;
+ long l755 = 755L;
+ long l756 = 756L;
+ long l757 = 757L;
+ long l758 = 758L;
+ long l759 = 759L;
+ long l760 = 760L;
+ long l761 = 761L;
+ long l762 = 762L;
+ long l763 = 763L;
+ long l764 = 764L;
+ long l765 = 765L;
+ long l766 = 766L;
+ long l767 = 767L;
+ long l768 = 768L;
+ long l769 = 769L;
+ long l770 = 770L;
+ long l771 = 771L;
+ long l772 = 772L;
+ long l773 = 773L;
+ long l774 = 774L;
+ long l775 = 775L;
+ long l776 = 776L;
+ long l777 = 777L;
+ long l778 = 778L;
+ long l779 = 779L;
+ long l780 = 780L;
+ long l781 = 781L;
+ long l782 = 782L;
+ long l783 = 783L;
+ long l784 = 784L;
+ long l785 = 785L;
+ long l786 = 786L;
+ long l787 = 787L;
+ long l788 = 788L;
+ long l789 = 789L;
+ long l790 = 790L;
+ long l791 = 791L;
+ long l792 = 792L;
+ long l793 = 793L;
+ long l794 = 794L;
+ long l795 = 795L;
+ long l796 = 796L;
+ long l797 = 797L;
+ long l798 = 798L;
+ long l799 = 799L;
+ long l800 = 800L;
+ long l801 = 801L;
+ long l802 = 802L;
+ long l803 = 803L;
+ long l804 = 804L;
+ long l805 = 805L;
+ long l806 = 806L;
+ long l807 = 807L;
+ long l808 = 808L;
+ long l809 = 809L;
+ long l810 = 810L;
+ long l811 = 811L;
+ long l812 = 812L;
+ long l813 = 813L;
+ long l814 = 814L;
+ long l815 = 815L;
+ long l816 = 816L;
+ long l817 = 817L;
+ long l818 = 818L;
+ long l819 = 819L;
+ long l820 = 820L;
+ long l821 = 821L;
+ long l822 = 822L;
+ long l823 = 823L;
+ long l824 = 824L;
+ long l825 = 825L;
+ long l826 = 826L;
+ long l827 = 827L;
+ long l828 = 828L;
+ long l829 = 829L;
+ long l830 = 830L;
+ long l831 = 831L;
+ long l832 = 832L;
+ long l833 = 833L;
+ long l834 = 834L;
+ long l835 = 835L;
+ long l836 = 836L;
+ long l837 = 837L;
+ long l838 = 838L;
+ long l839 = 839L;
+ long l840 = 840L;
+ long l841 = 841L;
+ long l842 = 842L;
+ long l843 = 843L;
+ long l844 = 844L;
+ long l845 = 845L;
+ long l846 = 846L;
+ long l847 = 847L;
+ long l848 = 848L;
+ long l849 = 849L;
+ long l850 = 850L;
+ long l851 = 851L;
+ long l852 = 852L;
+ long l853 = 853L;
+ long l854 = 854L;
+ long l855 = 855L;
+ long l856 = 856L;
+ long l857 = 857L;
+ long l858 = 858L;
+ long l859 = 859L;
+ long l860 = 860L;
+ long l861 = 861L;
+ long l862 = 862L;
+ long l863 = 863L;
+ long l864 = 864L;
+ long l865 = 865L;
+ long l866 = 866L;
+ long l867 = 867L;
+ long l868 = 868L;
+ long l869 = 869L;
+ long l870 = 870L;
+ long l871 = 871L;
+ long l872 = 872L;
+ long l873 = 873L;
+ long l874 = 874L;
+ long l875 = 875L;
+ long l876 = 876L;
+ long l877 = 877L;
+ long l878 = 878L;
+ long l879 = 879L;
+ long l880 = 880L;
+ long l881 = 881L;
+ long l882 = 882L;
+ long l883 = 883L;
+ long l884 = 884L;
+ long l885 = 885L;
+ long l886 = 886L;
+ long l887 = 887L;
+ long l888 = 888L;
+ long l889 = 889L;
+ long l890 = 890L;
+ long l891 = 891L;
+ long l892 = 892L;
+ long l893 = 893L;
+ long l894 = 894L;
+ long l895 = 895L;
+ long l896 = 896L;
+ long l897 = 897L;
+ long l898 = 898L;
+ long l899 = 899L;
+ long l900 = 900L;
+ long l901 = 901L;
+ long l902 = 902L;
+ long l903 = 903L;
+ long l904 = 904L;
+ long l905 = 905L;
+ long l906 = 906L;
+ long l907 = 907L;
+ long l908 = 908L;
+ long l909 = 909L;
+ long l910 = 910L;
+ long l911 = 911L;
+ long l912 = 912L;
+ long l913 = 913L;
+ long l914 = 914L;
+ long l915 = 915L;
+ long l916 = 916L;
+ long l917 = 917L;
+ long l918 = 918L;
+ long l919 = 919L;
+ long l920 = 920L;
+ long l921 = 921L;
+ long l922 = 922L;
+ long l923 = 923L;
+ long l924 = 924L;
+ long l925 = 925L;
+ long l926 = 926L;
+ long l927 = 927L;
+ long l928 = 928L;
+ long l929 = 929L;
+ long l930 = 930L;
+ long l931 = 931L;
+ long l932 = 932L;
+ long l933 = 933L;
+ long l934 = 934L;
+ long l935 = 935L;
+ long l936 = 936L;
+ long l937 = 937L;
+ long l938 = 938L;
+ long l939 = 939L;
+ long l940 = 940L;
+ long l941 = 941L;
+ long l942 = 942L;
+ long l943 = 943L;
+ long l944 = 944L;
+ long l945 = 945L;
+ long l946 = 946L;
+ long l947 = 947L;
+ long l948 = 948L;
+ long l949 = 949L;
+ long l950 = 950L;
+ long l951 = 951L;
+ long l952 = 952L;
+ long l953 = 953L;
+ long l954 = 954L;
+ long l955 = 955L;
+ long l956 = 956L;
+ long l957 = 957L;
+ long l958 = 958L;
+ long l959 = 959L;
+ long l960 = 960L;
+ long l961 = 961L;
+ long l962 = 962L;
+ long l963 = 963L;
+ long l964 = 964L;
+ long l965 = 965L;
+ long l966 = 966L;
+ long l967 = 967L;
+ long l968 = 968L;
+ long l969 = 969L;
+ long l970 = 970L;
+ long l971 = 971L;
+ long l972 = 972L;
+ long l973 = 973L;
+ long l974 = 974L;
+ long l975 = 975L;
+ long l976 = 976L;
+ long l977 = 977L;
+ long l978 = 978L;
+ long l979 = 979L;
+ long l980 = 980L;
+ long l981 = 981L;
+ long l982 = 982L;
+ long l983 = 983L;
+ long l984 = 984L;
+ long l985 = 985L;
+ long l986 = 986L;
+ long l987 = 987L;
+ long l988 = 988L;
+ long l989 = 989L;
+ long l990 = 990L;
+ long l991 = 991L;
+ long l992 = 992L;
+ long l993 = 993L;
+ long l994 = 994L;
+ long l995 = 995L;
+ long l996 = 996L;
+ long l997 = 997L;
+ long l998 = 998L;
+ long l999 = 999L;
+ l1 += l0;
+ l2 += l1;
+ l3 += l2;
+ l4 += l3;
+ l5 += l4;
+ l6 += l5;
+ l7 += l6;
+ l8 += l7;
+ l9 += l8;
+ l10 += l9;
+ l11 += l10;
+ l12 += l11;
+ l13 += l12;
+ l14 += l13;
+ l15 += l14;
+ l16 += l15;
+ l17 += l16;
+ l18 += l17;
+ l19 += l18;
+ l20 += l19;
+ l21 += l20;
+ l22 += l21;
+ l23 += l22;
+ l24 += l23;
+ l25 += l24;
+ l26 += l25;
+ l27 += l26;
+ l28 += l27;
+ l29 += l28;
+ l30 += l29;
+ l31 += l30;
+ l32 += l31;
+ l33 += l32;
+ l34 += l33;
+ l35 += l34;
+ l36 += l35;
+ l37 += l36;
+ l38 += l37;
+ l39 += l38;
+ l40 += l39;
+ l41 += l40;
+ l42 += l41;
+ l43 += l42;
+ l44 += l43;
+ l45 += l44;
+ l46 += l45;
+ l47 += l46;
+ l48 += l47;
+ l49 += l48;
+ l50 += l49;
+ l51 += l50;
+ l52 += l51;
+ l53 += l52;
+ l54 += l53;
+ l55 += l54;
+ l56 += l55;
+ l57 += l56;
+ l58 += l57;
+ l59 += l58;
+ l60 += l59;
+ l61 += l60;
+ l62 += l61;
+ l63 += l62;
+ l64 += l63;
+ l65 += l64;
+ l66 += l65;
+ l67 += l66;
+ l68 += l67;
+ l69 += l68;
+ l70 += l69;
+ l71 += l70;
+ l72 += l71;
+ l73 += l72;
+ l74 += l73;
+ l75 += l74;
+ l76 += l75;
+ l77 += l76;
+ l78 += l77;
+ l79 += l78;
+ l80 += l79;
+ l81 += l80;
+ l82 += l81;
+ l83 += l82;
+ l84 += l83;
+ l85 += l84;
+ l86 += l85;
+ l87 += l86;
+ l88 += l87;
+ l89 += l88;
+ l90 += l89;
+ l91 += l90;
+ l92 += l91;
+ l93 += l92;
+ l94 += l93;
+ l95 += l94;
+ l96 += l95;
+ l97 += l96;
+ l98 += l97;
+ l99 += l98;
+ l100 += l99;
+ l101 += l100;
+ l102 += l101;
+ l103 += l102;
+ l104 += l103;
+ l105 += l104;
+ l106 += l105;
+ l107 += l106;
+ l108 += l107;
+ l109 += l108;
+ l110 += l109;
+ l111 += l110;
+ l112 += l111;
+ l113 += l112;
+ l114 += l113;
+ l115 += l114;
+ l116 += l115;
+ l117 += l116;
+ l118 += l117;
+ l119 += l118;
+ l120 += l119;
+ l121 += l120;
+ l122 += l121;
+ l123 += l122;
+ l124 += l123;
+ l125 += l124;
+ l126 += l125;
+ l127 += l126;
+ l128 += l127;
+ l129 += l128;
+ l130 += l129;
+ l131 += l130;
+ l132 += l131;
+ l133 += l132;
+ l134 += l133;
+ l135 += l134;
+ l136 += l135;
+ l137 += l136;
+ l138 += l137;
+ l139 += l138;
+ l140 += l139;
+ l141 += l140;
+ l142 += l141;
+ l143 += l142;
+ l144 += l143;
+ l145 += l144;
+ l146 += l145;
+ l147 += l146;
+ l148 += l147;
+ l149 += l148;
+ l150 += l149;
+ l151 += l150;
+ l152 += l151;
+ l153 += l152;
+ l154 += l153;
+ l155 += l154;
+ l156 += l155;
+ l157 += l156;
+ l158 += l157;
+ l159 += l158;
+ l160 += l159;
+ l161 += l160;
+ l162 += l161;
+ l163 += l162;
+ l164 += l163;
+ l165 += l164;
+ l166 += l165;
+ l167 += l166;
+ l168 += l167;
+ l169 += l168;
+ l170 += l169;
+ l171 += l170;
+ l172 += l171;
+ l173 += l172;
+ l174 += l173;
+ l175 += l174;
+ l176 += l175;
+ l177 += l176;
+ l178 += l177;
+ l179 += l178;
+ l180 += l179;
+ l181 += l180;
+ l182 += l181;
+ l183 += l182;
+ l184 += l183;
+ l185 += l184;
+ l186 += l185;
+ l187 += l186;
+ l188 += l187;
+ l189 += l188;
+ l190 += l189;
+ l191 += l190;
+ l192 += l191;
+ l193 += l192;
+ l194 += l193;
+ l195 += l194;
+ l196 += l195;
+ l197 += l196;
+ l198 += l197;
+ l199 += l198;
+ l200 += l199;
+ l201 += l200;
+ l202 += l201;
+ l203 += l202;
+ l204 += l203;
+ l205 += l204;
+ l206 += l205;
+ l207 += l206;
+ l208 += l207;
+ l209 += l208;
+ l210 += l209;
+ l211 += l210;
+ l212 += l211;
+ l213 += l212;
+ l214 += l213;
+ l215 += l214;
+ l216 += l215;
+ l217 += l216;
+ l218 += l217;
+ l219 += l218;
+ l220 += l219;
+ l221 += l220;
+ l222 += l221;
+ l223 += l222;
+ l224 += l223;
+ l225 += l224;
+ l226 += l225;
+ l227 += l226;
+ l228 += l227;
+ l229 += l228;
+ l230 += l229;
+ l231 += l230;
+ l232 += l231;
+ l233 += l232;
+ l234 += l233;
+ l235 += l234;
+ l236 += l235;
+ l237 += l236;
+ l238 += l237;
+ l239 += l238;
+ l240 += l239;
+ l241 += l240;
+ l242 += l241;
+ l243 += l242;
+ l244 += l243;
+ l245 += l244;
+ l246 += l245;
+ l247 += l246;
+ l248 += l247;
+ l249 += l248;
+ l250 += l249;
+ l251 += l250;
+ l252 += l251;
+ l253 += l252;
+ l254 += l253;
+ l255 += l254;
+ l256 += l255;
+ l257 += l256;
+ l258 += l257;
+ l259 += l258;
+ l260 += l259;
+ l261 += l260;
+ l262 += l261;
+ l263 += l262;
+ l264 += l263;
+ l265 += l264;
+ l266 += l265;
+ l267 += l266;
+ l268 += l267;
+ l269 += l268;
+ l270 += l269;
+ l271 += l270;
+ l272 += l271;
+ l273 += l272;
+ l274 += l273;
+ l275 += l274;
+ l276 += l275;
+ l277 += l276;
+ l278 += l277;
+ l279 += l278;
+ l280 += l279;
+ l281 += l280;
+ l282 += l281;
+ l283 += l282;
+ l284 += l283;
+ l285 += l284;
+ l286 += l285;
+ l287 += l286;
+ l288 += l287;
+ l289 += l288;
+ l290 += l289;
+ l291 += l290;
+ l292 += l291;
+ l293 += l292;
+ l294 += l293;
+ l295 += l294;
+ l296 += l295;
+ l297 += l296;
+ l298 += l297;
+ l299 += l298;
+ l300 += l299;
+ l301 += l300;
+ l302 += l301;
+ l303 += l302;
+ l304 += l303;
+ l305 += l304;
+ l306 += l305;
+ l307 += l306;
+ l308 += l307;
+ l309 += l308;
+ l310 += l309;
+ l311 += l310;
+ l312 += l311;
+ l313 += l312;
+ l314 += l313;
+ l315 += l314;
+ l316 += l315;
+ l317 += l316;
+ l318 += l317;
+ l319 += l318;
+ l320 += l319;
+ l321 += l320;
+ l322 += l321;
+ l323 += l322;
+ l324 += l323;
+ l325 += l324;
+ l326 += l325;
+ l327 += l326;
+ l328 += l327;
+ l329 += l328;
+ l330 += l329;
+ l331 += l330;
+ l332 += l331;
+ l333 += l332;
+ l334 += l333;
+ l335 += l334;
+ l336 += l335;
+ l337 += l336;
+ l338 += l337;
+ l339 += l338;
+ l340 += l339;
+ l341 += l340;
+ l342 += l341;
+ l343 += l342;
+ l344 += l343;
+ l345 += l344;
+ l346 += l345;
+ l347 += l346;
+ l348 += l347;
+ l349 += l348;
+ l350 += l349;
+ l351 += l350;
+ l352 += l351;
+ l353 += l352;
+ l354 += l353;
+ l355 += l354;
+ l356 += l355;
+ l357 += l356;
+ l358 += l357;
+ l359 += l358;
+ l360 += l359;
+ l361 += l360;
+ l362 += l361;
+ l363 += l362;
+ l364 += l363;
+ l365 += l364;
+ l366 += l365;
+ l367 += l366;
+ l368 += l367;
+ l369 += l368;
+ l370 += l369;
+ l371 += l370;
+ l372 += l371;
+ l373 += l372;
+ l374 += l373;
+ l375 += l374;
+ l376 += l375;
+ l377 += l376;
+ l378 += l377;
+ l379 += l378;
+ l380 += l379;
+ l381 += l380;
+ l382 += l381;
+ l383 += l382;
+ l384 += l383;
+ l385 += l384;
+ l386 += l385;
+ l387 += l386;
+ l388 += l387;
+ l389 += l388;
+ l390 += l389;
+ l391 += l390;
+ l392 += l391;
+ l393 += l392;
+ l394 += l393;
+ l395 += l394;
+ l396 += l395;
+ l397 += l396;
+ l398 += l397;
+ l399 += l398;
+ l400 += l399;
+ l401 += l400;
+ l402 += l401;
+ l403 += l402;
+ l404 += l403;
+ l405 += l404;
+ l406 += l405;
+ l407 += l406;
+ l408 += l407;
+ l409 += l408;
+ l410 += l409;
+ l411 += l410;
+ l412 += l411;
+ l413 += l412;
+ l414 += l413;
+ l415 += l414;
+ l416 += l415;
+ l417 += l416;
+ l418 += l417;
+ l419 += l418;
+ l420 += l419;
+ l421 += l420;
+ l422 += l421;
+ l423 += l422;
+ l424 += l423;
+ l425 += l424;
+ l426 += l425;
+ l427 += l426;
+ l428 += l427;
+ l429 += l428;
+ l430 += l429;
+ l431 += l430;
+ l432 += l431;
+ l433 += l432;
+ l434 += l433;
+ l435 += l434;
+ l436 += l435;
+ l437 += l436;
+ l438 += l437;
+ l439 += l438;
+ l440 += l439;
+ l441 += l440;
+ l442 += l441;
+ l443 += l442;
+ l444 += l443;
+ l445 += l444;
+ l446 += l445;
+ l447 += l446;
+ l448 += l447;
+ l449 += l448;
+ l450 += l449;
+ l451 += l450;
+ l452 += l451;
+ l453 += l452;
+ l454 += l453;
+ l455 += l454;
+ l456 += l455;
+ l457 += l456;
+ l458 += l457;
+ l459 += l458;
+ l460 += l459;
+ l461 += l460;
+ l462 += l461;
+ l463 += l462;
+ l464 += l463;
+ l465 += l464;
+ l466 += l465;
+ l467 += l466;
+ l468 += l467;
+ l469 += l468;
+ l470 += l469;
+ l471 += l470;
+ l472 += l471;
+ l473 += l472;
+ l474 += l473;
+ l475 += l474;
+ l476 += l475;
+ l477 += l476;
+ l478 += l477;
+ l479 += l478;
+ l480 += l479;
+ l481 += l480;
+ l482 += l481;
+ l483 += l482;
+ l484 += l483;
+ l485 += l484;
+ l486 += l485;
+ l487 += l486;
+ l488 += l487;
+ l489 += l488;
+ l490 += l489;
+ l491 += l490;
+ l492 += l491;
+ l493 += l492;
+ l494 += l493;
+ l495 += l494;
+ l496 += l495;
+ l497 += l496;
+ l498 += l497;
+ l499 += l498;
+ l500 += l499;
+ l501 += l500;
+ l502 += l501;
+ l503 += l502;
+ l504 += l503;
+ l505 += l504;
+ l506 += l505;
+ l507 += l506;
+ l508 += l507;
+ l509 += l508;
+ l510 += l509;
+ l511 += l510;
+ l512 += l511;
+ l513 += l512;
+ l514 += l513;
+ l515 += l514;
+ l516 += l515;
+ l517 += l516;
+ l518 += l517;
+ l519 += l518;
+ l520 += l519;
+ l521 += l520;
+ l522 += l521;
+ l523 += l522;
+ l524 += l523;
+ l525 += l524;
+ l526 += l525;
+ l527 += l526;
+ l528 += l527;
+ l529 += l528;
+ l530 += l529;
+ l531 += l530;
+ l532 += l531;
+ l533 += l532;
+ l534 += l533;
+ l535 += l534;
+ l536 += l535;
+ l537 += l536;
+ l538 += l537;
+ l539 += l538;
+ l540 += l539;
+ l541 += l540;
+ l542 += l541;
+ l543 += l542;
+ l544 += l543;
+ l545 += l544;
+ l546 += l545;
+ l547 += l546;
+ l548 += l547;
+ l549 += l548;
+ l550 += l549;
+ l551 += l550;
+ l552 += l551;
+ l553 += l552;
+ l554 += l553;
+ l555 += l554;
+ l556 += l555;
+ l557 += l556;
+ l558 += l557;
+ l559 += l558;
+ l560 += l559;
+ l561 += l560;
+ l562 += l561;
+ l563 += l562;
+ l564 += l563;
+ l565 += l564;
+ l566 += l565;
+ l567 += l566;
+ l568 += l567;
+ l569 += l568;
+ l570 += l569;
+ l571 += l570;
+ l572 += l571;
+ l573 += l572;
+ l574 += l573;
+ l575 += l574;
+ l576 += l575;
+ l577 += l576;
+ l578 += l577;
+ l579 += l578;
+ l580 += l579;
+ l581 += l580;
+ l582 += l581;
+ l583 += l582;
+ l584 += l583;
+ l585 += l584;
+ l586 += l585;
+ l587 += l586;
+ l588 += l587;
+ l589 += l588;
+ l590 += l589;
+ l591 += l590;
+ l592 += l591;
+ l593 += l592;
+ l594 += l593;
+ l595 += l594;
+ l596 += l595;
+ l597 += l596;
+ l598 += l597;
+ l599 += l598;
+ l600 += l599;
+ l601 += l600;
+ l602 += l601;
+ l603 += l602;
+ l604 += l603;
+ l605 += l604;
+ l606 += l605;
+ l607 += l606;
+ l608 += l607;
+ l609 += l608;
+ l610 += l609;
+ l611 += l610;
+ l612 += l611;
+ l613 += l612;
+ l614 += l613;
+ l615 += l614;
+ l616 += l615;
+ l617 += l616;
+ l618 += l617;
+ l619 += l618;
+ l620 += l619;
+ l621 += l620;
+ l622 += l621;
+ l623 += l622;
+ l624 += l623;
+ l625 += l624;
+ l626 += l625;
+ l627 += l626;
+ l628 += l627;
+ l629 += l628;
+ l630 += l629;
+ l631 += l630;
+ l632 += l631;
+ l633 += l632;
+ l634 += l633;
+ l635 += l634;
+ l636 += l635;
+ l637 += l636;
+ l638 += l637;
+ l639 += l638;
+ l640 += l639;
+ l641 += l640;
+ l642 += l641;
+ l643 += l642;
+ l644 += l643;
+ l645 += l644;
+ l646 += l645;
+ l647 += l646;
+ l648 += l647;
+ l649 += l648;
+ l650 += l649;
+ l651 += l650;
+ l652 += l651;
+ l653 += l652;
+ l654 += l653;
+ l655 += l654;
+ l656 += l655;
+ l657 += l656;
+ l658 += l657;
+ l659 += l658;
+ l660 += l659;
+ l661 += l660;
+ l662 += l661;
+ l663 += l662;
+ l664 += l663;
+ l665 += l664;
+ l666 += l665;
+ l667 += l666;
+ l668 += l667;
+ l669 += l668;
+ l670 += l669;
+ l671 += l670;
+ l672 += l671;
+ l673 += l672;
+ l674 += l673;
+ l675 += l674;
+ l676 += l675;
+ l677 += l676;
+ l678 += l677;
+ l679 += l678;
+ l680 += l679;
+ l681 += l680;
+ l682 += l681;
+ l683 += l682;
+ l684 += l683;
+ l685 += l684;
+ l686 += l685;
+ l687 += l686;
+ l688 += l687;
+ l689 += l688;
+ l690 += l689;
+ l691 += l690;
+ l692 += l691;
+ l693 += l692;
+ l694 += l693;
+ l695 += l694;
+ l696 += l695;
+ l697 += l696;
+ l698 += l697;
+ l699 += l698;
+ l700 += l699;
+ l701 += l700;
+ l702 += l701;
+ l703 += l702;
+ l704 += l703;
+ l705 += l704;
+ l706 += l705;
+ l707 += l706;
+ l708 += l707;
+ l709 += l708;
+ l710 += l709;
+ l711 += l710;
+ l712 += l711;
+ l713 += l712;
+ l714 += l713;
+ l715 += l714;
+ l716 += l715;
+ l717 += l716;
+ l718 += l717;
+ l719 += l718;
+ l720 += l719;
+ l721 += l720;
+ l722 += l721;
+ l723 += l722;
+ l724 += l723;
+ l725 += l724;
+ l726 += l725;
+ l727 += l726;
+ l728 += l727;
+ l729 += l728;
+ l730 += l729;
+ l731 += l730;
+ l732 += l731;
+ l733 += l732;
+ l734 += l733;
+ l735 += l734;
+ l736 += l735;
+ l737 += l736;
+ l738 += l737;
+ l739 += l738;
+ l740 += l739;
+ l741 += l740;
+ l742 += l741;
+ l743 += l742;
+ l744 += l743;
+ l745 += l744;
+ l746 += l745;
+ l747 += l746;
+ l748 += l747;
+ l749 += l748;
+ l750 += l749;
+ l751 += l750;
+ l752 += l751;
+ l753 += l752;
+ l754 += l753;
+ l755 += l754;
+ l756 += l755;
+ l757 += l756;
+ l758 += l757;
+ l759 += l758;
+ l760 += l759;
+ l761 += l760;
+ l762 += l761;
+ l763 += l762;
+ l764 += l763;
+ l765 += l764;
+ l766 += l765;
+ l767 += l766;
+ l768 += l767;
+ l769 += l768;
+ l770 += l769;
+ l771 += l770;
+ l772 += l771;
+ l773 += l772;
+ l774 += l773;
+ l775 += l774;
+ l776 += l775;
+ l777 += l776;
+ l778 += l777;
+ l779 += l778;
+ l780 += l779;
+ l781 += l780;
+ l782 += l781;
+ l783 += l782;
+ l784 += l783;
+ l785 += l784;
+ l786 += l785;
+ l787 += l786;
+ l788 += l787;
+ l789 += l788;
+ l790 += l789;
+ l791 += l790;
+ l792 += l791;
+ l793 += l792;
+ l794 += l793;
+ l795 += l794;
+ l796 += l795;
+ l797 += l796;
+ l798 += l797;
+ l799 += l798;
+ l800 += l799;
+ l801 += l800;
+ l802 += l801;
+ l803 += l802;
+ l804 += l803;
+ l805 += l804;
+ l806 += l805;
+ l807 += l806;
+ l808 += l807;
+ l809 += l808;
+ l810 += l809;
+ l811 += l810;
+ l812 += l811;
+ l813 += l812;
+ l814 += l813;
+ l815 += l814;
+ l816 += l815;
+ l817 += l816;
+ l818 += l817;
+ l819 += l818;
+ l820 += l819;
+ l821 += l820;
+ l822 += l821;
+ l823 += l822;
+ l824 += l823;
+ l825 += l824;
+ l826 += l825;
+ l827 += l826;
+ l828 += l827;
+ l829 += l828;
+ l830 += l829;
+ l831 += l830;
+ l832 += l831;
+ l833 += l832;
+ l834 += l833;
+ l835 += l834;
+ l836 += l835;
+ l837 += l836;
+ l838 += l837;
+ l839 += l838;
+ l840 += l839;
+ l841 += l840;
+ l842 += l841;
+ l843 += l842;
+ l844 += l843;
+ l845 += l844;
+ l846 += l845;
+ l847 += l846;
+ l848 += l847;
+ l849 += l848;
+ l850 += l849;
+ l851 += l850;
+ l852 += l851;
+ l853 += l852;
+ l854 += l853;
+ l855 += l854;
+ l856 += l855;
+ l857 += l856;
+ l858 += l857;
+ l859 += l858;
+ l860 += l859;
+ l861 += l860;
+ l862 += l861;
+ l863 += l862;
+ l864 += l863;
+ l865 += l864;
+ l866 += l865;
+ l867 += l866;
+ l868 += l867;
+ l869 += l868;
+ l870 += l869;
+ l871 += l870;
+ l872 += l871;
+ l873 += l872;
+ l874 += l873;
+ l875 += l874;
+ l876 += l875;
+ l877 += l876;
+ l878 += l877;
+ l879 += l878;
+ l880 += l879;
+ l881 += l880;
+ l882 += l881;
+ l883 += l882;
+ l884 += l883;
+ l885 += l884;
+ l886 += l885;
+ l887 += l886;
+ l888 += l887;
+ l889 += l888;
+ l890 += l889;
+ l891 += l890;
+ l892 += l891;
+ l893 += l892;
+ l894 += l893;
+ l895 += l894;
+ l896 += l895;
+ l897 += l896;
+ l898 += l897;
+ l899 += l898;
+ l900 += l899;
+ l901 += l900;
+ l902 += l901;
+ l903 += l902;
+ l904 += l903;
+ l905 += l904;
+ l906 += l905;
+ l907 += l906;
+ l908 += l907;
+ l909 += l908;
+ l910 += l909;
+ l911 += l910;
+ l912 += l911;
+ l913 += l912;
+ l914 += l913;
+ l915 += l914;
+ l916 += l915;
+ l917 += l916;
+ l918 += l917;
+ l919 += l918;
+ l920 += l919;
+ l921 += l920;
+ l922 += l921;
+ l923 += l922;
+ l924 += l923;
+ l925 += l924;
+ l926 += l925;
+ l927 += l926;
+ l928 += l927;
+ l929 += l928;
+ l930 += l929;
+ l931 += l930;
+ l932 += l931;
+ l933 += l932;
+ l934 += l933;
+ l935 += l934;
+ l936 += l935;
+ l937 += l936;
+ l938 += l937;
+ l939 += l938;
+ l940 += l939;
+ l941 += l940;
+ l942 += l941;
+ l943 += l942;
+ l944 += l943;
+ l945 += l944;
+ l946 += l945;
+ l947 += l946;
+ l948 += l947;
+ l949 += l948;
+ l950 += l949;
+ l951 += l950;
+ l952 += l951;
+ l953 += l952;
+ l954 += l953;
+ l955 += l954;
+ l956 += l955;
+ l957 += l956;
+ l958 += l957;
+ l959 += l958;
+ l960 += l959;
+ l961 += l960;
+ l962 += l961;
+ l963 += l962;
+ l964 += l963;
+ l965 += l964;
+ l966 += l965;
+ l967 += l966;
+ l968 += l967;
+ l969 += l968;
+ l970 += l969;
+ l971 += l970;
+ l972 += l971;
+ l973 += l972;
+ l974 += l973;
+ l975 += l974;
+ l976 += l975;
+ l977 += l976;
+ l978 += l977;
+ l979 += l978;
+ l980 += l979;
+ l981 += l980;
+ l982 += l981;
+ l983 += l982;
+ l984 += l983;
+ l985 += l984;
+ l986 += l985;
+ l987 += l986;
+ l988 += l987;
+ l989 += l988;
+ l990 += l989;
+ l991 += l990;
+ l992 += l991;
+ l993 += l992;
+ l994 += l993;
+ l995 += l994;
+ l996 += l995;
+ l997 += l996;
+ l998 += l997;
+ l999 += l998;
+ return l999;
+ }
+}
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index f0eb351..0a1e3e1 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -307,35 +307,108 @@
# Known broken tests for the arm64 optimizing compiler backend.
TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS := \
+ 001-HelloWorld \
003-omnibus-opcodes \
+ 004-InterfaceTest \
+ 004-JniTest \
+ 004-StackWalk \
+ 004-UnsafeTest \
006-args \
+ 007-count10 \
011-array-copy \
+ 013-math2 \
+ 016-intern \
+ 017-float \
018-stack-overflow \
+ 020-string \
+ 022-interface \
+ 023-many-interfaces \
+ 026-access \
+ 028-array-write \
+ 030-bad-finalizer \
+ 031-class-attributes \
+ 032-concrete-sub \
036-finalizer \
+ 037-inherit \
+ 038-inner-null \
+ 043-privates \
044-proxy \
+ 045-reflect-array \
+ 046-reflect \
+ 047-returns \
+ 049-show-object \
+ 050-sync-test \
+ 051-thread \
+ 052-verifier-fun \
+ 054-uncaught \
+ 056-const-string-jumbo \
+ 061-out-of-memory \
+ 063-process-manager \
+ 067-preemptive-unpark \
+ 068-classloader \
+ 069-field-type \
070-nio-buffer \
+ 071-dexfile \
072-precise-gc \
+ 074-gc-thrash \
+ 076-boolean-put \
+ 077-method-override \
+ 079-phantom \
+ 080-oom-throw \
082-inline-execute \
083-compiler-regressions \
+ 084-class-init \
+ 085-old-style-inner-class \
+ 086-null-super \
+ 087-gc-after-link \
+ 090-loop-formation \
+ 092-locale \
093-serialization \
+ 094-pattern \
096-array-copy-concurrent-gc \
+ 097-duplicate-method \
+ 098-ddmc \
100-reflect2 \
+ 102-concurrent-gc \
+ 103-string-append \
+ 105-invoke \
106-exceptions2 \
107-int-math2 \
+ 109-suspend-check \
+ 110-field-access \
+ 112-double-math \
+ 113-multidex \
+ 117-nopatchoat \
121-modifiers \
122-npe \
123-compiler-regressions-mt \
+ 124-missing-classes \
+ 125-gc-and-classloading \
+ 126-miranda-multidex \
+ 300-package-override \
+ 301-abstract-protected \
+ 303-verification-stress \
+ 401-optimizing-compiler \
+ 403-optimizing-long \
405-optimizing-long-allocator \
+ 406-fields \
407-arrays \
+ 409-materialized-condition \
410-floats \
411-optimizing-arith \
412-new-array \
413-regalloc-regression \
- 414-static-fields \
414-optimizing-arith-sub \
+ 414-static-fields \
415-optimizing-arith-neg \
+ 416-optimizing-arith-not \
417-optimizing-arith-div \
+ 418-const-string \
+ 419-long-parameter \
+ 420-const-class \
700-LoadArgRegs \
+ 701-easy-div-rem \
+ 702-LargeBranchOffset \
800-smali
ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
@@ -554,6 +627,12 @@
ifeq ($(9),no-image)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_NO_IMAGE_RULES
run_test_options += --no-image
+ # Add the core dependency. This is required for pre-building.
+ ifeq ($(1),host)
+ prereq_rule += $(HOST_CORE_IMAGE_$(4)_no-pic_$(12))
+ else
+ prereq_rule += $(TARGET_CORE_IMAGE_$(4)_no-pic_$(12))
+ endif
else
ifeq ($(9),image)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_IMAGE_RULES
diff --git a/test/etc/default-build b/test/etc/default-build
index 3369dc6..ab859ec 100755
--- a/test/etc/default-build
+++ b/test/etc/default-build
@@ -49,4 +49,6 @@
fi
fi
-zip $TEST_NAME.jar classes.dex
+if [ ${NEED_DEX} = "true" ]; then
+ zip $TEST_NAME.jar classes.dex
+fi
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 9b1af70..d2cd8ab 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -35,10 +35,14 @@
PREBUILD="y"
QUIET="n"
RELOCATE="y"
+SECONDARY_DEX=""
+TIME_OUT="y"
+TIME_OUT_VALUE=5m
USE_GDB="n"
USE_JVM="n"
VERIFY="y"
ZYGOTE=""
+DEX_VERIFY=""
while true; do
if [ "x$1" = "x--quiet" ]; then
@@ -92,12 +96,17 @@
elif [ "x$1" = "x--no-image" ]; then
HAVE_IMAGE="n"
shift
+ elif [ "x$1" = "x--secondary" ]; then
+ SECONDARY_DEX=":$DEX_LOCATION/$TEST_NAME-ex.jar"
+ shift
elif [ "x$1" = "x--debug" ]; then
DEBUGGER="y"
+ TIME_OUT="n"
shift
elif [ "x$1" = "x--gdb" ]; then
USE_GDB="y"
DEV_MODE="y"
+ TIME_OUT="n"
shift
elif [ "x$1" = "x--zygote" ]; then
ZYGOTE="-Xzygote"
@@ -176,7 +185,6 @@
fi
if [ "$VERIFY" = "y" ]; then
- DEX_VERIFY=""
JVM_VERIFY_ARG="-Xverify:all"
msg "Performing verification"
else
@@ -210,10 +218,8 @@
if [ "$HAVE_IMAGE" = "n" ]; then
DALVIKVM_BOOT_OPT="-Ximage:/system/non-existant/core.art"
- DEX2OAT_BOOT_OPT="--boot-image=/system/non-existant/core.art"
else
DALVIKVM_BOOT_OPT="-Ximage:${BOOT_IMAGE}"
- DEX2OAT_BOOT_OPT="--boot-image=${BOOT_IMAGE}"
fi
@@ -236,7 +242,12 @@
if [ "$INTERPRETER" = "y" ]; then
INT_OPTS="-Xint"
- COMPILE_FLAGS="${COMPILE_FLAGS} --compiler-filter=interpret-only"
+ if [ "$VERIFY" = "y" ] ; then
+ COMPILE_FLAGS="${COMPILE_FLAGS} --compiler-filter=interpret-only"
+ else
+ COMPILE_FLAGS="${COMPILE_FLAGS} --compiler-filter=verify-none"
+ DEX_VERIFY="${DEX_VERIFY} -Xverify:none"
+ fi
fi
JNI_OPTS="-Xjnigreflimit:512 -Xcheck:jni"
@@ -272,7 +283,7 @@
if [ "$PREBUILD" = "y" ]; then
dex2oat_cmdline="$INVOKE_WITH $ANDROID_ROOT/bin/dex2oatd \
$COMPILE_FLAGS \
- $DEX2OAT_BOOT_OPT \
+ --boot-image=${BOOT_IMAGE} \
--dex-file=$DEX_LOCATION/$TEST_NAME.jar \
--oat-file=$DEX_LOCATION/dalvik-cache/$ISA/$(echo $DEX_LOCATION/$TEST_NAME.jar/classes.dex | cut -d/ -f 2- | sed "s:/:@:g") \
--instruction-set=$ISA"
@@ -281,6 +292,7 @@
dalvikvm_cmdline="$INVOKE_WITH $GDB $ANDROID_ROOT/bin/$DALVIKVM \
$GDB_ARGS \
$FLAGS \
+ $DEX_VERIFY \
-XXlib:$LIB \
$PATCHOAT \
$DEX2OAT \
@@ -289,7 +301,7 @@
$INT_OPTS \
$DEBUGGER_OPTS \
$DALVIKVM_BOOT_OPT \
- -cp $DEX_LOCATION/$TEST_NAME.jar $MAIN"
+ -cp $DEX_LOCATION/$TEST_NAME.jar$SECONDARY_DEX $MAIN"
if [ "$HOST" = "n" ]; then
@@ -307,6 +319,14 @@
adb push $TEST_NAME-ex.jar $DEX_LOCATION >/dev/null 2>&1
fi
+ LD_LIBRARY_PATH=
+ if [ "$ANDROID_ROOT" != "/system" ]; then
+ # The current default installation has a 64-bit dalvikvm and a 32-bit
+ # dex2oat, so we can only use LD_LIBRARY_PATH when testing on a local
+ # installation.
+ LD_LIBRARY_PATH=$ANDROID_ROOT/$LIBRARY_DIRECTORY
+ fi
+
# Create a script with the command. The command can get longer than the longest
# allowed adb command and there is no way to get the exit status from a adb shell
# command.
@@ -314,7 +334,7 @@
export ANDROID_DATA=$DEX_LOCATION && \
export DEX_LOCATION=$DEX_LOCATION && \
export ANDROID_ROOT=$ANDROID_ROOT && \
- export LD_LIBRARY_PATH=$ANDROID_ROOT/$LIBRARY_DIRECTORY && \
+ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH && \
$mkdir_cmdline && \
$dex2oat_cmdline && \
$dalvikvm_cmdline"
diff --git a/tools/generate-operator-out.py b/tools/generate-operator-out.py
index f666ad1..2b57222 100755
--- a/tools/generate-operator-out.py
+++ b/tools/generate-operator-out.py
@@ -39,6 +39,7 @@
def ProcessFile(filename):
lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
in_enum = False
+ is_enum_private = False
is_enum_class = False
line_number = 0
@@ -57,15 +58,16 @@
# Except when it's private
if m.group(3) is not None:
- continue
-
- is_enum_class = m.group(1) is not None
- enum_name = m.group(2)
- if len(enclosing_classes) > 0:
- enum_name = '::'.join(enclosing_classes) + '::' + enum_name
- _ENUMS[enum_name] = []
- _NAMESPACES[enum_name] = '::'.join(namespaces)
- _ENUM_CLASSES[enum_name] = is_enum_class
+ is_enum_private = True
+ else:
+ is_enum_private = False
+ is_enum_class = m.group(1) is not None
+ enum_name = m.group(2)
+ if len(enclosing_classes) > 0:
+ enum_name = '::'.join(enclosing_classes) + '::' + enum_name
+ _ENUMS[enum_name] = []
+ _NAMESPACES[enum_name] = '::'.join(namespaces)
+ _ENUM_CLASSES[enum_name] = is_enum_class
in_enum = True
continue
@@ -80,11 +82,11 @@
continue
# Is this the start or end of an enclosing class or struct?
- m = re.compile(r'^(?:class|struct)(?: MANAGED)? (\S+).* \{').search(raw_line)
+ m = re.compile(r'^\s*(?:class|struct)(?: MANAGED)?(?: PACKED\([0-9]\))? (\S+).* \{').search(raw_line)
if m:
enclosing_classes.append(m.group(1))
continue
- m = re.compile(r'^\};').search(raw_line)
+ m = re.compile(r'^\s*\}( .*)?;').search(raw_line)
if m:
enclosing_classes = enclosing_classes[0:len(enclosing_classes) - 1]
continue
@@ -99,6 +101,9 @@
in_enum = False
continue
+ if is_enum_private:
+ continue
+
# The only useful thing in comments is the <<alternate text>> syntax for
# overriding the default enum value names. Pull that out...
enum_text = None
@@ -146,6 +151,7 @@
# There shouldn't be anything left.
if len(rest):
+ sys.stderr.write('%s\n' % (rest))
Confused(filename, line_number, raw_line)
if len(enclosing_classes) > 0: