-rw-r--r--build/Android.common.mk23
-rw-r--r--build/Android.gtest.mk2
-rw-r--r--build/Android.oat.mk8
-rw-r--r--compiler/Android.mk9
-rw-r--r--compiler/common_compiler_test.h17
-rw-r--r--compiler/compiler.cc (renamed from compiler/compiler_backend.cc)169
-rw-r--r--compiler/compiler.h (renamed from compiler/compiler_backend.h)26
-rw-r--r--compiler/compilers.cc149
-rw-r--r--compiler/compilers.h103
-rw-r--r--compiler/dex/compiler_enums.h3
-rw-r--r--compiler/dex/compiler_ir.h2
-rw-r--r--compiler/dex/frontend.cc25
-rw-r--r--compiler/dex/mir_optimization.cc2
-rw-r--r--compiler/dex/pass_driver.cc67
-rw-r--r--compiler/dex/pass_driver.h3
-rw-r--r--compiler/dex/quick/arm/call_arm.cc29
-rw-r--r--compiler/dex/quick/arm/codegen_arm.h1
-rw-r--r--compiler/dex/quick/arm/int_arm.cc101
-rw-r--r--compiler/dex/quick/arm/target_arm.cc7
-rw-r--r--compiler/dex/quick/codegen_util.cc57
-rw-r--r--compiler/dex/quick/dex_file_method_inliner.cc223
-rw-r--r--compiler/dex/quick/dex_file_method_inliner.h115
-rw-r--r--compiler/dex/quick/gen_common.cc142
-rw-r--r--compiler/dex/quick/gen_invoke.cc144
-rw-r--r--compiler/dex/quick/mips/codegen_mips.h1
-rw-r--r--compiler/dex/quick/mips/int_mips.cc4
-rw-r--r--compiler/dex/quick/mips/target_mips.cc8
-rw-r--r--compiler/dex/quick/mir_to_lir.cc23
-rw-r--r--compiler/dex/quick/mir_to_lir.h19
-rw-r--r--compiler/dex/quick/x86/assemble_x86.cc8
-rw-r--r--compiler/dex/quick/x86/codegen_x86.h9
-rw-r--r--compiler/dex/quick/x86/int_x86.cc4
-rw-r--r--compiler/dex/quick/x86/target_x86.cc77
-rw-r--r--compiler/dex/quick/x86/x86_lir.h6
-rw-r--r--compiler/driver/compiler_callbacks_impl.h20
-rw-r--r--compiler/driver/compiler_driver.cc73
-rw-r--r--compiler/driver/compiler_driver.h31
-rw-r--r--compiler/image_writer.cc21
-rw-r--r--compiler/jni/jni_compiler_test.cc2
-rw-r--r--compiler/llvm/compiler_llvm.cc4
-rw-r--r--compiler/oat_test.cc10
-rw-r--r--compiler/oat_writer.cc29
-rw-r--r--compiler/optimizing/builder.cc60
-rw-r--r--compiler/optimizing/builder.h5
-rw-r--r--compiler/optimizing/code_generator.cc51
-rw-r--r--compiler/optimizing/code_generator.h92
-rw-r--r--compiler/optimizing/code_generator_arm.cc118
-rw-r--r--compiler/optimizing/code_generator_arm.h32
-rw-r--r--compiler/optimizing/code_generator_x86.cc108
-rw-r--r--compiler/optimizing/code_generator_x86.h33
-rw-r--r--compiler/optimizing/codegen_test.cc88
-rw-r--r--compiler/optimizing/nodes.cc1
-rw-r--r--compiler/optimizing/nodes.h62
-rw-r--r--compiler/optimizing/optimizing_compiler.cc32
-rw-r--r--compiler/optimizing/optimizing_unit_test.h3
-rw-r--r--dex2oat/dex2oat.cc47
-rw-r--r--disassembler/disassembler.cc4
-rw-r--r--disassembler/disassembler_arm.cc6
-rw-r--r--disassembler/disassembler_arm.h9
-rw-r--r--disassembler/disassembler_arm64.h6
-rw-r--r--disassembler/disassembler_mips.cc3
-rw-r--r--disassembler/disassembler_mips.h10
-rw-r--r--disassembler/disassembler_x86.cc26
-rw-r--r--disassembler/disassembler_x86.h14
-rw-r--r--runtime/Android.mk18
-rw-r--r--runtime/arch/arm/fault_handler_arm.cc148
-rw-r--r--runtime/arch/mips/fault_handler_mips.cc46
-rw-r--r--runtime/arch/x86/fault_handler_x86.cc46
-rw-r--r--runtime/arch/x86_64/fault_handler_x86_64.cc46
-rw-r--r--runtime/arch/x86_64/quick_entrypoints_x86_64.S141
-rw-r--r--runtime/base/hex_dump_test.cc11
-rw-r--r--runtime/base/macros.h2
-rw-r--r--runtime/class_linker.cc85
-rw-r--r--runtime/class_linker_test.cc22
-rw-r--r--runtime/debugger.cc50
-rw-r--r--runtime/debugger.h41
-rw-r--r--runtime/elf_file.cc10
-rw-r--r--runtime/entrypoints/interpreter/interpreter_entrypoints.cc6
-rw-r--r--runtime/entrypoints/quick/quick_trampoline_entrypoints.cc481
-rw-r--r--runtime/exception_test.cc8
-rw-r--r--runtime/fault_handler.cc183
-rw-r--r--runtime/fault_handler.h91
-rw-r--r--runtime/gc/allocator/rosalloc.cc108
-rw-r--r--runtime/gc/collector/immune_region.cc66
-rw-r--r--runtime/gc/collector/immune_region.h65
-rw-r--r--runtime/gc/collector/mark_sweep.cc67
-rw-r--r--runtime/gc/collector/mark_sweep.h21
-rw-r--r--runtime/gc/collector/partial_mark_sweep.cc2
-rw-r--r--runtime/gc/collector/semi_space.cc61
-rw-r--r--runtime/gc/collector/semi_space.h20
-rw-r--r--runtime/gc/heap.cc124
-rw-r--r--runtime/gc/heap.h55
-rw-r--r--runtime/gc/reference_queue.cc98
-rw-r--r--runtime/gc/reference_queue.h20
-rw-r--r--runtime/gc/space/image_space.cc33
-rw-r--r--runtime/gc/space/space-inl.h1
-rw-r--r--runtime/interpreter/interpreter_common.h1
-rw-r--r--runtime/invoke_arg_array_builder.h213
-rw-r--r--runtime/jni_internal.cc161
-rw-r--r--runtime/jni_internal.h34
-rw-r--r--runtime/jni_internal_test.cc851
-rw-r--r--runtime/mem_map.cc140
-rw-r--r--runtime/mem_map_test.cc33
-rw-r--r--runtime/method_reference.h3
-rw-r--r--runtime/mirror/art_method.cc19
-rw-r--r--runtime/mirror/art_method.h12
-rw-r--r--runtime/mirror/object-inl.h13
-rw-r--r--runtime/mirror/object.h6
-rw-r--r--runtime/mirror/reference-inl.h36
-rw-r--r--runtime/mirror/reference.h114
-rw-r--r--runtime/native/dalvik_system_VMDebug.cc18
-rw-r--r--runtime/native/dalvik_system_VMRuntime.cc2
-rw-r--r--runtime/native/dalvik_system_VMStack.cc66
-rw-r--r--runtime/native/dalvik_system_Zygote.cc1
-rw-r--r--runtime/native/java_lang_Class.cc4
-rw-r--r--runtime/native/java_lang_Runtime.cc6
-rw-r--r--runtime/native/java_lang_Thread.cc6
-rw-r--r--runtime/native/java_lang_Throwable.cc15
-rw-r--r--runtime/native/java_lang_VMClassLoader.cc8
-rw-r--r--runtime/native/java_lang_reflect_Constructor.cc5
-rw-r--r--runtime/native/java_lang_reflect_Field.cc2
-rw-r--r--runtime/native/java_lang_reflect_Method.cc10
-rw-r--r--runtime/native/java_lang_reflect_Proxy.cc6
-rw-r--r--runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc20
-rw-r--r--runtime/native/scoped_fast_native_object_access.h60
-rw-r--r--runtime/oat.cc5
-rw-r--r--runtime/object_utils.h20
-rw-r--r--runtime/parsed_options.cc82
-rw-r--r--runtime/parsed_options.h5
-rw-r--r--runtime/proxy_test.cc139
-rw-r--r--runtime/quick/inline_method_analyser.cc303
-rw-r--r--runtime/quick/inline_method_analyser.h177
-rw-r--r--runtime/reflection.cc481
-rw-r--r--runtime/reflection.h27
-rw-r--r--runtime/reflection_test.cc628
-rw-r--r--runtime/runtime.cc36
-rw-r--r--runtime/runtime.h16
-rw-r--r--runtime/scoped_thread_state_change.h23
-rw-r--r--runtime/stack.cc20
-rw-r--r--runtime/stack.h2
-rw-r--r--runtime/stack_indirect_reference_table.h16
-rw-r--r--runtime/thread.cc49
-rw-r--r--runtime/thread.h34
-rw-r--r--runtime/thread_list.cc1
-rw-r--r--runtime/transaction_test.cc1
-rw-r--r--test/044-proxy/expected.txt1
-rw-r--r--test/044-proxy/src/BasicTest.java15
-rw-r--r--test/082-inline-execute/src/Main.java3
-rw-r--r--test/202-thread-oome/src/Main.java2
-rw-r--r--test/302-float-conversion/expected.txt3
-rw-r--r--test/302-float-conversion/info.txt1
-rw-r--r--test/302-float-conversion/src/Main.java17
-rw-r--r--test/Android.mk9
153 files changed, 5523 insertions, 3220 deletions
diff --git a/build/Android.common.mk b/build/Android.common.mk
index f22eb37beb..219f1e273e 100644
--- a/build/Android.common.mk
+++ b/build/Android.common.mk
@@ -37,6 +37,12 @@ ART_BUILD_TARGET_DEBUG ?= true
ART_BUILD_HOST_NDEBUG ?= $(WITH_HOST_DALVIK)
ART_BUILD_HOST_DEBUG ?= $(WITH_HOST_DALVIK)
+ifeq ($(BUILD_HOST_64bit),)
+ART_HOST_ARCH := x86
+else
+ART_HOST_ARCH := x86_64
+endif
+
ifeq ($(ART_BUILD_TARGET_NDEBUG),false)
$(info Disabling ART_BUILD_TARGET_NDEBUG)
endif
@@ -87,6 +93,23 @@ $(info Enabling ART_USE_PORTABLE_COMPILER because WITH_ART_USE_PORTABLE_COMPILER
ART_USE_PORTABLE_COMPILER := true
endif
+#
+# Used to enable optimizing compiler
+#
+ART_USE_OPTIMIZING_COMPILER := false
+ifneq ($(wildcard art/USE_OPTIMIZING_COMPILER),)
+$(info Enabling ART_USE_OPTIMIZING_COMPILER because of existence of art/USE_OPTIMIZING_COMPILER)
+ART_USE_OPTIMIZING_COMPILER := true
+endif
+ifeq ($(WITH_ART_USE_OPTIMIZING_COMPILER), true)
+ART_USE_OPTIMIZING_COMPILER := true
+endif
+
+ifeq ($(ART_USE_OPTIMIZING_COMPILER),true)
+DEX2OAT_FLAGS := --compiler-backend=Optimizing
+DALVIKVM_FLAGS := -Xcompiler-option --compiler-backend=Optimizing
+endif
+
LLVM_ROOT_PATH := external/llvm
# Don't fail a dalvik minimal host build.
-include $(LLVM_ROOT_PATH)/llvm.mk
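Note: the hunk above gives two ways to opt in to the new backend: create a sentinel file at art/USE_OPTIMIZING_COMPILER, or set WITH_ART_USE_OPTIMIZING_COMPILER=true in the build environment. Either path makes the build pass --compiler-backend=Optimizing to dex2oat, and the same flag to dalvikvm via -Xcompiler-option.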
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 7829f9b011..99285cca73 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -62,6 +62,8 @@ RUNTIME_GTEST_COMMON_SRC_FILES := \
COMPILER_GTEST_COMMON_SRC_FILES := \
runtime/jni_internal_test.cc \
+ runtime/proxy_test.cc \
+ runtime/reflection_test.cc \
compiler/dex/local_value_numbering_test.cc \
compiler/driver/compiler_driver_test.cc \
compiler/elf_writer_test.cc \
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index 6012421a59..def585b787 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -40,19 +40,13 @@ TARGET_CORE_IMG_OUT := $(ART_TEST_OUT)/core.art
TARGET_INSTRUCTION_SET_FEATURES := $(DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES)
-ifeq ($(BUILD_HOST_64bit),)
-host_arch := x86
-else
-host_arch := x86_64
-endif
-
$(HOST_CORE_IMG_OUT): $(HOST_CORE_DEX_FILES) $(DEX2OAT_DEPENDENCY)
@echo "host dex2oat: $@ ($?)"
@mkdir -p $(dir $@)
$(hide) $(DEX2OAT) --runtime-arg -Xms16m --runtime-arg -Xmx16m --image-classes=$(PRELOADED_CLASSES) $(addprefix \
--dex-file=,$(HOST_CORE_DEX_FILES)) $(addprefix --dex-location=,$(HOST_CORE_DEX_LOCATIONS)) --oat-file=$(HOST_CORE_OAT_OUT) \
--oat-location=$(HOST_CORE_OAT) --image=$(HOST_CORE_IMG_OUT) --base=$(LIBART_IMG_HOST_BASE_ADDRESS) \
- --instruction-set=$(host_arch) --host --android-root=$(HOST_OUT)
+ --instruction-set=$(ART_HOST_ARCH) --host --android-root=$(HOST_OUT)
$(TARGET_CORE_IMG_OUT): $(TARGET_CORE_DEX_FILES) $(DEX2OAT_DEPENDENCY)
@echo "target dex2oat: $@ ($?)"
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 48e2bcd732..bcd120b413 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -75,6 +75,7 @@ LIBART_COMPILER_SRC_FILES := \
optimizing/code_generator_arm.cc \
optimizing/code_generator_x86.cc \
optimizing/nodes.cc \
+ optimizing/optimizing_compiler.cc \
trampolines/trampoline_compiler.cc \
utils/arena_allocator.cc \
utils/arena_bit_vector.cc \
@@ -89,7 +90,8 @@ LIBART_COMPILER_SRC_FILES := \
utils/x86/managed_register_x86.cc \
utils/scoped_arena_allocator.cc \
buffered_output_stream.cc \
- compiler_backend.cc \
+ compilers.cc \
+ compiler.cc \
elf_fixup.cc \
elf_stripper.cc \
elf_writer.cc \
@@ -132,8 +134,7 @@ LIBART_COMPILER_CFLAGS += -DART_USE_PORTABLE_COMPILER=1
endif
LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES := \
- dex/compiler_enums.h \
- dex/quick/dex_file_method_inliner.h
+ dex/compiler_enums.h
# $(1): target or host
# $(2): ndebug or debug
@@ -210,7 +211,7 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT
LOCAL_SHARED_LIBRARIES += libart
endif
ifeq ($(ART_USE_PORTABLE_COMPILER),true)
- LOCAL_SHARED_LIBRARIES += libbcc libbcinfo libLLVM
+ LOCAL_SHARED_LIBRARIES += libLLVM
LOCAL_CFLAGS += -DART_USE_PORTABLE_COMPILER=1
ifeq ($$(art_target_or_host),target)
LOCAL_STATIC_LIBRARIES_arm += libmcldARMInfo libmcldARMTarget
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index def7b681dc..49c1283809 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -17,7 +17,7 @@
#ifndef ART_COMPILER_COMMON_COMPILER_TEST_H_
#define ART_COMPILER_COMMON_COMPILER_TEST_H_
-#include "compiler_backend.h"
+#include "compiler.h"
#include "compiler_callbacks.h"
#include "common_runtime_test.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
@@ -219,8 +219,15 @@ class CommonCompilerTest : public CommonRuntimeTest {
} else {
const void* method_code = GetQuickGenericJniTrampoline();
mirror::ArtMethod* callee_save_method = runtime_->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
+
+ // Compute Sirt size, as Sirt goes into frame
+ MethodHelper mh(method);
+ uint32_t sirt_refs = mh.GetNumberOfReferenceArgsWithoutReceiver() + 1;
+ uint32_t sirt_size = StackIndirectReferenceTable::SizeOf(sirt_refs);
+
OatFile::OatMethod oat_method = CreateOatMethod(method_code,
- callee_save_method->GetFrameSizeInBytes(),
+ callee_save_method->GetFrameSizeInBytes() +
+ sirt_size,
callee_save_method->GetCoreSpillMask(),
callee_save_method->GetFpSpillMask(),
nullptr,
@@ -312,13 +319,13 @@ class CommonCompilerTest : public CommonRuntimeTest {
}
// TODO: make selectable
- CompilerBackend::Kind compiler_backend
- = (kUsePortableCompiler) ? CompilerBackend::kPortable : CompilerBackend::kQuick;
+ Compiler::Kind compiler_kind
+ = (kUsePortableCompiler) ? Compiler::kPortable : Compiler::kQuick;
timer_.reset(new CumulativeLogger("Compilation times"));
compiler_driver_.reset(new CompilerDriver(compiler_options_.get(),
verification_results_.get(),
method_inliner_map_.get(),
- compiler_backend, instruction_set,
+ compiler_kind, instruction_set,
instruction_set_features,
true, new CompilerDriver::DescriptorSet,
2, true, true, timer_.get()));
diff --git a/compiler/compiler_backend.cc b/compiler/compiler.cc
index 0afa665eb7..c88c38ed17 100644
--- a/compiler/compiler_backend.cc
+++ b/compiler/compiler.cc
@@ -14,10 +14,8 @@
* limitations under the License.
*/
-#include "compiler_backend.h"
-#include "elf_writer_quick.h"
-#include "dex/quick/mir_to_lir.h"
-#include "dex/mir_graph.h"
+#include "compiler.h"
+#include "compilers.h"
#include "driver/compiler_driver.h"
#include "mirror/art_method-inl.h"
@@ -29,7 +27,7 @@
namespace art {
#ifdef ART_SEA_IR_MODE
-extern "C" art::CompiledMethod* SeaIrCompileMethod(art::CompilerDriver& compiler,
+extern "C" art::CompiledMethod* SeaIrCompileMethod(art::CompilerDriver& driver,
const art::DexFile::CodeItem* code_item,
uint32_t access_flags,
art::InvokeType invoke_type,
@@ -39,30 +37,15 @@ extern "C" art::CompiledMethod* SeaIrCompileMethod(art::CompilerDriver& compiler
const art::DexFile& dex_file);
#endif
-extern "C" void ArtInitQuickCompilerContext(art::CompilerDriver& driver);
-extern "C" void ArtUnInitQuickCompilerContext(art::CompilerDriver& driver);
-extern "C" art::CompiledMethod* ArtQuickCompileMethod(art::CompilerDriver& compiler,
- const art::DexFile::CodeItem* code_item,
- uint32_t access_flags,
- art::InvokeType invoke_type,
- uint16_t class_def_idx,
- uint32_t method_idx,
- jobject class_loader,
- const art::DexFile& dex_file);
-extern "C" art::CompiledMethod* ArtQuickJniCompileMethod(art::CompilerDriver& compiler,
- uint32_t access_flags, uint32_t method_idx,
- const art::DexFile& dex_file);
-
-
-static CompiledMethod* TryCompileWithSeaIR(art::CompilerDriver& compiler,
- const art::DexFile::CodeItem* code_item,
- uint32_t access_flags,
- art::InvokeType invoke_type,
- uint16_t class_def_idx,
- uint32_t method_idx,
- jobject class_loader,
- const art::DexFile& dex_file) {
+CompiledMethod* Compiler::TryCompileWithSeaIR(art::CompilerDriver& driver,
+ const art::DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ art::InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ jobject class_loader,
+ const art::DexFile& dex_file) {
#ifdef ART_SEA_IR_MODE
bool use_sea = Runtime::Current()->IsSeaIRMode();
use_sea = use_sea &&
@@ -83,117 +66,6 @@ static CompiledMethod* TryCompileWithSeaIR(art::CompilerDriver& compiler,
}
-// Hack for CFI CIE initialization
-extern std::vector<uint8_t>* X86CFIInitialization();
-
-class QuickBackend : public CompilerBackend {
- public:
- QuickBackend() : CompilerBackend(100) {}
-
- void Init(CompilerDriver& driver) const {
- ArtInitQuickCompilerContext(driver);
- }
-
- void UnInit(CompilerDriver& driver) const {
- ArtUnInitQuickCompilerContext(driver);
- }
-
- CompiledMethod* Compile(CompilerDriver& compiler,
- const DexFile::CodeItem* code_item,
- uint32_t access_flags,
- InvokeType invoke_type,
- uint16_t class_def_idx,
- uint32_t method_idx,
- jobject class_loader,
- const DexFile& dex_file) const {
- CompiledMethod* method = TryCompileWithSeaIR(compiler,
- code_item,
- access_flags,
- invoke_type,
- class_def_idx,
- method_idx,
- class_loader,
- dex_file);
- if (method != nullptr) return method;
-
- return ArtQuickCompileMethod(compiler,
- code_item,
- access_flags,
- invoke_type,
- class_def_idx,
- method_idx,
- class_loader,
- dex_file);
- }
-
- CompiledMethod* JniCompile(CompilerDriver& driver,
- uint32_t access_flags,
- uint32_t method_idx,
- const DexFile& dex_file) const {
- return ArtQuickJniCompileMethod(driver, access_flags, method_idx, dex_file);
- }
-
- uintptr_t GetEntryPointOf(mirror::ArtMethod* method) const {
- return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCode());
- }
-
- bool WriteElf(art::File* file,
- OatWriter* oat_writer,
- const std::vector<const art::DexFile*>& dex_files,
- const std::string& android_root,
- bool is_host, const CompilerDriver& driver) const
- OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return art::ElfWriterQuick::Create(file, oat_writer, dex_files, android_root, is_host, driver);
- }
-
- Backend* GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) const {
- Mir2Lir* mir_to_lir = nullptr;
- switch (cu->instruction_set) {
- case kThumb2:
- mir_to_lir = ArmCodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
- break;
- case kMips:
- mir_to_lir = MipsCodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
- break;
- case kX86:
- mir_to_lir = X86CodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
- break;
- default:
- LOG(FATAL) << "Unexpected instruction set: " << cu->instruction_set;
- }
-
- /* The number of compiler temporaries depends on backend so set it up now if possible */
- if (mir_to_lir) {
- size_t max_temps = mir_to_lir->GetMaxPossibleCompilerTemps();
- bool set_max = cu->mir_graph->SetMaxAvailableNonSpecialCompilerTemps(max_temps);
- CHECK(set_max);
- }
- return mir_to_lir;
- }
-
- void InitCompilationUnit(CompilationUnit& cu) const {}
-
- /*
- * @brief Generate and return Dwarf CFI initialization, if supported by the
- * backend.
- * @param driver CompilerDriver for this compile.
- * @returns nullptr if not supported by backend or a vector of bytes for CFI DWARF
- * information.
- * @note This is used for backtrace information in generated code.
- */
- std::vector<uint8_t>* GetCallFrameInformationInitialization(const CompilerDriver& driver) const
- OVERRIDE {
- if (driver.GetInstructionSet() == kX86) {
- return X86CFIInitialization();
- }
- return nullptr;
- }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(QuickBackend);
-};
-
#ifdef ART_USE_PORTABLE_COMPILER
extern "C" void ArtInitCompilerContext(art::CompilerDriver& driver);
@@ -217,9 +89,9 @@ extern "C" void compilerLLVMSetBitcodeFileName(art::CompilerDriver& driver,
std::string const& filename);
-class LLVMBackend : public CompilerBackend {
+class LLVMCompiler : public Compiler {
public:
- LLVMBackend() : CompilerBackend(1000) {}
+ LLVMCompiler() : Compiler(1000) {}
void Init(CompilerDriver& driver) const {
ArtInitCompilerContext(driver);
@@ -229,7 +101,7 @@ class LLVMBackend : public CompilerBackend {
ArtUnInitCompilerContext(driver);
}
- CompiledMethod* Compile(CompilerDriver& compiler,
+ CompiledMethod* Compile(CompilerDriver& driver,
const DexFile::CodeItem* code_item,
uint32_t access_flags,
InvokeType invoke_type,
@@ -237,7 +109,7 @@ class LLVMBackend : public CompilerBackend {
uint32_t method_idx,
jobject class_loader,
const DexFile& dex_file) const {
- CompiledMethod* method = TryCompileWithSeaIR(compiler,
+ CompiledMethod* method = TryCompileWithSeaIR(driver,
code_item,
access_flags,
invoke_type,
@@ -306,18 +178,21 @@ class LLVMBackend : public CompilerBackend {
}
private:
- DISALLOW_COPY_AND_ASSIGN(LLVMBackend);
+ DISALLOW_COPY_AND_ASSIGN(LLVMCompiler);
};
#endif
-CompilerBackend* CompilerBackend::Create(CompilerBackend::Kind kind) {
+Compiler* Compiler::Create(Compiler::Kind kind) {
switch (kind) {
case kQuick:
- return new QuickBackend();
+ return new QuickCompiler();
+ break;
+ case kOptimizing:
+ return new OptimizingCompiler();
break;
case kPortable:
#ifdef ART_USE_PORTABLE_COMPILER
- return new LLVMBackend();
+ return new LLVMCompiler();
#else
LOG(FATAL) << "Portable compiler not compiled";
#endif
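A minimal usage sketch of the renamed factory (C++). The use_optimizing flag here is a stand-in; in the real tree the kind is chosen from dex2oat's --compiler-backend option:

// Sketch only: kind selection normally comes from --compiler-backend.
Compiler* CreateSelectedCompiler(bool use_optimizing) {
  return Compiler::Create(use_optimizing ? Compiler::kOptimizing
                                         : Compiler::kQuick);
}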
diff --git a/compiler/compiler_backend.h b/compiler/compiler.h
index b473806bba..1d5fc24e9f 100644
--- a/compiler/compiler_backend.h
+++ b/compiler/compiler.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_COMPILER_COMPILER_BACKEND_H_
-#define ART_COMPILER_COMPILER_BACKEND_H_
+#ifndef ART_COMPILER_COMPILER_H_
+#define ART_COMPILER_COMPILER_H_
#include "dex_file.h"
#include "os.h"
@@ -33,18 +33,19 @@ namespace mirror {
class ArtMethod;
}
-class CompilerBackend {
+class Compiler {
public:
enum Kind {
kQuick,
+ kOptimizing,
kPortable
};
- explicit CompilerBackend(uint64_t warning)
+ explicit Compiler(uint64_t warning)
: maximum_compilation_time_before_warning_(warning) {
}
- static CompilerBackend* Create(Kind kind);
+ static Compiler* Create(Kind kind);
virtual void Init(CompilerDriver& driver) const = 0;
@@ -59,6 +60,15 @@ class CompilerBackend {
jobject class_loader,
const DexFile& dex_file) const = 0;
+ static CompiledMethod* TryCompileWithSeaIR(art::CompilerDriver& driver,
+ const art::DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ art::InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ jobject class_loader,
+ const art::DexFile& dex_file);
+
virtual CompiledMethod* JniCompile(CompilerDriver& driver,
uint32_t access_flags,
uint32_t method_idx,
@@ -91,7 +101,7 @@ class CompilerBackend {
virtual void InitCompilationUnit(CompilationUnit& cu) const = 0;
- virtual ~CompilerBackend() {}
+ virtual ~Compiler() {}
/*
* @brief Generate and return Dwarf CFI initialization, if supported by the
@@ -109,9 +119,9 @@ class CompilerBackend {
private:
const uint64_t maximum_compilation_time_before_warning_;
- DISALLOW_COPY_AND_ASSIGN(CompilerBackend);
+ DISALLOW_COPY_AND_ASSIGN(Compiler);
};
} // namespace art
-#endif // ART_COMPILER_COMPILER_BACKEND_H_
+#endif // ART_COMPILER_COMPILER_H_
diff --git a/compiler/compilers.cc b/compiler/compilers.cc
new file mode 100644
index 0000000000..f58b38b570
--- /dev/null
+++ b/compiler/compilers.cc
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compilers.h"
+#include "dex/mir_graph.h"
+#include "dex/quick/mir_to_lir.h"
+#include "elf_writer_quick.h"
+#include "mirror/art_method-inl.h"
+
+namespace art {
+
+extern "C" void ArtInitQuickCompilerContext(art::CompilerDriver& driver);
+extern "C" void ArtUnInitQuickCompilerContext(art::CompilerDriver& driver);
+extern "C" art::CompiledMethod* ArtQuickCompileMethod(art::CompilerDriver& driver,
+ const art::DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ art::InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ jobject class_loader,
+ const art::DexFile& dex_file);
+
+extern "C" art::CompiledMethod* ArtQuickJniCompileMethod(art::CompilerDriver& driver,
+ uint32_t access_flags, uint32_t method_idx,
+ const art::DexFile& dex_file);
+
+// Hack for CFI CIE initialization
+extern std::vector<uint8_t>* X86CFIInitialization();
+
+void QuickCompiler::Init(CompilerDriver& driver) const {
+ ArtInitQuickCompilerContext(driver);
+}
+
+void QuickCompiler::UnInit(CompilerDriver& driver) const {
+ ArtUnInitQuickCompilerContext(driver);
+}
+
+CompiledMethod* QuickCompiler::Compile(CompilerDriver& driver,
+ const DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ jobject class_loader,
+ const DexFile& dex_file) const {
+ CompiledMethod* method = TryCompileWithSeaIR(driver,
+ code_item,
+ access_flags,
+ invoke_type,
+ class_def_idx,
+ method_idx,
+ class_loader,
+ dex_file);
+ if (method != nullptr) return method;
+
+ return ArtQuickCompileMethod(driver,
+ code_item,
+ access_flags,
+ invoke_type,
+ class_def_idx,
+ method_idx,
+ class_loader,
+ dex_file);
+}
+
+CompiledMethod* QuickCompiler::JniCompile(CompilerDriver& driver,
+ uint32_t access_flags,
+ uint32_t method_idx,
+ const DexFile& dex_file) const {
+ return ArtQuickJniCompileMethod(driver, access_flags, method_idx, dex_file);
+}
+
+uintptr_t QuickCompiler::GetEntryPointOf(mirror::ArtMethod* method) const {
+ return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCode());
+}
+
+bool QuickCompiler::WriteElf(art::File* file,
+ OatWriter* oat_writer,
+ const std::vector<const art::DexFile*>& dex_files,
+ const std::string& android_root,
+ bool is_host, const CompilerDriver& driver) const {
+ return art::ElfWriterQuick::Create(file, oat_writer, dex_files, android_root, is_host, driver);
+}
+
+Backend* QuickCompiler::GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) const {
+ Mir2Lir* mir_to_lir = nullptr;
+ switch (cu->instruction_set) {
+ case kThumb2:
+ mir_to_lir = ArmCodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
+ break;
+ case kMips:
+ mir_to_lir = MipsCodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
+ break;
+ case kX86:
+ mir_to_lir = X86CodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected instruction set: " << cu->instruction_set;
+ }
+
+ /* The number of compiler temporaries depends on backend so set it up now if possible */
+ if (mir_to_lir) {
+ size_t max_temps = mir_to_lir->GetMaxPossibleCompilerTemps();
+ bool set_max = cu->mir_graph->SetMaxAvailableNonSpecialCompilerTemps(max_temps);
+ CHECK(set_max);
+ }
+ return mir_to_lir;
+}
+
+std::vector<uint8_t>* QuickCompiler::GetCallFrameInformationInitialization(
+ const CompilerDriver& driver) const {
+ if (driver.GetInstructionSet() == kX86) {
+ return X86CFIInitialization();
+ }
+ return nullptr;
+}
+
+CompiledMethod* OptimizingCompiler::Compile(CompilerDriver& driver,
+ const DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ jobject class_loader,
+ const DexFile& dex_file) const {
+ CompiledMethod* method = TryCompile(
+ driver, code_item, access_flags, invoke_type, class_def_idx, method_idx,
+ class_loader, dex_file);
+ if (method != nullptr) return method;
+
+ return QuickCompiler::Compile(
+ driver, code_item, access_flags, invoke_type, class_def_idx, method_idx,
+ class_loader, dex_file);
+}
+
+} // namespace art
diff --git a/compiler/compilers.h b/compiler/compilers.h
new file mode 100644
index 0000000000..892a6bd1d5
--- /dev/null
+++ b/compiler/compilers.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_COMPILERS_H_
+#define ART_COMPILER_COMPILERS_H_
+
+#include "compiler.h"
+
+namespace art {
+
+class QuickCompiler : public Compiler {
+ public:
+ QuickCompiler() : Compiler(100) {}
+
+ void Init(CompilerDriver& driver) const OVERRIDE;
+
+ void UnInit(CompilerDriver& driver) const OVERRIDE;
+
+ CompiledMethod* Compile(CompilerDriver& driver,
+ const DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ jobject class_loader,
+ const DexFile& dex_file) const OVERRIDE;
+
+ CompiledMethod* JniCompile(CompilerDriver& driver,
+ uint32_t access_flags,
+ uint32_t method_idx,
+ const DexFile& dex_file) const OVERRIDE;
+
+ uintptr_t GetEntryPointOf(mirror::ArtMethod* method) const OVERRIDE;
+
+ bool WriteElf(art::File* file,
+ OatWriter* oat_writer,
+ const std::vector<const art::DexFile*>& dex_files,
+ const std::string& android_root,
+ bool is_host, const CompilerDriver& driver) const
+ OVERRIDE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ Backend* GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) const OVERRIDE;
+
+ void InitCompilationUnit(CompilationUnit& cu) const OVERRIDE {}
+
+ /*
+ * @brief Generate and return Dwarf CFI initialization, if supported by the
+ * backend.
+ * @param driver CompilerDriver for this compile.
+ * @returns nullptr if not supported by backend or a vector of bytes for CFI DWARF
+ * information.
+ * @note This is used for backtrace information in generated code.
+ */
+ std::vector<uint8_t>* GetCallFrameInformationInitialization(const CompilerDriver& driver) const
+ OVERRIDE;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(QuickCompiler);
+};
+
+class OptimizingCompiler : public QuickCompiler {
+ public:
+ OptimizingCompiler() { }
+
+ CompiledMethod* Compile(CompilerDriver& driver,
+ const DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ jobject class_loader,
+ const DexFile& dex_file) const OVERRIDE;
+
+ CompiledMethod* TryCompile(CompilerDriver& driver,
+ const DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ jobject class_loader,
+ const DexFile& dex_file) const;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(OptimizingCompiler);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_COMPILERS_H_
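Design note: OptimizingCompiler derives from QuickCompiler rather than directly from Compiler, so every virtual it does not override (JniCompile, WriteElf, GetCodeGenerator, and so on) falls through to the quick backend, and its Compile() explicitly delegates to QuickCompiler::Compile() whenever TryCompile() returns null. At this stage the optimizing backend therefore only has to handle the methods it can actually compile.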
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index 0cd9ba3603..cd215684bb 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -390,7 +390,8 @@ enum OpFeatureFlags {
kRegUsePC,
kRegUseSP,
kSetsCCodes,
- kUsesCCodes
+ kUsesCCodes,
+ kUseFpStack
};
enum SelectInstructionKind {
diff --git a/compiler/dex/compiler_ir.h b/compiler/dex/compiler_ir.h
index c71f0473f1..70159cae9f 100644
--- a/compiler/dex/compiler_ir.h
+++ b/compiler/dex/compiler_ir.h
@@ -62,7 +62,7 @@ struct CompilationUnit {
uint32_t disable_opt; // opt_control_vector flags.
uint32_t enable_debug; // debugControlVector flags.
bool verbose;
- const CompilerBackend* compiler_backend;
+ const Compiler* compiler;
InstructionSet instruction_set;
bool target64;
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index 3bd71d1c0a..83fbca5aca 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "compiler_backend.h"
+#include "compiler.h"
#include "compiler_internals.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
@@ -23,7 +23,6 @@
#include "mirror/object.h"
#include "pass_driver.h"
#include "runtime.h"
-#include "backend.h"
#include "base/logging.h"
#include "base/timing_logger.h"
#include "driver/compiler_options.h"
@@ -90,7 +89,7 @@ CompilationUnit::CompilationUnit(ArenaPool* pool)
disable_opt(0),
enable_debug(0),
verbose(false),
- compiler_backend(NULL),
+ compiler(NULL),
instruction_set(kNone),
num_dalvik_registers(0),
insns(NULL),
@@ -131,7 +130,7 @@ void CompilationUnit::EndTiming() {
}
static CompiledMethod* CompileMethod(CompilerDriver& driver,
- CompilerBackend* compiler_backend,
+ Compiler* compiler,
const DexFile::CodeItem* code_item,
uint32_t access_flags, InvokeType invoke_type,
uint16_t class_def_idx, uint32_t method_idx,
@@ -157,7 +156,7 @@ static CompiledMethod* CompileMethod(CompilerDriver& driver,
cu.class_linker = class_linker;
cu.instruction_set = driver.GetInstructionSet();
cu.target64 = cu.instruction_set == kX86_64;
- cu.compiler_backend = compiler_backend;
+ cu.compiler = compiler;
// TODO: x86_64 is not yet implemented.
DCHECK((cu.instruction_set == kThumb2) ||
(cu.instruction_set == kX86) ||
@@ -184,7 +183,7 @@ static CompiledMethod* CompileMethod(CompilerDriver& driver,
* MIR and backend flags? Need command-line setting as well.
*/
- compiler_backend->InitCompilationUnit(cu);
+ compiler->InitCompilationUnit(cu);
if (cu.instruction_set == kMips) {
// Disable some optimizations for mips for now
@@ -209,7 +208,7 @@ static CompiledMethod* CompileMethod(CompilerDriver& driver,
* The reason we do this is that optimizations on the MIR graph may need to get information
* that is only available if a CG exists.
*/
- cu.cg.reset(compiler_backend->GetCodeGenerator(&cu, llvm_compilation_unit));
+ cu.cg.reset(compiler->GetCodeGenerator(&cu, llvm_compilation_unit));
/* Gathering opcode stats? */
if (kCompilerDebugFlags & (1 << kDebugCountOpcodes)) {
@@ -286,8 +285,8 @@ static CompiledMethod* CompileMethod(CompilerDriver& driver,
return result;
}
-CompiledMethod* CompileOneMethod(CompilerDriver& compiler,
- CompilerBackend* backend,
+CompiledMethod* CompileOneMethod(CompilerDriver& driver,
+ Compiler* compiler,
const DexFile::CodeItem* code_item,
uint32_t access_flags,
InvokeType invoke_type,
@@ -296,21 +295,21 @@ CompiledMethod* CompileOneMethod(CompilerDriver& compiler,
jobject class_loader,
const DexFile& dex_file,
void* compilation_unit) {
- return CompileMethod(compiler, backend, code_item, access_flags, invoke_type, class_def_idx,
+ return CompileMethod(driver, compiler, code_item, access_flags, invoke_type, class_def_idx,
method_idx, class_loader, dex_file, compilation_unit);
}
} // namespace art
extern "C" art::CompiledMethod*
- ArtQuickCompileMethod(art::CompilerDriver& compiler,
+ ArtQuickCompileMethod(art::CompilerDriver& driver,
const art::DexFile::CodeItem* code_item,
uint32_t access_flags, art::InvokeType invoke_type,
uint16_t class_def_idx, uint32_t method_idx, jobject class_loader,
const art::DexFile& dex_file) {
// TODO: check method fingerprint here to determine appropriate backend type. Until then, use build default
- art::CompilerBackend* backend = compiler.GetCompilerBackend();
- return art::CompileOneMethod(compiler, backend, code_item, access_flags, invoke_type,
+ art::Compiler* compiler = driver.GetCompiler();
+ return art::CompileOneMethod(driver, compiler, code_item, access_flags, invoke_type,
class_def_idx, method_idx, class_loader, dex_file,
NULL /* use thread llvm_info */);
}
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 03fc091e4d..cb737ab294 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -405,7 +405,7 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
// Is this the select pattern?
// TODO: flesh out support for Mips. NOTE: llvm's select op doesn't quite work here.
// TUNING: expand to support IF_xx compare & branches
- if (!cu_->compiler_backend->IsPortable() &&
+ if (!cu_->compiler->IsPortable() &&
(cu_->instruction_set == kThumb2 || cu_->instruction_set == kX86) &&
IsInstructionIfCcZ(mir->dalvikInsn.opcode)) {
BasicBlock* ft = GetBasicBlock(bb->fall_through);
diff --git a/compiler/dex/pass_driver.cc b/compiler/dex/pass_driver.cc
index 291012f0d9..72d3ea6377 100644
--- a/compiler/dex/pass_driver.cc
+++ b/compiler/dex/pass_driver.cc
@@ -82,31 +82,48 @@ void PassDriver::InsertPass(const Pass* new_pass) {
pass_list_.push_back(new_pass);
}
-void PassDriver::CreatePasses() {
- /*
- * Create the pass list. These passes are immutable and are shared across the threads.
- *
- * Advantage is that there will be no race conditions here.
- * Disadvantage is the passes can't change their internal states depending on CompilationUnit:
- * - This is not yet an issue: no current pass would require it.
- */
- static const Pass* const passes[] = {
- GetPassInstance<CacheFieldLoweringInfo>(),
- GetPassInstance<CacheMethodLoweringInfo>(),
- GetPassInstance<CodeLayout>(),
- GetPassInstance<SSATransformation>(),
- GetPassInstance<ConstantPropagation>(),
- GetPassInstance<InitRegLocations>(),
- GetPassInstance<MethodUseCount>(),
- GetPassInstance<NullCheckEliminationAndTypeInferenceInit>(),
- GetPassInstance<NullCheckEliminationAndTypeInference>(),
- GetPassInstance<BBCombine>(),
- GetPassInstance<BBOptimizations>(),
- };
+/*
+ * Create the pass list. These passes are immutable and are shared across the threads.
+ *
+ * Advantage is that there will be no race conditions here.
+ * Disadvantage is the passes can't change their internal states depending on CompilationUnit:
+ * - This is not yet an issue: no current pass would require it.
+ */
+static const Pass* const gPasses[] = {
+ GetPassInstance<CacheFieldLoweringInfo>(),
+ GetPassInstance<CacheMethodLoweringInfo>(),
+ GetPassInstance<CodeLayout>(),
+ GetPassInstance<SSATransformation>(),
+ GetPassInstance<ConstantPropagation>(),
+ GetPassInstance<InitRegLocations>(),
+ GetPassInstance<MethodUseCount>(),
+ GetPassInstance<NullCheckEliminationAndTypeInferenceInit>(),
+ GetPassInstance<NullCheckEliminationAndTypeInference>(),
+ GetPassInstance<BBCombine>(),
+ GetPassInstance<BBOptimizations>(),
+};
+
+// The default pass list is used by CreatePasses to initialize pass_list_.
+static std::vector<const Pass*> gDefaultPassList(gPasses, gPasses + arraysize(gPasses));
+
+void PassDriver::CreateDefaultPassList(const std::string& disable_passes) {
+ // Insert each pass from gPasses into gDefaultPassList.
+ gDefaultPassList.clear();
+ gDefaultPassList.reserve(arraysize(gPasses));
+ for (const Pass* pass : gPasses) {
+ // Check if we should disable this pass.
+ if (disable_passes.find(pass->GetName()) != std::string::npos) {
+ LOG(INFO) << "Skipping " << pass->GetName();
+ } else {
+ gDefaultPassList.push_back(pass);
+ }
+ }
+}
+void PassDriver::CreatePasses() {
// Insert each pass into the list via the InsertPass method.
- pass_list_.reserve(arraysize(passes));
- for (const Pass* pass : passes) {
+ pass_list_.reserve(gDefaultPassList.size());
+ for (const Pass* pass : gDefaultPassList) {
InsertPass(pass);
}
}
@@ -221,10 +238,10 @@ void PassDriver::Launch() {
}
}
-void PassDriver::PrintPassNames() const {
+void PassDriver::PrintPassNames() {
LOG(INFO) << "Loop Passes are:";
- for (const Pass* cur_pass : pass_list_) {
+ for (const Pass* cur_pass : gPasses) {
LOG(INFO) << "\t-" << cur_pass->GetName();
}
}
diff --git a/compiler/dex/pass_driver.h b/compiler/dex/pass_driver.h
index c734d3e0eb..2b7196e187 100644
--- a/compiler/dex/pass_driver.h
+++ b/compiler/dex/pass_driver.h
@@ -73,7 +73,8 @@ class PassDriver {
*/
void DispatchPass(CompilationUnit* c_unit, const Pass* pass);
- void PrintPassNames() const;
+ static void PrintPassNames();
+ static void CreateDefaultPassList(const std::string& disable_passes);
const Pass* GetPass(const char* name) const;
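A usage sketch of the new hook. CreateDefaultPassList() does a plain substring search over pass names, so the separator in the disable string is free-form; the pass names below are assumed to match what GetName() returns for the gPasses entries:

// Sketch: drop two passes from the shared default list before
// compilation threads are started, then log what is available.
void DisableSomePasses() {
  PassDriver::CreateDefaultPassList("BBCombine,ConstantPropagation");
  PassDriver::PrintPassNames();  // logs the full gPasses list
}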
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 0fce5bbb3d..bba3d40409 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -214,8 +214,9 @@ void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
GenMemBarrier(kLoadLoad);
} else {
// Explicit null-check as slow-path is entered using an IT.
- GenNullCheck(rl_src.s_reg_low, r0, opt_flags);
+ GenNullCheck(r0, opt_flags);
LoadWordDisp(rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
+ MarkPossibleNullPointerException(opt_flags);
NewLIR3(kThumb2Ldrex, r1, r0, mirror::Object::MonitorOffset().Int32Value() >> 2);
OpRegImm(kOpCmp, r1, 0);
OpIT(kCondEq, "");
@@ -273,8 +274,9 @@ void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
GenMemBarrier(kStoreLoad);
} else {
// Explicit null-check as slow-path is entered using an IT.
- GenNullCheck(rl_src.s_reg_low, r0, opt_flags);
+ GenNullCheck(r0, opt_flags);
LoadWordDisp(r0, mirror::Object::MonitorOffset().Int32Value(), r1); // Get lock
+ MarkPossibleNullPointerException(opt_flags);
LoadWordDisp(rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
LoadConstantNoClobber(r3, 0);
// Is lock unheld on lock or held by us (==thread_id) on unlock?
@@ -340,8 +342,10 @@ void ArmMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
Thread::kStackOverflowReservedBytes));
NewLIR0(kPseudoMethodEntry);
if (!skip_overflow_check) {
- /* Load stack limit */
- LoadWordDisp(rARM_SELF, Thread::StackEndOffset().Int32Value(), r12);
+ if (Runtime::Current()->ExplicitStackOverflowChecks()) {
+ /* Load stack limit */
+ LoadWordDisp(rARM_SELF, Thread::StackEndOffset().Int32Value(), r12);
+ }
}
/* Spill core callee saves */
NewLIR1(kThumb2Push, core_spill_mask_);
@@ -355,9 +359,20 @@ void ArmMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
NewLIR1(kThumb2VPushCS, num_fp_spills_);
}
if (!skip_overflow_check) {
- OpRegRegImm(kOpSub, rARM_LR, rARM_SP, frame_size_ - (spill_count * 4));
- GenRegRegCheck(kCondUlt, rARM_LR, r12, kThrowStackOverflow);
- OpRegCopy(rARM_SP, rARM_LR); // Establish stack
+ if (Runtime::Current()->ExplicitStackOverflowChecks()) {
+ OpRegRegImm(kOpSub, rARM_LR, rARM_SP, frame_size_ - (spill_count * 4));
+ GenRegRegCheck(kCondUlt, rARM_LR, r12, kThrowStackOverflow);
+ OpRegCopy(rARM_SP, rARM_LR); // Establish stack
+ } else {
+ // Implicit stack overflow check.
+ // Generate a load from [sp, #-framesize]. If this is in the stack
+ // redzone we will get a segmentation fault.
+ uint32_t full_frame_size = frame_size_ - (spill_count * 4);
+
+ OpRegImm(kOpSub, rARM_SP, full_frame_size);
+ LoadWordDisp(rARM_SP, 0, rARM_LR);
+ MarkPossibleStackOverflowException();
+ }
} else {
OpRegImm(kOpSub, rARM_SP, frame_size_ - (spill_count * 4));
}
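The else branch above is the heart of the implicit-exception work in this change (the explicit GenNullCheck calls elsewhere in this file are likewise replaced by loads marked with MarkPossibleNullPointerException). A hypothetical stand-alone illustration of the stack probe, not ART code:

#include <cstddef>

// Touch the lowest byte of the new frame: if [sp - frame_size] lands in
// the protected redzone below the stack, the load faults and the SIGSEGV
// handler (see runtime/fault_handler.cc in the diffstat) can raise
// StackOverflowError instead of an explicit limit comparison.
void ProbeNewFrame(volatile char* sp, size_t frame_size) {
  volatile char* new_sp = sp - frame_size;
  (void)*new_sp;  // the faulting load doubles as the overflow check
}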
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 2c0cead6ca..6e72c80e8a 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -30,6 +30,7 @@ class ArmMir2Lir : public Mir2Lir {
bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
RegLocation rl_dest, int lit);
int LoadHelper(ThreadOffset offset);
+ LIR* CheckSuspendUsingLoad() OVERRIDE;
LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg);
LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
int s_reg);
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index fb2096f1e9..1d959fa9de 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -816,7 +816,12 @@ void ArmMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
void ArmMir2Lir::GenMulLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2) {
/*
- * To pull off inline multiply, we have a worst-case requirement of 8 temporary
+ * tmp1 = src1.hi * src2.lo; // src1.hi is no longer needed
+ * dest = src1.lo * src2.lo;
+ * tmp1 += src1.lo * src2.hi;
+ * dest.hi += tmp1;
+ *
+ * To pull off inline multiply, we have a worst-case requirement of 7 temporary
* registers. Normally for Arm, we get 5. We can get to 6 by including
* lr in the temp set. The only problematic case is all operands and result are
* distinct, and none have been promoted. In that case, we can succeed by aggressively
@@ -833,57 +838,85 @@ void ArmMir2Lir::GenMulLong(Instruction::Code opcode, RegLocation rl_dest,
StoreValueWide(rl_dest, rl_result);
return;
}
+
+ rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+ rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+
+ int reg_status = 0;
+ int res_lo = INVALID_REG;
+ int res_hi = INVALID_REG;
+ bool dest_promoted = rl_dest.location == kLocPhysReg && !rl_dest.reg.IsInvalid() &&
+ !IsTemp(rl_dest.reg.GetReg()) && !IsTemp(rl_dest.reg.GetHighReg());
+ bool src1_promoted = !IsTemp(rl_src1.reg.GetReg()) && !IsTemp(rl_src1.reg.GetHighReg());
+ bool src2_promoted = !IsTemp(rl_src2.reg.GetReg()) && !IsTemp(rl_src2.reg.GetHighReg());
+ // Check if rl_dest is *not* either operand and we have enough temp registers.
+ if ((rl_dest.s_reg_low != rl_src1.s_reg_low && rl_dest.s_reg_low != rl_src2.s_reg_low) &&
+ (dest_promoted || src1_promoted || src2_promoted)) {
+ // In this case, we do not need to manually allocate temp registers for result.
+ rl_result = EvalLoc(rl_dest, kCoreReg, true);
+ res_lo = rl_result.reg.GetReg();
+ res_hi = rl_result.reg.GetHighReg();
+ } else {
+ res_lo = AllocTemp();
+ if ((rl_src1.s_reg_low == rl_src2.s_reg_low) || src1_promoted || src2_promoted) {
+ // In this case, we have enough temp registers to be allocated for result.
+ res_hi = AllocTemp();
+ reg_status = 1;
+ } else {
+ // In this case, all temps are now allocated.
+ // res_hi will be allocated after we can free src1_hi.
+ reg_status = 2;
+ }
+ }
+
// Temporarily add LR to the temp pool, and assign it to tmp1
MarkTemp(rARM_LR);
FreeTemp(rARM_LR);
int tmp1 = rARM_LR;
LockTemp(rARM_LR);
- rl_src1 = LoadValueWide(rl_src1, kCoreReg);
- rl_src2 = LoadValueWide(rl_src2, kCoreReg);
-
- bool special_case = true;
- // If operands are the same, or any pair has been promoted we're not the special case.
- if ((rl_src1.s_reg_low == rl_src2.s_reg_low) ||
- (!IsTemp(rl_src1.reg.GetReg()) && !IsTemp(rl_src1.reg.GetHighReg())) ||
- (!IsTemp(rl_src2.reg.GetReg()) && !IsTemp(rl_src2.reg.GetHighReg()))) {
- special_case = false;
- }
- // Tuning: if rl_dest has been promoted and is *not* either operand, could use directly.
- int res_lo = AllocTemp();
- int res_hi;
if (rl_src1.reg.GetReg() == rl_src2.reg.GetReg()) {
- res_hi = AllocTemp();
+ DCHECK_NE(res_hi, INVALID_REG);
+ DCHECK_NE(res_lo, INVALID_REG);
NewLIR3(kThumb2MulRRR, tmp1, rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg());
NewLIR4(kThumb2Umull, res_lo, res_hi, rl_src1.reg.GetReg(), rl_src1.reg.GetReg());
OpRegRegRegShift(kOpAdd, res_hi, res_hi, tmp1, EncodeShift(kArmLsl, 1));
} else {
- // In the special case, all temps are now allocated
NewLIR3(kThumb2MulRRR, tmp1, rl_src2.reg.GetReg(), rl_src1.reg.GetHighReg());
- if (special_case) {
+ if (reg_status == 2) {
+ DCHECK_EQ(res_hi, INVALID_REG);
DCHECK_NE(rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
DCHECK_NE(rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
FreeTemp(rl_src1.reg.GetHighReg());
+ res_hi = AllocTemp();
}
- res_hi = AllocTemp();
-
+ DCHECK_NE(res_hi, INVALID_REG);
+ DCHECK_NE(res_lo, INVALID_REG);
NewLIR4(kThumb2Umull, res_lo, res_hi, rl_src2.reg.GetReg(), rl_src1.reg.GetReg());
NewLIR4(kThumb2Mla, tmp1, rl_src1.reg.GetReg(), rl_src2.reg.GetHighReg(), tmp1);
NewLIR4(kThumb2AddRRR, res_hi, tmp1, res_hi, 0);
- if (special_case) {
+ if (reg_status == 2) {
+ // Clobber rl_src1 since it was corrupted.
FreeTemp(rl_src1.reg.GetReg());
Clobber(rl_src1.reg.GetReg());
Clobber(rl_src1.reg.GetHighReg());
}
}
- FreeTemp(tmp1);
- rl_result = GetReturnWide(false); // Just using as a template.
- rl_result.reg.SetReg(res_lo);
- rl_result.reg.SetHighReg(res_hi);
- StoreValueWide(rl_dest, rl_result);
+
// Now, restore lr to its non-temp status.
+ FreeTemp(tmp1);
Clobber(rARM_LR);
UnmarkTemp(rARM_LR);
+
+ if (reg_status != 0) {
+ // We had manually allocated registers for rl_result.
+ // Now construct a RegLocation.
+ rl_result = GetReturnWide(false); // Just using as a template.
+ rl_result.reg.SetReg(res_lo);
+ rl_result.reg.SetHighReg(res_hi);
+ }
+
+ StoreValueWide(rl_dest, rl_result);
}
void ArmMir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
@@ -938,7 +971,7 @@ void ArmMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
}
/* null object? */
- GenNullCheck(rl_array.s_reg_low, rl_array.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_array.reg.GetReg(), opt_flags);
bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
int reg_len = INVALID_REG;
@@ -946,6 +979,9 @@ void ArmMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
reg_len = AllocTemp();
/* Get len */
LoadWordDisp(rl_array.reg.GetReg(), len_offset, reg_len);
+ MarkPossibleNullPointerException(opt_flags);
+ } else {
+ ForceImplicitNullCheck(rl_array.reg.GetReg(), opt_flags);
}
if (rl_dest.wide || rl_dest.fp || constant_index) {
int reg_ptr;
@@ -969,13 +1005,16 @@ void ArmMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
FreeTemp(reg_len);
}
if (rl_dest.wide) {
- LoadBaseDispWide(reg_ptr, data_offset, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), INVALID_SREG);
+ LoadBaseDispWide(reg_ptr, data_offset, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(),
+ INVALID_SREG);
+ MarkPossibleNullPointerException(opt_flags);
if (!constant_index) {
FreeTemp(reg_ptr);
}
StoreValueWide(rl_dest, rl_result);
} else {
LoadBaseDisp(reg_ptr, data_offset, rl_result.reg.GetReg(), size, INVALID_SREG);
+ MarkPossibleNullPointerException(opt_flags);
if (!constant_index) {
FreeTemp(reg_ptr);
}
@@ -993,6 +1032,7 @@ void ArmMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
FreeTemp(reg_len);
}
LoadBaseIndexed(reg_ptr, rl_index.reg.GetReg(), rl_result.reg.GetReg(), scale, size);
+ MarkPossibleNullPointerException(opt_flags);
FreeTemp(reg_ptr);
StoreValue(rl_dest, rl_result);
}
@@ -1038,7 +1078,7 @@ void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
}
/* null object? */
- GenNullCheck(rl_array.s_reg_low, rl_array.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_array.reg.GetReg(), opt_flags);
bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
int reg_len = INVALID_REG;
@@ -1047,6 +1087,9 @@ void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
// NOTE: max live temps(4) here.
/* Get len */
LoadWordDisp(rl_array.reg.GetReg(), len_offset, reg_len);
+ MarkPossibleNullPointerException(opt_flags);
+ } else {
+ ForceImplicitNullCheck(rl_array.reg.GetReg(), opt_flags);
}
/* at this point, reg_ptr points to array, 2 live temps */
if (rl_src.wide || rl_src.fp || constant_index) {
@@ -1073,6 +1116,7 @@ void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
} else {
StoreBaseDisp(reg_ptr, data_offset, rl_src.reg.GetReg(), size);
}
+ MarkPossibleNullPointerException(opt_flags);
} else {
/* reg_ptr -> array data */
OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg.GetReg(), data_offset);
@@ -1083,6 +1127,7 @@ void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
}
StoreBaseIndexed(reg_ptr, rl_index.reg.GetReg(), rl_src.reg.GetReg(),
scale, size);
+ MarkPossibleNullPointerException(opt_flags);
}
if (allocated_reg_ptr_temp) {
FreeTemp(reg_ptr);
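The decomposition in the GenMulLong comment above can be checked in portable C++; this sketch is for illustration only. Only three 32-bit multiplies are needed because src1.hi * src2.hi shifts entirely out of a 64-bit result:

#include <cstdint>

uint64_t Mul64(uint64_t a, uint64_t b) {
  uint32_t a_lo = static_cast<uint32_t>(a), a_hi = static_cast<uint32_t>(a >> 32);
  uint32_t b_lo = static_cast<uint32_t>(b), b_hi = static_cast<uint32_t>(b >> 32);
  uint64_t res = static_cast<uint64_t>(a_lo) * b_lo;   // dest = src1.lo * src2.lo
  uint32_t cross = a_hi * b_lo + a_lo * b_hi;          // tmp1 = hi*lo; tmp1 += lo*hi
  return res + (static_cast<uint64_t>(cross) << 32);   // dest.hi += tmp1
}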
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index 01d669b90c..7f8656a522 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -727,6 +727,13 @@ int ArmMir2Lir::LoadHelper(ThreadOffset offset) {
return rARM_LR;
}
+LIR* ArmMir2Lir::CheckSuspendUsingLoad() {
+ int tmp = r0;
+ LoadWordDisp(rARM_SELF, Thread::ThreadSuspendTriggerOffset().Int32Value(), tmp);
+ LIR* load2 = LoadWordDisp(tmp, 0, tmp);
+ return load2;
+}
+
uint64_t ArmMir2Lir::GetTargetInstFlags(int opcode) {
DCHECK(!IsPseudoLirOp(opcode));
return ArmMir2Lir::EncodingMap[opcode].flags;
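CheckSuspendUsingLoad() above implements suspend checks as a dependent pair of loads through a per-thread trigger pointer. A hedged sketch of the idea with illustrative names (FakeThread is not an ART type): to request a suspend, the runtime repoints the trigger at an inaccessible page, so the second load faults and the fault handler runs the real suspend logic.

#include <cstdint>

struct FakeThread {
  uintptr_t* suspend_trigger;  // normally points at a readable word
};

uintptr_t ImplicitSuspendCheck(FakeThread* self) {
  return *self->suspend_trigger;  // faults only when a suspend was requested
}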
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 34d3834682..9e5ec6e136 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -409,46 +409,52 @@ void Mir2Lir::InstallLiteralPools() {
// Push code and method literals, record offsets for the compiler to patch.
data_lir = code_literal_list_;
while (data_lir != NULL) {
- uint32_t target = data_lir->operands[0];
+ uint32_t target_method_idx = data_lir->operands[0];
+ const DexFile* target_dex_file =
+ reinterpret_cast<const DexFile*>(UnwrapPointer(data_lir->operands[1]));
cu_->compiler_driver->AddCodePatch(cu_->dex_file,
cu_->class_def_idx,
cu_->method_idx,
cu_->invoke_type,
- target,
- static_cast<InvokeType>(data_lir->operands[1]),
+ target_method_idx,
+ target_dex_file,
+ static_cast<InvokeType>(data_lir->operands[2]),
code_buffer_.size());
- const DexFile::MethodId& id = cu_->dex_file->GetMethodId(target);
+ const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx);
// unique value based on target to ensure code deduplication works
- PushPointer(code_buffer_, &id, cu_->target64);
+ PushPointer(code_buffer_, &target_method_id, cu_->target64);
data_lir = NEXT_LIR(data_lir);
}
data_lir = method_literal_list_;
while (data_lir != NULL) {
- uint32_t target = data_lir->operands[0];
+ uint32_t target_method_idx = data_lir->operands[0];
+ const DexFile* target_dex_file =
+ reinterpret_cast<const DexFile*>(UnwrapPointer(data_lir->operands[1]));
cu_->compiler_driver->AddMethodPatch(cu_->dex_file,
cu_->class_def_idx,
cu_->method_idx,
cu_->invoke_type,
- target,
- static_cast<InvokeType>(data_lir->operands[1]),
+ target_method_idx,
+ target_dex_file,
+ static_cast<InvokeType>(data_lir->operands[2]),
code_buffer_.size());
- const DexFile::MethodId& id = cu_->dex_file->GetMethodId(target);
+ const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx);
// unique value based on target to ensure code deduplication works
- PushPointer(code_buffer_, &id, cu_->target64);
+ PushPointer(code_buffer_, &target_method_id, cu_->target64);
data_lir = NEXT_LIR(data_lir);
}
// Push class literals.
data_lir = class_literal_list_;
while (data_lir != NULL) {
- uint32_t target = data_lir->operands[0];
+ uint32_t target_method_idx = data_lir->operands[0];
cu_->compiler_driver->AddClassPatch(cu_->dex_file,
cu_->class_def_idx,
cu_->method_idx,
- target,
+ target_method_idx,
code_buffer_.size());
- const DexFile::TypeId& id = cu_->dex_file->GetTypeId(target);
+ const DexFile::TypeId& target_method_id = cu_->dex_file->GetTypeId(target_method_idx);
// unique value based on target to ensure code deduplication works
- PushPointer(code_buffer_, &id, cu_->target64);
+ PushPointer(code_buffer_, &target_method_id, cu_->target64);
data_lir = NEXT_LIR(data_lir);
}
}
@@ -997,7 +1003,6 @@ Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena
fill_array_data_(arena, 4, kGrowableArrayFillArrayData),
throw_launchpads_(arena, 2048, kGrowableArrayThrowLaunchPads),
suspend_launchpads_(arena, 4, kGrowableArraySuspendLaunchPads),
- intrinsic_launchpads_(arena, 2048, kGrowableArrayMisc),
tempreg_info_(arena, 20, kGrowableArrayMisc),
reginfo_map_(arena, 64, kGrowableArrayMisc),
pointer_storage_(arena, 128, kGrowableArrayMisc),
@@ -1197,22 +1202,28 @@ void Mir2Lir::AddSlowPath(LIRSlowPath* slowpath) {
slow_paths_.Insert(slowpath);
}
-void Mir2Lir::LoadCodeAddress(int dex_method_index, InvokeType type, SpecialTargetRegister symbolic_reg) {
- LIR* data_target = ScanLiteralPool(code_literal_list_, dex_method_index, 0);
+void Mir2Lir::LoadCodeAddress(const MethodReference& target_method, InvokeType type,
+ SpecialTargetRegister symbolic_reg) {
+ int target_method_idx = target_method.dex_method_index;
+ LIR* data_target = ScanLiteralPool(code_literal_list_, target_method_idx, 0);
if (data_target == NULL) {
- data_target = AddWordData(&code_literal_list_, dex_method_index);
- data_target->operands[1] = type;
+ data_target = AddWordData(&code_literal_list_, target_method_idx);
+ data_target->operands[1] = WrapPointer(const_cast<DexFile*>(target_method.dex_file));
+ data_target->operands[2] = type;
}
LIR* load_pc_rel = OpPcRelLoad(TargetReg(symbolic_reg), data_target);
AppendLIR(load_pc_rel);
DCHECK_NE(cu_->instruction_set, kMips) << reinterpret_cast<void*>(data_target);
}
-void Mir2Lir::LoadMethodAddress(int dex_method_index, InvokeType type, SpecialTargetRegister symbolic_reg) {
- LIR* data_target = ScanLiteralPool(method_literal_list_, dex_method_index, 0);
+void Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type,
+ SpecialTargetRegister symbolic_reg) {
+ int target_method_idx = target_method.dex_method_index;
+ LIR* data_target = ScanLiteralPool(method_literal_list_, target_method_idx, 0);
if (data_target == NULL) {
- data_target = AddWordData(&method_literal_list_, dex_method_index);
- data_target->operands[1] = type;
+ data_target = AddWordData(&method_literal_list_, target_method_idx);
+ data_target->operands[1] = WrapPointer(const_cast<DexFile*>(target_method.dex_file));
+ data_target->operands[2] = type;
}
LIR* load_pc_rel = OpPcRelLoad(TargetReg(symbolic_reg), data_target);
AppendLIR(load_pc_rel);
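Restating the encoding change above as a sketch: code and method literals now carry three operands instead of two, so patching can resolve the target in the correct dex file. The free-standing helper is hypothetical; WrapPointer() is a Mir2Lir member in the tree.

void EncodeMethodLiteral(LIR* lit, const MethodReference& target, InvokeType type) {
  lit->operands[0] = target.dex_method_index;                             // method index
  lit->operands[1] = WrapPointer(const_cast<DexFile*>(target.dex_file));  // owning dex file
  lit->operands[2] = type;                                                // invoke type
}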
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 7423393e13..e50ba24ec3 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -14,7 +14,10 @@
* limitations under the License.
*/
+#include "dex_file_method_inliner.h"
+
#include <algorithm>
+
#include "base/macros.h"
#include "base/mutex.h"
#include "base/mutex-inl.h"
@@ -26,24 +29,8 @@
#include "verifier/method_verifier.h"
#include "verifier/method_verifier-inl.h"
-#include "dex_file_method_inliner.h"
-
namespace art {
-namespace { // anonymous namespace
-
-constexpr uint8_t kIGetIPutOpSizes[] = {
- kWord, // IGET, IPUT
- kLong, // IGET_WIDE, IPUT_WIDE
- kWord, // IGET_OBJECT, IPUT_OBJECT
- kSignedByte, // IGET_BOOLEAN, IPUT_BOOLEAN
- kSignedByte, // IGET_BYTE, IPUT_BYTE
- kUnsignedHalf, // IGET_CHAR, IPUT_CHAR
- kSignedHalf, // IGET_SHORT, IPUT_SHORT
-};
-
-} // anonymous namespace
-
const uint32_t DexFileMethodInliner::kIndexUnresolved;
const char* const DexFileMethodInliner::kClassCacheNames[] = {
"Z", // kClassCacheBoolean
@@ -271,56 +258,10 @@ DexFileMethodInliner::~DexFileMethodInliner() {
bool DexFileMethodInliner::AnalyseMethodCode(verifier::MethodVerifier* verifier) {
InlineMethod method;
- bool success = AnalyseMethodCode(verifier, &method);
+ bool success = InlineMethodAnalyser::AnalyseMethodCode(verifier, &method);
return success && AddInlineMethod(verifier->GetMethodReference().dex_method_index, method);
}
-bool DexFileMethodInliner::AnalyseMethodCode(verifier::MethodVerifier* verifier,
- InlineMethod* method) {
- // We currently support only plain return or 2-instruction methods.
-
- const DexFile::CodeItem* code_item = verifier->CodeItem();
- DCHECK_NE(code_item->insns_size_in_code_units_, 0u);
- const Instruction* instruction = Instruction::At(code_item->insns_);
- Instruction::Code opcode = instruction->Opcode();
-
- switch (opcode) {
- case Instruction::RETURN_VOID:
- method->opcode = kInlineOpNop;
- method->flags = kInlineSpecial;
- method->d.data = 0u;
- return true;
- case Instruction::RETURN:
- case Instruction::RETURN_OBJECT:
- case Instruction::RETURN_WIDE:
- return AnalyseReturnMethod(code_item, method);
- case Instruction::CONST:
- case Instruction::CONST_4:
- case Instruction::CONST_16:
- case Instruction::CONST_HIGH16:
- // TODO: Support wide constants (RETURN_WIDE).
- return AnalyseConstMethod(code_item, method);
- case Instruction::IGET:
- case Instruction::IGET_OBJECT:
- case Instruction::IGET_BOOLEAN:
- case Instruction::IGET_BYTE:
- case Instruction::IGET_CHAR:
- case Instruction::IGET_SHORT:
- case Instruction::IGET_WIDE:
- return AnalyseIGetMethod(verifier, method);
- case Instruction::IPUT:
- case Instruction::IPUT_OBJECT:
- case Instruction::IPUT_BOOLEAN:
- case Instruction::IPUT_BYTE:
- case Instruction::IPUT_CHAR:
- case Instruction::IPUT_SHORT:
- case Instruction::IPUT_WIDE:
- return AnalyseIPutMethod(verifier, method);
- default:
- return false;
- }
-}
-
bool DexFileMethodInliner::IsIntrinsic(uint32_t method_index) {
ReaderMutexLock mu(Thread::Current(), lock_);
auto it = inline_methods_.find(method_index);
@@ -543,160 +484,4 @@ bool DexFileMethodInliner::AddInlineMethod(int32_t method_idx, const InlineMetho
}
}
-bool DexFileMethodInliner::AnalyseReturnMethod(const DexFile::CodeItem* code_item,
- InlineMethod* result) {
- const Instruction* return_instruction = Instruction::At(code_item->insns_);
- Instruction::Code return_opcode = return_instruction->Opcode();
- uint16_t size = (return_opcode == Instruction::RETURN_WIDE) ? kLong : kWord;
- uint16_t is_object = (return_opcode == Instruction::RETURN_OBJECT) ? 1u : 0u;
- uint32_t reg = return_instruction->VRegA_11x();
- uint32_t arg_start = code_item->registers_size_ - code_item->ins_size_;
- DCHECK_GE(reg, arg_start);
- DCHECK_LT(size == kLong ? reg + 1 : reg, code_item->registers_size_);
-
- result->opcode = kInlineOpReturnArg;
- result->flags = kInlineSpecial;
- InlineReturnArgData* data = &result->d.return_data;
- data->arg = reg - arg_start;
- data->op_size = size;
- data->is_object = is_object;
- data->reserved = 0u;
- data->reserved2 = 0u;
- return true;
-}
-
-bool DexFileMethodInliner::AnalyseConstMethod(const DexFile::CodeItem* code_item,
- InlineMethod* result) {
- const Instruction* instruction = Instruction::At(code_item->insns_);
- const Instruction* return_instruction = instruction->Next();
- Instruction::Code return_opcode = return_instruction->Opcode();
- if (return_opcode != Instruction::RETURN &&
- return_opcode != Instruction::RETURN_OBJECT) {
- return false;
- }
-
- uint32_t return_reg = return_instruction->VRegA_11x();
- DCHECK_LT(return_reg, code_item->registers_size_);
-
- uint32_t vA, vB, dummy;
- uint64_t dummy_wide;
- instruction->Decode(vA, vB, dummy_wide, dummy, nullptr);
- if (instruction->Opcode() == Instruction::CONST_HIGH16) {
- vB <<= 16;
- }
- DCHECK_LT(vA, code_item->registers_size_);
- if (vA != return_reg) {
- return false; // Not returning the value set by const?
- }
- if (return_opcode == Instruction::RETURN_OBJECT && vB != 0) {
- return false; // Returning non-null reference constant?
- }
- result->opcode = kInlineOpNonWideConst;
- result->flags = kInlineSpecial;
- result->d.data = static_cast<uint64_t>(vB);
- return true;
-}
-
-bool DexFileMethodInliner::AnalyseIGetMethod(verifier::MethodVerifier* verifier,
- InlineMethod* result) {
- const DexFile::CodeItem* code_item = verifier->CodeItem();
- const Instruction* instruction = Instruction::At(code_item->insns_);
- Instruction::Code opcode = instruction->Opcode();
- DCHECK_LT(static_cast<size_t>(opcode - Instruction::IGET), arraysize(kIGetIPutOpSizes));
- uint16_t size = kIGetIPutOpSizes[opcode - Instruction::IGET];
-
- const Instruction* return_instruction = instruction->Next();
- Instruction::Code return_opcode = return_instruction->Opcode();
- if (!(return_opcode == Instruction::RETURN && size != kLong) &&
- !(return_opcode == Instruction::RETURN_WIDE && size == kLong) &&
- !(return_opcode == Instruction::RETURN_OBJECT && opcode == Instruction::IGET_OBJECT)) {
- return false;
- }
-
- uint32_t return_reg = return_instruction->VRegA_11x();
- DCHECK_LT(return_opcode == Instruction::RETURN_WIDE ? return_reg + 1 : return_reg,
- code_item->registers_size_);
-
- uint32_t dst_reg = instruction->VRegA_22c();
- uint32_t object_reg = instruction->VRegB_22c();
- uint32_t field_idx = instruction->VRegC_22c();
- uint32_t arg_start = code_item->registers_size_ - code_item->ins_size_;
- DCHECK_GE(object_reg, arg_start);
- DCHECK_LT(object_reg, code_item->registers_size_);
- DCHECK_LT(size == kLong ? dst_reg + 1 : dst_reg, code_item->registers_size_);
- if (dst_reg != return_reg) {
- return false; // Not returning the value retrieved by IGET?
- }
-
- if ((verifier->GetAccessFlags() & kAccStatic) != 0 || object_reg != arg_start) {
- // TODO: Support inlining IGET on other register than "this".
- return false;
- }
-
- if (!CompilerDriver::ComputeSpecialAccessorInfo(field_idx, false, verifier,
- &result->d.ifield_data)) {
- return false;
- }
-
- result->opcode = kInlineOpIGet;
- result->flags = kInlineSpecial;
- InlineIGetIPutData* data = &result->d.ifield_data;
- data->op_size = size;
- data->is_object = (opcode == Instruction::IGET_OBJECT) ? 1u : 0u;
- data->object_arg = object_reg - arg_start; // Allow IGET on any register, not just "this".
- data->src_arg = 0;
- data->method_is_static = (verifier->GetAccessFlags() & kAccStatic) != 0;
- data->reserved = 0;
- return true;
-}
-
-bool DexFileMethodInliner::AnalyseIPutMethod(verifier::MethodVerifier* verifier,
- InlineMethod* result) {
- const DexFile::CodeItem* code_item = verifier->CodeItem();
- const Instruction* instruction = Instruction::At(code_item->insns_);
- Instruction::Code opcode = instruction->Opcode();
- DCHECK_LT(static_cast<size_t>(opcode - Instruction::IPUT), arraysize(kIGetIPutOpSizes));
- uint16_t size = kIGetIPutOpSizes[opcode - Instruction::IPUT];
-
- const Instruction* return_instruction = instruction->Next();
- if (return_instruction->Opcode() != Instruction::RETURN_VOID) {
- // TODO: Support returning an argument.
- // This is needed by builder classes and generated accessor setters.
- // builder.setX(value): iput value, this, fieldX; return-object this;
- // object.access$nnn(value): iput value, this, fieldX; return value;
- // Use InlineIGetIPutData::reserved to hold the information.
- return false;
- }
-
- uint32_t src_reg = instruction->VRegA_22c();
- uint32_t object_reg = instruction->VRegB_22c();
- uint32_t field_idx = instruction->VRegC_22c();
- uint32_t arg_start = code_item->registers_size_ - code_item->ins_size_;
- DCHECK_GE(object_reg, arg_start);
- DCHECK_LT(object_reg, code_item->registers_size_);
- DCHECK_GE(src_reg, arg_start);
- DCHECK_LT(size == kLong ? src_reg + 1 : src_reg, code_item->registers_size_);
-
- if ((verifier->GetAccessFlags() & kAccStatic) != 0 || object_reg != arg_start) {
- // TODO: Support inlining IPUT on other register than "this".
- return false;
- }
-
- if (!CompilerDriver::ComputeSpecialAccessorInfo(field_idx, true, verifier,
- &result->d.ifield_data)) {
- return false;
- }
-
- result->opcode = kInlineOpIPut;
- result->flags = kInlineSpecial;
- InlineIGetIPutData* data = &result->d.ifield_data;
- data->op_size = size;
- data->is_object = (opcode == Instruction::IPUT_OBJECT) ? 1u : 0u;
- data->object_arg = object_reg - arg_start; // Allow IPUT on any register, not just "this".
- data->src_arg = src_reg - arg_start;
- data->method_is_static = (verifier->GetAccessFlags() & kAccStatic) != 0;
- data->reserved = 0;
- return true;
-}
-
} // namespace art
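
The pattern matching removed above now lives behind InlineMethodAnalyser (pulled in via quick/inline_method_analyser.h in the header diff below); this file keeps only the caching in AddInlineMethod. For orientation, a simplified sketch of the two-instruction getter shape the analyser accepts, distilled from the deleted AnalyseIGetMethod; the IsIGetOpcode/IsReturnOpcode helpers are placeholders, not real ART functions:

    // Hedged sketch: a special getter is "iget vA, vThis, field" followed by
    // a return of exactly the register that was loaded.
    bool LooksLikeTrivialGetter(const Instruction* first) {
      if (!IsIGetOpcode(first->Opcode())) return false;   // placeholder predicate
      const Instruction* ret = first->Next();
      if (!IsReturnOpcode(ret->Opcode())) return false;   // placeholder predicate
      return ret->VRegA_11x() == first->VRegA_22c();      // returns what it loaded
    }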
diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h
index 4aff01c066..a6d4cab393 100644
--- a/compiler/dex/quick/dex_file_method_inliner.h
+++ b/compiler/dex/quick/dex_file_method_inliner.h
@@ -23,6 +23,7 @@
#include "safe_map.h"
#include "dex/compiler_enums.h"
#include "dex_file.h"
+#include "quick/inline_method_analyser.h"
namespace art {
@@ -33,102 +34,6 @@ class MethodVerifier;
struct CallInfo;
class Mir2Lir;
-enum InlineMethodOpcode : uint16_t {
- kIntrinsicDoubleCvt,
- kIntrinsicFloatCvt,
- kIntrinsicReverseBytes,
- kIntrinsicAbsInt,
- kIntrinsicAbsLong,
- kIntrinsicAbsFloat,
- kIntrinsicAbsDouble,
- kIntrinsicMinMaxInt,
- kIntrinsicSqrt,
- kIntrinsicCharAt,
- kIntrinsicCompareTo,
- kIntrinsicIsEmptyOrLength,
- kIntrinsicIndexOf,
- kIntrinsicCurrentThread,
- kIntrinsicPeek,
- kIntrinsicPoke,
- kIntrinsicCas,
- kIntrinsicUnsafeGet,
- kIntrinsicUnsafePut,
-
- kInlineOpNop,
- kInlineOpReturnArg,
- kInlineOpNonWideConst,
- kInlineOpIGet,
- kInlineOpIPut,
-};
-std::ostream& operator<<(std::ostream& os, const InlineMethodOpcode& rhs);
-
-enum InlineMethodFlags : uint16_t {
- kNoInlineMethodFlags = 0x0000,
- kInlineIntrinsic = 0x0001,
- kInlineSpecial = 0x0002,
-};
-
-// IntrinsicFlags are stored in InlineMethod::d::raw_data
-enum IntrinsicFlags {
- kIntrinsicFlagNone = 0,
-
- // kIntrinsicMinMaxInt
- kIntrinsicFlagMax = kIntrinsicFlagNone,
- kIntrinsicFlagMin = 1,
-
- // kIntrinsicIsEmptyOrLength
- kIntrinsicFlagLength = kIntrinsicFlagNone,
- kIntrinsicFlagIsEmpty = kIntrinsicFlagMin,
-
- // kIntrinsicIndexOf
- kIntrinsicFlagBase0 = kIntrinsicFlagMin,
-
- // kIntrinsicUnsafeGet, kIntrinsicUnsafePut, kIntrinsicUnsafeCas
- kIntrinsicFlagIsLong = kIntrinsicFlagMin,
- // kIntrinsicUnsafeGet, kIntrinsicUnsafePut
- kIntrinsicFlagIsVolatile = 2,
- // kIntrinsicUnsafePut, kIntrinsicUnsafeCas
- kIntrinsicFlagIsObject = 4,
- // kIntrinsicUnsafePut
- kIntrinsicFlagIsOrdered = 8,
-};
-
-// Check that OpSize fits into 3 bits (at least the values the inliner uses).
-COMPILE_ASSERT(kWord < 8 && kLong < 8 && kSingle < 8 && kDouble < 8 && kUnsignedHalf < 8 &&
- kSignedHalf < 8 && kUnsignedByte < 8 && kSignedByte < 8, op_size_field_too_narrow);
-
-struct InlineIGetIPutData {
- uint16_t op_size : 3; // OpSize
- uint16_t is_object : 1;
- uint16_t object_arg : 4;
- uint16_t src_arg : 4; // iput only
- uint16_t method_is_static : 1;
- uint16_t reserved : 3;
- uint16_t field_idx;
- uint32_t is_volatile : 1;
- uint32_t field_offset : 31;
-};
-COMPILE_ASSERT(sizeof(InlineIGetIPutData) == sizeof(uint64_t), InvalidSizeOfInlineIGetIPutData);
-
-struct InlineReturnArgData {
- uint16_t arg;
- uint16_t op_size : 3; // OpSize
- uint16_t is_object : 1;
- uint16_t reserved : 12;
- uint32_t reserved2;
-};
-COMPILE_ASSERT(sizeof(InlineReturnArgData) == sizeof(uint64_t), InvalidSizeOfInlineReturnArgData);
-
-struct InlineMethod {
- InlineMethodOpcode opcode;
- InlineMethodFlags flags;
- union {
- uint64_t data;
- InlineIGetIPutData ifield_data;
- InlineReturnArgData return_data;
- } d;
-};
-
/**
* Handles inlining of methods from a particular DexFile.
*
@@ -157,17 +62,6 @@ class DexFileMethodInliner {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
/**
- * Analyse method code to determine if the method is a candidate for inlining.
- * If it is, record the inlining data.
- *
- * @param verifier the method verifier holding data about the method to analyse.
- * @param method placeholder for the inline method data.
- * @return true if the method is a candidate for inlining, false otherwise.
- */
- bool AnalyseMethodCode(verifier::MethodVerifier* verifier, InlineMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
-
- /**
* Check whether a particular method index corresponds to an intrinsic function.
*/
bool IsIntrinsic(uint32_t method_index) LOCKS_EXCLUDED(lock_);
@@ -392,13 +286,6 @@ class DexFileMethodInliner {
bool AddInlineMethod(int32_t method_idx, const InlineMethod& method) LOCKS_EXCLUDED(lock_);
- static bool AnalyseReturnMethod(const DexFile::CodeItem* code_item, InlineMethod* result);
- static bool AnalyseConstMethod(const DexFile::CodeItem* code_item, InlineMethod* result);
- static bool AnalyseIGetMethod(verifier::MethodVerifier* verifier, InlineMethod* result)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static bool AnalyseIPutMethod(verifier::MethodVerifier* verifier, InlineMethod* result)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
ReaderWriterMutex lock_;
/*
* Maps method indexes (for the particular DexFile) to Intrinsic definitions.
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 00c51d40d3..1e2199187f 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -66,12 +66,45 @@ LIR* Mir2Lir::GenImmedCheck(ConditionCode c_code, int reg, int imm_val, ThrowKin
return branch;
}
+
/* Perform null-check on a register. */
-LIR* Mir2Lir::GenNullCheck(int s_reg, int m_reg, int opt_flags) {
- if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
- return NULL;
+LIR* Mir2Lir::GenNullCheck(int m_reg, int opt_flags) {
+ if (Runtime::Current()->ExplicitNullChecks()) {
+ if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
+ return NULL;
+ }
+ return GenImmedCheck(kCondEq, m_reg, 0, kThrowNullPointer);
+ }
+ return nullptr;
+}
+
+void Mir2Lir::MarkPossibleNullPointerException(int opt_flags) {
+ if (!Runtime::Current()->ExplicitNullChecks()) {
+ if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
+ return;
+ }
+ MarkSafepointPC(last_lir_insn_);
+ }
+}
+
+void Mir2Lir::MarkPossibleStackOverflowException() {
+ if (!Runtime::Current()->ExplicitStackOverflowChecks()) {
+ MarkSafepointPC(last_lir_insn_);
+ }
+}
+
+void Mir2Lir::ForceImplicitNullCheck(int reg, int opt_flags) {
+ if (!Runtime::Current()->ExplicitNullChecks()) {
+ if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
+ return;
+ }
+ // Force an implicit null check by performing a memory operation (load) from the given
+ // register with offset 0. This will cause a signal if the register contains 0 (null).
+ int tmp = AllocTemp();
+ LIR* load = LoadWordDisp(reg, 0, tmp);
+ FreeTemp(tmp);
+ MarkSafepointPC(load);
}
- return GenImmedCheck(kCondEq, m_reg, 0, kThrowNullPointer);
}
/* Perform check on two registers */
@@ -581,24 +614,6 @@ void Mir2Lir::HandleSuspendLaunchPads() {
}
}
-void Mir2Lir::HandleIntrinsicLaunchPads() {
- int num_elems = intrinsic_launchpads_.Size();
- for (int i = 0; i < num_elems; i++) {
- ResetRegPool();
- ResetDefTracking();
- LIR* lab = intrinsic_launchpads_.Get(i);
- CallInfo* info = reinterpret_cast<CallInfo*>(UnwrapPointer(lab->operands[0]));
- current_dalvik_offset_ = info->offset;
- AppendLIR(lab);
- // NOTE: GenInvoke handles MarkSafepointPC
- GenInvoke(info);
- LIR* resume_lab = reinterpret_cast<LIR*>(UnwrapPointer(lab->operands[2]));
- if (resume_lab != NULL) {
- OpUnconditionalBranch(resume_lab);
- }
- }
-}
-
void Mir2Lir::HandleThrowLaunchPads() {
int num_elems = throw_launchpads_.Size();
for (int i = 0; i < num_elems; i++) {
@@ -698,12 +713,14 @@ void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size,
rl_obj = LoadValue(rl_obj, kCoreReg);
if (is_long_or_double) {
DCHECK(rl_dest.wide);
- GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_obj.reg.GetReg(), opt_flags);
if (cu_->instruction_set == kX86) {
rl_result = EvalLoc(rl_dest, reg_class, true);
- GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_obj.reg.GetReg(), opt_flags);
LoadBaseDispWide(rl_obj.reg.GetReg(), field_info.FieldOffset().Int32Value(),
- rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), rl_obj.s_reg_low);
+ rl_result.reg.GetReg(),
+ rl_result.reg.GetHighReg(), rl_obj.s_reg_low);
+ MarkPossibleNullPointerException(opt_flags);
if (field_info.IsVolatile()) {
GenMemBarrier(kLoadLoad);
}
@@ -721,9 +738,10 @@ void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size,
StoreValueWide(rl_dest, rl_result);
} else {
rl_result = EvalLoc(rl_dest, reg_class, true);
- GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_obj.reg.GetReg(), opt_flags);
LoadBaseDisp(rl_obj.reg.GetReg(), field_info.FieldOffset().Int32Value(),
rl_result.reg.GetReg(), kWord, rl_obj.s_reg_low);
+ MarkPossibleNullPointerException(opt_flags);
if (field_info.IsVolatile()) {
GenMemBarrier(kLoadLoad);
}
@@ -757,25 +775,27 @@ void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
if (is_long_or_double) {
int reg_ptr;
rl_src = LoadValueWide(rl_src, kAnyReg);
- GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_obj.reg.GetReg(), opt_flags);
reg_ptr = AllocTemp();
OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg.GetReg(), field_info.FieldOffset().Int32Value());
if (field_info.IsVolatile()) {
GenMemBarrier(kStoreStore);
}
StoreBaseDispWide(reg_ptr, 0, rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+ MarkPossibleNullPointerException(opt_flags);
if (field_info.IsVolatile()) {
GenMemBarrier(kLoadLoad);
}
FreeTemp(reg_ptr);
} else {
rl_src = LoadValue(rl_src, reg_class);
- GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_obj.reg.GetReg(), opt_flags);
if (field_info.IsVolatile()) {
GenMemBarrier(kStoreStore);
}
StoreBaseDisp(rl_obj.reg.GetReg(), field_info.FieldOffset().Int32Value(),
- rl_src.reg.GetReg(), kWord);
+ rl_src.reg.GetReg(), kWord);
+ MarkPossibleNullPointerException(opt_flags);
if (field_info.IsVolatile()) {
GenMemBarrier(kLoadLoad);
}
@@ -1927,12 +1947,6 @@ void Mir2Lir::GenConversionCall(ThreadOffset func_offset,
* functions
*/
FlushAllRegs(); /* Send everything to home location */
- if (rl_src.wide) {
- LoadValueDirectWideFixed(rl_src, rl_src.fp ? TargetReg(kFArg0) : TargetReg(kArg0),
- rl_src.fp ? TargetReg(kFArg1) : TargetReg(kArg1));
- } else {
- LoadValueDirectFixed(rl_src, rl_src.fp ? TargetReg(kFArg0) : TargetReg(kArg0));
- }
CallRuntimeHelperRegLocation(func_offset, rl_src, false);
if (rl_dest.wide) {
RegLocation rl_result;
@@ -1947,31 +1961,53 @@ void Mir2Lir::GenConversionCall(ThreadOffset func_offset,
/* Check if we need to check for pending suspend request */
void Mir2Lir::GenSuspendTest(int opt_flags) {
- if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
- return;
+ if (Runtime::Current()->ExplicitSuspendChecks()) {
+ if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
+ return;
+ }
+ FlushAllRegs();
+ LIR* branch = OpTestSuspend(NULL);
+ LIR* ret_lab = NewLIR0(kPseudoTargetLabel);
+ LIR* target = RawLIR(current_dalvik_offset_, kPseudoSuspendTarget, WrapPointer(ret_lab),
+ current_dalvik_offset_);
+ branch->target = target;
+ suspend_launchpads_.Insert(target);
+ } else {
+ if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
+ return;
+ }
+ FlushAllRegs(); // TODO: needed?
+ LIR* inst = CheckSuspendUsingLoad();
+ MarkSafepointPC(inst);
}
- FlushAllRegs();
- LIR* branch = OpTestSuspend(NULL);
- LIR* ret_lab = NewLIR0(kPseudoTargetLabel);
- LIR* target = RawLIR(current_dalvik_offset_, kPseudoSuspendTarget, WrapPointer(ret_lab),
- current_dalvik_offset_);
- branch->target = target;
- suspend_launchpads_.Insert(target);
}
/* Check if we need to check for pending suspend request */
void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) {
- if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
+ if (Runtime::Current()->ExplicitSuspendChecks()) {
+ if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
+ OpUnconditionalBranch(target);
+ return;
+ }
+ OpTestSuspend(target);
+ LIR* launch_pad =
+ RawLIR(current_dalvik_offset_, kPseudoSuspendTarget, WrapPointer(target),
+ current_dalvik_offset_);
+ FlushAllRegs();
+ OpUnconditionalBranch(launch_pad);
+ suspend_launchpads_.Insert(launch_pad);
+ } else {
+ // For the implicit suspend check, just perform the trigger
+ // load and branch to the target.
+ if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
+ OpUnconditionalBranch(target);
+ return;
+ }
+ FlushAllRegs();
+ LIR* inst = CheckSuspendUsingLoad();
+ MarkSafepointPC(inst);
OpUnconditionalBranch(target);
- return;
}
- OpTestSuspend(target);
- LIR* launch_pad =
- RawLIR(current_dalvik_offset_, kPseudoSuspendTarget, WrapPointer(target),
- current_dalvik_offset_);
- FlushAllRegs();
- OpUnconditionalBranch(launch_pad);
- suspend_launchpads_.Insert(launch_pad);
}
/* Call out to helper assembly routine that will null check obj and then lock it. */
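
The new Mark/Force helpers above implement the implicit-check scheme: when explicit null checks are disabled, no compare-and-branch is emitted, and the first real load through the object register doubles as the check. A null base faults, and the fault handler turns the SIGSEGV into a managed NullPointerException at the PC recorded by MarkSafepointPC(). A rough sketch of the handler-side idea (an illustration only; every name here is an assumption, not ART's fault handler):

    // Hedged sketch of converting a fault on the null page into an NPE.
    void OnSigsegv(uintptr_t fault_address, uintptr_t fault_pc) {
      const uintptr_t kNullPageLimit = 4096;       // assumption: low page unmapped
      if (fault_address < kNullPageLimit && IsMarkedSafepoint(fault_pc)) {
        RedirectToNullPointerThrowStub(fault_pc);  // assumption: resumes in a throw stub
      }
      // Otherwise: a genuine crash, fall through to the default handler.
    }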
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 1907012679..92c13cef2a 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -34,6 +34,32 @@ namespace art {
* and "op" calls may be used here.
*/
+void Mir2Lir::AddIntrinsicLaunchpad(CallInfo* info, LIR* branch, LIR* resume) {
+ class IntrinsicLaunchpadPath : public Mir2Lir::LIRSlowPath {
+ public:
+ IntrinsicLaunchpadPath(Mir2Lir* m2l, CallInfo* info, LIR* branch, LIR* resume = nullptr)
+ : LIRSlowPath(m2l, info->offset, branch, resume), info_(info) {
+ }
+
+ void Compile() {
+ m2l_->ResetRegPool();
+ m2l_->ResetDefTracking();
+ LIR* label = GenerateTargetLabel();
+ label->opcode = kPseudoIntrinsicRetry;
+ // NOTE: GenInvokeNoInline() handles MarkSafepointPC.
+ m2l_->GenInvokeNoInline(info_);
+ if (cont_ != nullptr) {
+ m2l_->OpUnconditionalBranch(cont_);
+ }
+ }
+
+ private:
+ CallInfo* const info_;
+ };
+
+ AddSlowPath(new (arena_) IntrinsicLaunchpadPath(this, info, branch, resume));
+}
+
/*
* To save scheduling time, helper calls are broken into two parts: generation of
 * the helper target address, and the actual call to the helper. Because x86
@@ -76,10 +102,11 @@ void Mir2Lir::CallRuntimeHelperReg(ThreadOffset helper_offset, int arg0, bool sa
void Mir2Lir::CallRuntimeHelperRegLocation(ThreadOffset helper_offset, RegLocation arg0,
bool safepoint_pc) {
int r_tgt = CallHelperSetup(helper_offset);
- if (arg0.wide == 0) {
- LoadValueDirectFixed(arg0, TargetReg(kArg0));
+ if (arg0.wide) {
+ LoadValueDirectWideFixed(arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0),
+ arg0.fp ? TargetReg(kFArg1) : TargetReg(kArg1));
} else {
- LoadValueDirectWideFixed(arg0, TargetReg(kArg0), TargetReg(kArg1));
+ LoadValueDirectFixed(arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0));
}
ClobberCallerSave();
CallHelper(r_tgt, helper_offset, safepoint_pc);
@@ -386,14 +413,12 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
}
} else if (cu->instruction_set != kX86) {
- CHECK_EQ(cu->dex_file, target_method.dex_file);
- cg->LoadCodeAddress(target_method.dex_method_index, type, kInvokeTgt);
+ cg->LoadCodeAddress(target_method, type, kInvokeTgt);
}
if (direct_method != static_cast<unsigned int>(-1)) {
cg->LoadConstant(cg->TargetReg(kArg0), direct_method);
} else {
- CHECK_EQ(cu->dex_file, target_method.dex_file);
- cg->LoadMethodAddress(target_method.dex_method_index, type, kArg0);
+ cg->LoadMethodAddress(target_method, type, kArg0);
}
break;
default:
@@ -413,9 +438,8 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
if (direct_code != static_cast<unsigned int>(-1)) {
cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
} else if (cu->instruction_set != kX86) {
- CHECK_EQ(cu->dex_file, target_method.dex_file);
CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
- cg->LoadCodeAddress(target_method.dex_method_index, type, kInvokeTgt);
+ cg->LoadCodeAddress(target_method, type, kInvokeTgt);
}
}
break;
@@ -466,10 +490,11 @@ static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
break;
}
case 1: // Is "this" null? [use kArg1]
- cg->GenNullCheck(info->args[0].s_reg_low, cg->TargetReg(kArg1), info->opt_flags);
+ cg->GenNullCheck(cg->TargetReg(kArg1), info->opt_flags);
// get this->klass_ [use kArg1, set kInvokeTgt]
cg->LoadWordDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
cg->TargetReg(kInvokeTgt));
+ cg->MarkPossibleNullPointerException(info->opt_flags);
break;
case 2: // Get this->klass_->vtable [use kInvokeTgt, set kInvokeTgt]
cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), mirror::Class::VTableOffset().Int32Value(),
@@ -508,7 +533,6 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
switch (state) {
case 0: // Set target method index in case of conflict [set kHiddenArg, kHiddenFpArg (x86)]
- CHECK_EQ(cu->dex_file, target_method.dex_file);
CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
cg->LoadConstant(cg->TargetReg(kHiddenArg), target_method.dex_method_index);
if (cu->instruction_set == kX86) {
@@ -521,10 +545,11 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
break;
}
case 2: // Is "this" null? [use kArg1]
- cg->GenNullCheck(info->args[0].s_reg_low, cg->TargetReg(kArg1), info->opt_flags);
+ cg->GenNullCheck(cg->TargetReg(kArg1), info->opt_flags);
// Get this->klass_ [use kArg1, set kInvokeTgt]
cg->LoadWordDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
cg->TargetReg(kInvokeTgt));
+ cg->MarkPossibleNullPointerException(info->opt_flags);
break;
case 3: // Get this->klass_->imtable [use kInvokeTgt, set kInvokeTgt]
cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), mirror::Class::ImTableOffset().Int32Value(),
@@ -731,7 +756,7 @@ int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
type, skip_this);
if (pcrLabel) {
- *pcrLabel = GenNullCheck(info->args[0].s_reg_low, TargetReg(kArg1), info->opt_flags);
+ *pcrLabel = GenNullCheck(TargetReg(kArg1), info->opt_flags);
}
return call_state;
}
@@ -935,7 +960,7 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
direct_code, direct_method, type);
if (pcrLabel) {
- *pcrLabel = GenNullCheck(info->args[0].s_reg_low, TargetReg(kArg1), info->opt_flags);
+ *pcrLabel = GenNullCheck(TargetReg(kArg1), info->opt_flags);
}
return call_state;
}
@@ -982,9 +1007,9 @@ bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
rl_idx = LoadValue(rl_idx, kCoreReg);
}
int reg_max;
- GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), info->opt_flags);
+ GenNullCheck(rl_obj.reg.GetReg(), info->opt_flags);
bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
- LIR* launch_pad = NULL;
+ LIR* range_check_branch = nullptr;
int reg_off = INVALID_REG;
int reg_ptr = INVALID_REG;
if (cu_->instruction_set != kX86) {
@@ -993,30 +1018,29 @@ bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
if (range_check) {
reg_max = AllocTemp();
LoadWordDisp(rl_obj.reg.GetReg(), count_offset, reg_max);
+ MarkPossibleNullPointerException(info->opt_flags);
}
LoadWordDisp(rl_obj.reg.GetReg(), offset_offset, reg_off);
+ MarkPossibleNullPointerException(info->opt_flags);
LoadWordDisp(rl_obj.reg.GetReg(), value_offset, reg_ptr);
if (range_check) {
// Set up a launch pad to allow retry in case of bounds violation.
- launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
- intrinsic_launchpads_.Insert(launch_pad);
OpRegReg(kOpCmp, rl_idx.reg.GetReg(), reg_max);
FreeTemp(reg_max);
- OpCondBranch(kCondUge, launch_pad);
+ range_check_branch = OpCondBranch(kCondUge, nullptr);
}
OpRegImm(kOpAdd, reg_ptr, data_offset);
} else {
if (range_check) {
// On x86, we can compare to memory directly
// Set up a launch pad to allow retry in case of bounds violation.
- launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
- intrinsic_launchpads_.Insert(launch_pad);
if (rl_idx.is_const) {
- OpCmpMemImmBranch(kCondUlt, INVALID_REG, rl_obj.reg.GetReg(), count_offset,
- mir_graph_->ConstantValue(rl_idx.orig_sreg), launch_pad);
+ range_check_branch = OpCmpMemImmBranch(
+ kCondUlt, INVALID_REG, rl_obj.reg.GetReg(), count_offset,
+ mir_graph_->ConstantValue(rl_idx.orig_sreg), nullptr);
} else {
OpRegMem(kOpCmp, rl_idx.reg.GetReg(), rl_obj.reg.GetReg(), count_offset);
- OpCondBranch(kCondUge, launch_pad);
+ range_check_branch = OpCondBranch(kCondUge, nullptr);
}
}
reg_off = AllocTemp();
@@ -1045,10 +1069,10 @@ bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
FreeTemp(reg_ptr);
StoreValue(rl_dest, rl_result);
if (range_check) {
- launch_pad->operands[2] = 0; // no resumption
+ DCHECK(range_check_branch != nullptr);
+ info->opt_flags |= MIR_IGNORE_NULL_CHECK; // Record that we've already null checked.
+ AddIntrinsicLaunchpad(info, range_check_branch);
}
- // Record that we've already inlined & null checked
- info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
return true;
}
@@ -1063,8 +1087,10 @@ bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) {
rl_obj = LoadValue(rl_obj, kCoreReg);
RegLocation rl_dest = InlineTarget(info);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), info->opt_flags);
- LoadWordDisp(rl_obj.reg.GetReg(), mirror::String::CountOffset().Int32Value(), rl_result.reg.GetReg());
+ GenNullCheck(rl_obj.reg.GetReg(), info->opt_flags);
+ LoadWordDisp(rl_obj.reg.GetReg(), mirror::String::CountOffset().Int32Value(),
+ rl_result.reg.GetReg());
+ MarkPossibleNullPointerException(info->opt_flags);
if (is_empty) {
// dst = (dst == 0);
if (cu_->instruction_set == kThumb2) {
@@ -1232,7 +1258,7 @@ bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info) {
}
/*
- * Fast string.index_of(I) & (II). Tests for simple case of char <= 0xffff,
+ * Fast String.indexOf(I) & (II). Tests for simple case of char <= 0xFFFF,
* otherwise bails to standard library code.
*/
bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
@@ -1240,14 +1266,19 @@ bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
// TODO - add Mips implementation
return false;
}
+ RegLocation rl_obj = info->args[0];
+ RegLocation rl_char = info->args[1];
+ if (rl_char.is_const && (mir_graph_->ConstantValue(rl_char) & ~0xFFFF) != 0) {
+ // Code point beyond 0xFFFF. Punt to the real String.indexOf().
+ return false;
+ }
+
ClobberCallerSave();
LockCallTemps(); // Using fixed registers
int reg_ptr = TargetReg(kArg0);
int reg_char = TargetReg(kArg1);
int reg_start = TargetReg(kArg2);
- RegLocation rl_obj = info->args[0];
- RegLocation rl_char = info->args[1];
LoadValueDirectFixed(rl_obj, reg_ptr);
LoadValueDirectFixed(rl_char, reg_char);
if (zero_based) {
@@ -1257,16 +1288,21 @@ bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
LoadValueDirectFixed(rl_start, reg_start);
}
int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pIndexOf));
- GenNullCheck(rl_obj.s_reg_low, reg_ptr, info->opt_flags);
- LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
- intrinsic_launchpads_.Insert(launch_pad);
- OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, launch_pad);
+ GenNullCheck(reg_ptr, info->opt_flags);
+ LIR* high_code_point_branch =
+ rl_char.is_const ? nullptr : OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, nullptr);
// NOTE: not a safepoint
OpReg(kOpBlx, r_tgt);
- LIR* resume_tgt = NewLIR0(kPseudoTargetLabel);
- launch_pad->operands[2] = WrapPointer(resume_tgt);
- // Record that we've already inlined & null checked
- info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
+ if (!rl_char.is_const) {
+ // Add the slow path for code points beyond 0xFFFF.
+ DCHECK(high_code_point_branch != nullptr);
+ LIR* resume_tgt = NewLIR0(kPseudoTargetLabel);
+ info->opt_flags |= MIR_IGNORE_NULL_CHECK; // Record that we've null checked.
+ AddIntrinsicLaunchpad(info, high_code_point_branch, resume_tgt);
+ } else {
+ DCHECK_EQ(mir_graph_->ConstantValue(rl_char) & ~0xFFFF, 0);
+ DCHECK(high_code_point_branch == nullptr);
+ }
RegLocation rl_return = GetReturn(false);
RegLocation rl_dest = InlineTarget(info);
StoreValue(rl_dest, rl_return);
@@ -1290,20 +1326,17 @@ bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) {
LoadValueDirectFixed(rl_cmp, reg_cmp);
int r_tgt = (cu_->instruction_set != kX86) ?
LoadHelper(QUICK_ENTRYPOINT_OFFSET(pStringCompareTo)) : 0;
- GenNullCheck(rl_this.s_reg_low, reg_this, info->opt_flags);
+ GenNullCheck(reg_this, info->opt_flags);
+ info->opt_flags |= MIR_IGNORE_NULL_CHECK; // Record that we've null checked.
// TUNING: check if rl_cmp.s_reg_low is already null checked
- LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
- intrinsic_launchpads_.Insert(launch_pad);
- OpCmpImmBranch(kCondEq, reg_cmp, 0, launch_pad);
+ LIR* cmp_null_check_branch = OpCmpImmBranch(kCondEq, reg_cmp, 0, nullptr);
+ AddIntrinsicLaunchpad(info, cmp_null_check_branch);
// NOTE: not a safepoint
if (cu_->instruction_set != kX86) {
OpReg(kOpBlx, r_tgt);
} else {
OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pStringCompareTo));
}
- launch_pad->operands[2] = 0; // No return possible
- // Record that we've already inlined & null checked
- info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
RegLocation rl_return = GetReturn(false);
RegLocation rl_dest = InlineTarget(info);
StoreValue(rl_dest, rl_return);
@@ -1390,13 +1423,15 @@ bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
}
void Mir2Lir::GenInvoke(CallInfo* info) {
- if (!(info->opt_flags & MIR_INLINED)) {
- DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
- if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
- ->GenIntrinsic(this, info)) {
- return;
- }
+ DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
+ if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
+ ->GenIntrinsic(this, info)) {
+ return;
}
+ GenInvokeNoInline(info);
+}
+
+void Mir2Lir::GenInvokeNoInline(CallInfo* info) {
int call_state = 0;
LIR* null_ck;
LIR** p_null_ck = NULL;
@@ -1457,8 +1492,7 @@ void Mir2Lir::GenInvoke(CallInfo* info) {
if (method_info.DirectCode() == static_cast<uintptr_t>(-1)) {
// We can have the linker fixup a call relative.
call_inst =
- reinterpret_cast<X86Mir2Lir*>(this)->CallWithLinkerFixup(
- target_method.dex_method_index, info->type);
+ reinterpret_cast<X86Mir2Lir*>(this)->CallWithLinkerFixup(target_method, info->type);
} else {
call_inst = OpMem(kOpBlx, TargetReg(kArg0),
mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value());
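
AddIntrinsicLaunchpad() above is the template for how every former launchpad array becomes a LIRSlowPath: the fast path emits a conditional branch with a null target, wraps it in a slow-path object, and HandleSlowPaths() later invokes Compile(), which plants the target label and emits the out-of-line retry. Sketched in the caller's terms (a fragment of the pattern, not a new API):

    // Fast path: branch target intentionally left null; the slow path owns it.
    LIR* branch = OpCmpImmBranch(kCondEq, reg_cmp, 0, nullptr);
    AddIntrinsicLaunchpad(info, branch);  // or pass a resume label to return here
    // ... main-line code continues; Compile() runs after the method body.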
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 61eb68dc21..28ebe0e460 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -30,6 +30,7 @@ class MipsMir2Lir : public Mir2Lir {
bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
RegLocation rl_dest, int lit);
int LoadHelper(ThreadOffset offset);
+ LIR* CheckSuspendUsingLoad() OVERRIDE;
LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg);
LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
int s_reg);
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index fec801bb4a..9fcc8bba41 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -471,7 +471,7 @@ void MipsMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
}
/* null object? */
- GenNullCheck(rl_array.s_reg_low, rl_array.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_array.reg.GetReg(), opt_flags);
int reg_ptr = AllocTemp();
bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
@@ -548,7 +548,7 @@ void MipsMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
}
/* null object? */
- GenNullCheck(rl_array.s_reg_low, rl_array.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_array.reg.GetReg(), opt_flags);
bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
int reg_len = INVALID_REG;
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index 4f495ee0fc..b7fb2f4512 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -512,6 +512,14 @@ int MipsMir2Lir::LoadHelper(ThreadOffset offset) {
return r_T9;
}
+LIR* MipsMir2Lir::CheckSuspendUsingLoad() {
+ int tmp = AllocTemp();
+ LoadWordDisp(rMIPS_SELF, Thread::ThreadSuspendTriggerOffset().Int32Value(), tmp);
+ LIR *inst = LoadWordDisp(tmp, 0, tmp);
+ FreeTemp(tmp);
+ return inst;
+}
+
void MipsMir2Lir::SpillCoreRegs() {
if (num_core_spills_ == 0) {
return;
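
CheckSuspendUsingLoad() above makes the suspend poll a two-load sequence: read the thread's suspend-trigger pointer, then load through it. The runtime requests suspension by pointing the trigger at an unreadable page, so the second load faults and the fault handler parks the thread. A self-contained sketch of the mechanism (field and function names are assumptions, not the runtime's):

    // Hedged sketch: the per-thread trigger behind ThreadSuspendTriggerOffset().
    #include <cstdint>
    struct ThreadSketch {
      uintptr_t* suspend_trigger;  // normally points at a readable word
    };
    inline void PollForSuspension(ThreadSketch* self) {
      // Faults (and suspends the thread) iff the trigger was redirected.
      volatile uintptr_t probe = *self->suspend_trigger;
      (void)probe;
    }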
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 40ed5ef535..538c292c41 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -123,8 +123,8 @@ bool Mir2Lir::GenSpecialIGet(MIR* mir, const InlineMethod& special) {
return false;
}
- DCHECK_NE(data.op_size, kDouble); // The inliner doesn't distinguish kDouble, uses kLong.
- bool wide = (data.op_size == kLong);
+ bool wide = (data.op_variant == InlineMethodAnalyser::IGetVariant(Instruction::IGET_WIDE));
+ // The inliner doesn't distinguish kDouble from kFloat; use the shorty.
bool double_or_float = cu_->shorty[0] == 'F' || cu_->shorty[0] == 'D';
// Point of no return - no aborts after this
@@ -151,8 +151,7 @@ bool Mir2Lir::GenSpecialIPut(MIR* mir, const InlineMethod& special) {
return false;
}
- DCHECK_NE(data.op_size, kDouble); // The inliner doesn't distinguish kDouble, uses kLong.
- bool wide = (data.op_size == kLong);
+ bool wide = (data.op_variant == InlineMethodAnalyser::IPutVariant(Instruction::IPUT_WIDE));
// Point of no return - no aborts after this
GenPrintLabel(mir);
@@ -173,7 +172,7 @@ bool Mir2Lir::GenSpecialIPut(MIR* mir, const InlineMethod& special) {
if (data.is_volatile) {
GenMemBarrier(kLoadLoad);
}
- if (data.is_object) {
+ if (data.op_variant == InlineMethodAnalyser::IPutVariant(Instruction::IPUT_OBJECT)) {
MarkGCCard(reg_src, reg_obj);
}
return true;
@@ -181,8 +180,8 @@ bool Mir2Lir::GenSpecialIPut(MIR* mir, const InlineMethod& special) {
bool Mir2Lir::GenSpecialIdentity(MIR* mir, const InlineMethod& special) {
const InlineReturnArgData& data = special.d.return_data;
- DCHECK_NE(data.op_size, kDouble); // The inliner doesn't distinguish kDouble, uses kLong.
- bool wide = (data.op_size == kLong);
+ bool wide = (data.is_wide != 0u);
+ // The inliner doesn't distinguish kDouble from kFloat; use the shorty.
bool double_or_float = cu_->shorty[0] == 'F' || cu_->shorty[0] == 'D';
// Point of no return - no aborts after this
@@ -438,7 +437,7 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
int len_offset;
len_offset = mirror::Array::LengthOffset().Int32Value();
rl_src[0] = LoadValue(rl_src[0], kCoreReg);
- GenNullCheck(rl_src[0].s_reg_low, rl_src[0].reg.GetReg(), opt_flags);
+ GenNullCheck(rl_src[0].reg.GetReg(), opt_flags);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
LoadWordDisp(rl_src[0].reg.GetReg(), len_offset, rl_result.reg.GetReg());
StoreValue(rl_dest, rl_result);
@@ -1089,8 +1088,6 @@ void Mir2Lir::MethodMIR2LIR() {
HandleSuspendLaunchPads();
HandleThrowLaunchPads();
-
- HandleIntrinsicLaunchPads();
}
//
@@ -1098,10 +1095,10 @@ void Mir2Lir::MethodMIR2LIR() {
//
LIR* Mir2Lir::LIRSlowPath::GenerateTargetLabel() {
- LIR* target = m2l_->RawLIR(current_dex_pc_, kPseudoTargetLabel);
- m2l_->AppendLIR(target);
- fromfast_->target = target;
m2l_->SetCurrentDexPc(current_dex_pc_);
+ LIR* target = m2l_->NewLIR0(kPseudoTargetLabel);
+ fromfast_->target = target;
return target;
}
+
} // namespace art
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 6955577670..42d7f59b03 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -82,6 +82,7 @@ typedef uint32_t CodeOffset; // Native code offset in bytes.
#define REG_USE_SP (1ULL << kRegUseSP)
#define SETS_CCODES (1ULL << kSetsCCodes)
#define USES_CCODES (1ULL << kUsesCCodes)
+#define USE_FP_STACK (1ULL << kUseFpStack)
// Common combo register usage patterns.
#define REG_DEF01 (REG_DEF0 | REG_DEF1)
@@ -535,18 +536,21 @@ class Mir2Lir : public Backend {
RegisterInfo* GetRegInfo(int reg);
// Shared by all targets - implemented in gen_common.cc.
+ void AddIntrinsicLaunchpad(CallInfo* info, LIR* branch, LIR* resume = nullptr);
bool HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
RegLocation rl_src, RegLocation rl_dest, int lit);
bool HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit);
void HandleSuspendLaunchPads();
- void HandleIntrinsicLaunchPads();
void HandleThrowLaunchPads();
void HandleSlowPaths();
void GenBarrier();
LIR* GenCheck(ConditionCode c_code, ThrowKind kind);
LIR* GenImmedCheck(ConditionCode c_code, int reg, int imm_val,
ThrowKind kind);
- LIR* GenNullCheck(int s_reg, int m_reg, int opt_flags);
+ LIR* GenNullCheck(int m_reg, int opt_flags);
+ void MarkPossibleNullPointerException(int opt_flags);
+ void MarkPossibleStackOverflowException();
+ void ForceImplicitNullCheck(int reg, int opt_flags);
LIR* GenRegRegCheck(ConditionCode c_code, int reg1, int reg2,
ThrowKind kind);
void GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
@@ -637,6 +641,7 @@ class Mir2Lir : public Backend {
RegLocation arg2,
bool safepoint_pc);
void GenInvoke(CallInfo* info);
+ void GenInvokeNoInline(CallInfo* info);
void FlushIns(RegLocation* ArgLocs, RegLocation rl_method);
int GenDalvikArgsNoRange(CallInfo* info, int call_state, LIR** pcrLabel,
NextCallInsn next_call_insn,
@@ -751,22 +756,22 @@ class Mir2Lir : public Backend {
/*
* @brief Load the address of the dex method into the register.
- * @param dex_method_index The index of the method to be invoked.
+ * @param target_method The MethodReference of the method to be invoked.
* @param type How the method will be invoked.
* @param register that will contain the code address.
* @note register will be passed to TargetReg to get physical register.
*/
- void LoadCodeAddress(int dex_method_index, InvokeType type,
+ void LoadCodeAddress(const MethodReference& target_method, InvokeType type,
SpecialTargetRegister symbolic_reg);
/*
* @brief Load the Method* of a dex method into the register.
- * @param dex_method_index The index of the method to be invoked.
+ * @param target_method The MethodReference of the method to be invoked.
* @param type How the method will be invoked.
* @param register that will contain the code address.
* @note register will be passed to TargetReg to get physical register.
*/
- virtual void LoadMethodAddress(int dex_method_index, InvokeType type,
+ virtual void LoadMethodAddress(const MethodReference& target_method, InvokeType type,
SpecialTargetRegister symbolic_reg);
/*
@@ -795,6 +800,7 @@ class Mir2Lir : public Backend {
virtual bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
RegLocation rl_src, RegLocation rl_dest, int lit) = 0;
virtual int LoadHelper(ThreadOffset offset) = 0;
+ virtual LIR* CheckSuspendUsingLoad() = 0;
virtual LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg) = 0;
virtual LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
int s_reg) = 0;
@@ -1196,7 +1202,6 @@ class Mir2Lir : public Backend {
GrowableArray<FillArrayData*> fill_array_data_;
GrowableArray<LIR*> throw_launchpads_;
GrowableArray<LIR*> suspend_launchpads_;
- GrowableArray<LIR*> intrinsic_launchpads_;
GrowableArray<RegisterInfo*> tempreg_info_;
GrowableArray<RegisterInfo*> reginfo_map_;
GrowableArray<void*> pointer_storage_;
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index 538ce0d183..f6c8a00dff 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -298,10 +298,10 @@ ENCODING_MAP(Cmp, IS_LOAD, 0, 0,
{ kX86PsllqRI, kRegImm, IS_BINARY_OP | REG_DEF0_USE0, { 0x66, 0, 0x0F, 0x73, 0, 6, 0, 1 }, "PsllqRI", "!0r,!1d" },
{ kX86SqrtsdRR, kRegReg, IS_BINARY_OP | REG_DEF0_USE1, { 0xF2, 0, 0x0F, 0x51, 0, 0, 0, 0 }, "SqrtsdRR", "!0r,!1r" },
- { kX86Fild32M, kMem, IS_LOAD | IS_UNARY_OP | REG_USE0, { 0x0, 0, 0xDB, 0x00, 0, 0, 0, 0 }, "Fild32M", "[!0r,!1d]" },
- { kX86Fild64M, kMem, IS_LOAD | IS_UNARY_OP | REG_USE0, { 0x0, 0, 0xDF, 0x00, 0, 5, 0, 0 }, "Fild64M", "[!0r,!1d]" },
- { kX86Fstp32M, kMem, IS_STORE | IS_UNARY_OP | REG_USE0, { 0x0, 0, 0xD9, 0x00, 0, 3, 0, 0 }, "FstpdM", "[!0r,!1d]" },
- { kX86Fstp64M, kMem, IS_STORE | IS_UNARY_OP | REG_USE0, { 0x0, 0, 0xDD, 0x00, 0, 3, 0, 0 }, "FstpdM", "[!0r,!1d]" },
+ { kX86Fild32M, kMem, IS_LOAD | IS_UNARY_OP | REG_USE0 | USE_FP_STACK, { 0x0, 0, 0xDB, 0x00, 0, 0, 0, 0 }, "Fild32M", "[!0r,!1d]" },
+ { kX86Fild64M, kMem, IS_LOAD | IS_UNARY_OP | REG_USE0 | USE_FP_STACK, { 0x0, 0, 0xDF, 0x00, 0, 5, 0, 0 }, "Fild64M", "[!0r,!1d]" },
+ { kX86Fstp32M, kMem, IS_STORE | IS_UNARY_OP | REG_USE0 | USE_FP_STACK, { 0x0, 0, 0xD9, 0x00, 0, 3, 0, 0 }, "FstpsM", "[!0r,!1d]" },
+ { kX86Fstp64M, kMem, IS_STORE | IS_UNARY_OP | REG_USE0 | USE_FP_STACK, { 0x0, 0, 0xDD, 0x00, 0, 3, 0, 0 }, "FstpdM", "[!0r,!1d]" },
EXT_0F_ENCODING_MAP(Movups, 0x0, 0x10, REG_DEF0),
{ kX86MovupsMR, kMemReg, IS_STORE | IS_TERTIARY_OP | REG_USE02, { 0x0, 0, 0x0F, 0x11, 0, 0, 0, 0 }, "MovupsMR", "[!0r+!1d],!2r" },
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 7cc2c08b96..275a2d934a 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -30,6 +30,7 @@ class X86Mir2Lir : public Mir2Lir {
bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
RegLocation rl_dest, int lit);
int LoadHelper(ThreadOffset offset);
+ LIR* CheckSuspendUsingLoad() OVERRIDE;
LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg);
LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
int s_reg);
@@ -274,12 +275,12 @@ class X86Mir2Lir : public Mir2Lir {
/*
* @brief Load the Method* of a dex method into the register.
- * @param dex_method_index The index of the method to be invoked.
+ * @param target_method The MethodReference of the method to be invoked.
* @param type How the method will be invoked.
* @param register that will contain the code address.
* @note register will be passed to TargetReg to get physical register.
*/
- void LoadMethodAddress(int dex_method_index, InvokeType type,
+ void LoadMethodAddress(const MethodReference& target_method, InvokeType type,
SpecialTargetRegister symbolic_reg);
/*
@@ -292,11 +293,11 @@ class X86Mir2Lir : public Mir2Lir {
/*
* @brief Generate a relative call to the method that will be patched at link time.
- * @param dex_method_index The index of the method to be invoked.
+ * @param target_method The MethodReference of the method to be invoked.
* @param type How the method will be invoked.
* @returns Call instruction
*/
- LIR * CallWithLinkerFixup(int dex_method_index, InvokeType type);
+ LIR * CallWithLinkerFixup(const MethodReference& target_method, InvokeType type);
/*
* @brief Handle x86 specific literals
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index d7eeeac110..a67c43c90e 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -1298,7 +1298,7 @@ void X86Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
}
/* null object? */
- GenNullCheck(rl_array.s_reg_low, rl_array.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_array.reg.GetReg(), opt_flags);
if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
if (constant_index) {
@@ -1352,7 +1352,7 @@ void X86Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
}
/* null object? */
- GenNullCheck(rl_array.s_reg_low, rl_array.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_array.reg.GetReg(), opt_flags);
if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
if (constant_index) {
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 083fccb2b4..9994927d08 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -191,6 +191,11 @@ void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags) {
SetupRegMask(&lir->u.m.use_mask, rDI);
SetupRegMask(&lir->u.m.def_mask, rDI);
}
+
+ if (flags & USE_FP_STACK) {
+ lir->u.m.use_mask |= ENCODE_X86_FP_STACK;
+ lir->u.m.def_mask |= ENCODE_X86_FP_STACK;
+ }
}
/* For dumping instructions */
@@ -552,6 +557,11 @@ int X86Mir2Lir::LoadHelper(ThreadOffset offset) {
return INVALID_REG;
}
+LIR* X86Mir2Lir::CheckSuspendUsingLoad() {
+ LOG(FATAL) << "Unexpected use of CheckSuspendUsingLoad in x86";
+ return nullptr;
+}
+
uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode) {
DCHECK(!IsPseudoLirOp(opcode));
return X86Mir2Lir::EncodingMap[opcode].flags;
@@ -832,19 +842,22 @@ void X86Mir2Lir::Materialize() {
Mir2Lir::Materialize();
}
-void X86Mir2Lir::LoadMethodAddress(int dex_method_index, InvokeType type,
+void X86Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type,
SpecialTargetRegister symbolic_reg) {
/*
* For x86, just generate a 32 bit move immediate instruction, that will be filled
* in at 'link time'. For now, put a unique value based on target to ensure that
* code deduplication works.
*/
- const DexFile::MethodId& id = cu_->dex_file->GetMethodId(dex_method_index);
- uintptr_t ptr = reinterpret_cast<uintptr_t>(&id);
+ int target_method_idx = target_method.dex_method_index;
+ const DexFile* target_dex_file = target_method.dex_file;
+ const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx);
+ uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id);
- // Generate the move instruction with the unique pointer and save index and type.
+ // Generate the move instruction with the unique pointer and save index, dex_file, and type.
LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg),
- static_cast<int>(ptr), dex_method_index, type);
+ static_cast<int>(target_method_id_ptr), target_method_idx,
+ WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
AppendLIR(move);
method_address_insns_.Insert(move);
}
@@ -865,18 +878,20 @@ void X86Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic
class_type_address_insns_.Insert(move);
}
-LIR *X86Mir2Lir::CallWithLinkerFixup(int dex_method_index, InvokeType type) {
+LIR *X86Mir2Lir::CallWithLinkerFixup(const MethodReference& target_method, InvokeType type) {
/*
* For x86, just generate a 32 bit call relative instruction, that will be filled
* in at 'link time'. For now, put a unique value based on target to ensure that
* code deduplication works.
*/
- const DexFile::MethodId& id = cu_->dex_file->GetMethodId(dex_method_index);
- uintptr_t ptr = reinterpret_cast<uintptr_t>(&id);
-
- // Generate the call instruction with the unique pointer and save index and type.
- LIR *call = RawLIR(current_dalvik_offset_, kX86CallI, static_cast<int>(ptr), dex_method_index,
- type);
+ int target_method_idx = target_method.dex_method_index;
+ const DexFile* target_dex_file = target_method.dex_file;
+ const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx);
+ uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id);
+
+ // Generate the call instruction with the unique pointer and save index, dex_file, and type.
+ LIR *call = RawLIR(current_dalvik_offset_, kX86CallI, static_cast<int>(target_method_id_ptr),
+ target_method_idx, WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
AppendLIR(call);
call_method_insns_.Insert(call);
return call;
@@ -892,13 +907,16 @@ void X86Mir2Lir::InstallLiteralPools() {
for (uint32_t i = 0; i < method_address_insns_.Size(); i++) {
LIR* p = method_address_insns_.Get(i);
DCHECK_EQ(p->opcode, kX86Mov32RI);
- uint32_t target = p->operands[2];
+ uint32_t target_method_idx = p->operands[2];
+ const DexFile* target_dex_file =
+ reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[3]));
// The offset to patch is the last 4 bytes of the instruction.
int patch_offset = p->offset + p->flags.size - 4;
cu_->compiler_driver->AddMethodPatch(cu_->dex_file, cu_->class_def_idx,
cu_->method_idx, cu_->invoke_type,
- target, static_cast<InvokeType>(p->operands[3]),
+ target_method_idx, target_dex_file,
+ static_cast<InvokeType>(p->operands[4]),
patch_offset);
}
@@ -906,25 +924,28 @@ void X86Mir2Lir::InstallLiteralPools() {
for (uint32_t i = 0; i < class_type_address_insns_.Size(); i++) {
LIR* p = class_type_address_insns_.Get(i);
DCHECK_EQ(p->opcode, kX86Mov32RI);
- uint32_t target = p->operands[2];
+ uint32_t target_method_idx = p->operands[2];
// The offset to patch is the last 4 bytes of the instruction.
int patch_offset = p->offset + p->flags.size - 4;
cu_->compiler_driver->AddClassPatch(cu_->dex_file, cu_->class_def_idx,
- cu_->method_idx, target, patch_offset);
+ cu_->method_idx, target_method_idx, patch_offset);
}
// And now the PC-relative calls to methods.
for (uint32_t i = 0; i < call_method_insns_.Size(); i++) {
LIR* p = call_method_insns_.Get(i);
DCHECK_EQ(p->opcode, kX86CallI);
- uint32_t target = p->operands[1];
+ uint32_t target_method_idx = p->operands[1];
+ const DexFile* target_dex_file =
+ reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[2]));
// The offset to patch is the last 4 bytes of the instruction.
int patch_offset = p->offset + p->flags.size - 4;
cu_->compiler_driver->AddRelativeCodePatch(cu_->dex_file, cu_->class_def_idx,
- cu_->method_idx, cu_->invoke_type, target,
- static_cast<InvokeType>(p->operands[2]),
+ cu_->method_idx, cu_->invoke_type,
+ target_method_idx, target_dex_file,
+ static_cast<InvokeType>(p->operands[3]),
patch_offset, -4 /* offset */);
}
@@ -964,22 +985,18 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
// Is the string non-NULL?
LoadValueDirectFixed(rl_obj, rDX);
- GenNullCheck(rl_obj.s_reg_low, rDX, info->opt_flags);
-
- // Record that we have inlined & null checked the object.
- info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
+ GenNullCheck(rDX, info->opt_flags);
+ info->opt_flags |= MIR_IGNORE_NULL_CHECK; // Record that we've null checked.
// Does the character fit in 16 bits?
- LIR* launch_pad = nullptr;
+ LIR* launchpad_branch = nullptr;
if (rl_char.is_const) {
// We need the value in EAX.
LoadConstantNoClobber(rAX, char_value);
} else {
// Character is not a constant; compare at runtime.
LoadValueDirectFixed(rl_char, rAX);
- launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
- intrinsic_launchpads_.Insert(launch_pad);
- OpCmpImmBranch(kCondGt, rAX, 0xFFFF, launch_pad);
+ launchpad_branch = OpCmpImmBranch(kCondGt, rAX, 0xFFFF, nullptr);
}
// From here down, we know that we are looking for a char that fits in 16 bits.
@@ -1028,7 +1045,7 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
} else {
// Compare to memory to avoid a register load. Handle pushed EDI.
int displacement = SRegOffset(rl_start.s_reg_low) + sizeof(uint32_t);
- OpRegMem(kOpCmp, rDX, rX86_SP, displacement);
+ OpRegMem(kOpCmp, rCX, rX86_SP, displacement);
length_compare = NewLIR2(kX86Jcc8, 0, kX86CondLe);
OpRegMem(kOpSub, rCX, rX86_SP, displacement);
}
@@ -1096,9 +1113,9 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
NewLIR1(kX86Pop32R, rDI);
// Out of line code returns here.
- if (launch_pad != nullptr) {
+ if (launchpad_branch != nullptr) {
LIR *return_point = NewLIR0(kPseudoTargetLabel);
- launch_pad->operands[2] = WrapPointer(return_point);
+ AddIntrinsicLaunchpad(info, launchpad_branch, return_point);
}
StoreValue(rl_dest, rl_return);
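
The operands[n] reshuffling above works because WrapPointer()/UnwrapPointer() let a full pointer ride in an int-sized LIR operand. Consistent with the pointer_storage_ array declared in mir_to_lir.h, the likely scheme is an index into a side table, sketched here, though the real bookkeeping may differ:

    // Hedged sketch: stash the pointer, store only its index in the operand.
    #include <vector>
    int WrapPointerSketch(std::vector<void*>* storage, void* pointer) {
      storage->push_back(pointer);
      return static_cast<int>(storage->size() - 1);
    }
    void* UnwrapPointerSketch(const std::vector<void*>& storage, int operand) {
      return storage[operand];  // e.g. the DexFile* saved by LoadMethodAddress()
    }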
diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h
index 09cbbeec82..9fb0044e36 100644
--- a/compiler/dex/quick/x86/x86_lir.h
+++ b/compiler/dex/quick/x86/x86_lir.h
@@ -130,12 +130,14 @@ enum X86ResourceEncodingPos {
kX86GPReg0 = 0,
kX86RegSP = 4,
kX86FPReg0 = 16, // xmm0 .. xmm7/xmm15.
- kX86FPRegEnd = 32,
- kX86RegEnd = kX86FPRegEnd,
+ kX86FPRegEnd = 32,
+ kX86FPStack = 33,
+ kX86RegEnd = kX86FPStack,
};
#define ENCODE_X86_REG_LIST(N) (static_cast<uint64_t>(N))
#define ENCODE_X86_REG_SP (1ULL << kX86RegSP)
+#define ENCODE_X86_FP_STACK (1ULL << kX86FPStack)
enum X86NativeRegisterPool {
r0 = 0,
diff --git a/compiler/driver/compiler_callbacks_impl.h b/compiler/driver/compiler_callbacks_impl.h
index ed6a9255b9..92adb20c1f 100644
--- a/compiler/driver/compiler_callbacks_impl.h
+++ b/compiler/driver/compiler_callbacks_impl.h
@@ -36,15 +36,7 @@ class CompilerCallbacksImpl FINAL : public CompilerCallbacks {
~CompilerCallbacksImpl() { }
bool MethodVerified(verifier::MethodVerifier* verifier)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE {
- bool result = verification_results_->ProcessVerifiedMethod(verifier);
- if (result) {
- MethodReference ref = verifier->GetMethodReference();
- method_inliner_map_->GetMethodInliner(ref.dex_file)
- ->AnalyseMethodCode(verifier);
- }
- return result;
- }
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
void ClassRejected(ClassReference ref) OVERRIDE {
verification_results_->AddRejectedClass(ref);
}
@@ -54,6 +46,16 @@ class CompilerCallbacksImpl FINAL : public CompilerCallbacks {
DexFileToMethodInlinerMap* const method_inliner_map_;
};
+inline bool CompilerCallbacksImpl::MethodVerified(verifier::MethodVerifier* verifier) {
+ bool result = verification_results_->ProcessVerifiedMethod(verifier);
+ if (result) {
+ MethodReference ref = verifier->GetMethodReference();
+ method_inliner_map_->GetMethodInliner(ref.dex_file)
+ ->AnalyseMethodCode(verifier);
+ }
+ return result;
+}
+
} // namespace art
#endif // ART_COMPILER_DRIVER_COMPILER_CALLBACKS_IMPL_H_
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 7c4a6f7c19..c2b6f5a97d 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -26,7 +26,7 @@
#include "base/stl_util.h"
#include "base/timing_logger.h"
#include "class_linker.h"
-#include "compiler_backend.h"
+#include "compiler.h"
#include "compiler_driver-inl.h"
#include "dex_compilation_unit.h"
#include "dex_file-inl.h"
@@ -324,7 +324,7 @@ extern "C" art::CompiledMethod* ArtCompileDEX(art::CompilerDriver& compiler,
CompilerDriver::CompilerDriver(const CompilerOptions* compiler_options,
VerificationResults* verification_results,
DexFileToMethodInlinerMap* method_inliner_map,
- CompilerBackend::Kind compiler_backend_kind,
+ Compiler::Kind compiler_kind,
InstructionSet instruction_set,
InstructionSetFeatures instruction_set_features,
bool image, DescriptorSet* image_classes, size_t thread_count,
@@ -333,7 +333,7 @@ CompilerDriver::CompilerDriver(const CompilerOptions* compiler_options,
: profile_ok_(false), compiler_options_(compiler_options),
verification_results_(verification_results),
method_inliner_map_(method_inliner_map),
- compiler_backend_(CompilerBackend::Create(compiler_backend_kind)),
+ compiler_(Compiler::Create(compiler_kind)),
instruction_set_(instruction_set),
instruction_set_features_(instruction_set_features),
freezing_constructor_lock_("freezing constructor lock"),
@@ -371,7 +371,7 @@ CompilerDriver::CompilerDriver(const CompilerOptions* compiler_options,
dex_to_dex_compiler_ = reinterpret_cast<DexToDexCompilerFn>(ArtCompileDEX);
- compiler_backend_->Init(*this);
+ compiler_->Init(*this);
CHECK(!Runtime::Current()->IsStarted());
if (!image_) {
@@ -380,7 +380,7 @@ CompilerDriver::CompilerDriver(const CompilerOptions* compiler_options,
// Are we generating CFI information?
if (compiler_options->GetGenerateGDBInformation()) {
- cfi_info_.reset(compiler_backend_->GetCallFrameInformationInitialization(*this));
+ cfi_info_.reset(compiler_->GetCallFrameInformationInitialization(*this));
}
}
@@ -430,7 +430,7 @@ CompilerDriver::~CompilerDriver() {
STLDeleteElements(&classes_to_patch_);
}
CHECK_PTHREAD_CALL(pthread_key_delete, (tls_key_), "delete tls key");
- compiler_backend_->UnInit(*this);
+ compiler_->UnInit(*this);
}
CompilerTls* CompilerDriver::GetTls() {
@@ -961,29 +961,6 @@ void CompilerDriver::ProcessedInvoke(InvokeType invoke_type, int flags) {
stats_->ProcessedInvoke(invoke_type, flags);
}
-bool CompilerDriver::ComputeSpecialAccessorInfo(uint32_t field_idx, bool is_put,
- verifier::MethodVerifier* verifier,
- InlineIGetIPutData* result) {
- mirror::DexCache* dex_cache = verifier->GetDexCache();
- uint32_t method_idx = verifier->GetMethodReference().dex_method_index;
- mirror::ArtMethod* method = dex_cache->GetResolvedMethod(method_idx);
- mirror::ArtField* field = dex_cache->GetResolvedField(field_idx);
- if (method == nullptr || field == nullptr || field->IsStatic()) {
- return false;
- }
- mirror::Class* method_class = method->GetDeclaringClass();
- mirror::Class* field_class = field->GetDeclaringClass();
- if (!method_class->CanAccessResolvedField(field_class, field, dex_cache, field_idx) ||
- (is_put && field->IsFinal() && method_class != field_class)) {
- return false;
- }
- DCHECK_GE(field->GetOffset().Int32Value(), 0);
- result->field_idx = field_idx;
- result->field_offset = field->GetOffset().Int32Value();
- result->is_volatile = field->IsVolatile();
- return true;
-}
-
bool CompilerDriver::ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit,
bool is_put, MemberOffset* field_offset,
bool* is_volatile) {
@@ -1077,7 +1054,7 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType
*direct_method = 0;
bool use_dex_cache = false;
const bool compiling_boot = Runtime::Current()->GetHeap()->IsCompilingBoot();
- if (compiler_backend_->IsPortable()) {
+ if (compiler_->IsPortable()) {
if (sharp_type != kStatic && sharp_type != kDirect) {
return;
}
@@ -1120,8 +1097,6 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType
if (target_method->dex_file == method->GetDeclaringClass()->GetDexCache()->GetDexFile()) {
target_method->dex_method_index = method->GetDexMethodIndex();
} else {
- // TODO: support patching from one dex file to another in the boot image.
- use_dex_cache = use_dex_cache || compiling_boot;
if (no_guarantee_of_dex_cache_entry) {
// See if the method is also declared in this dex cache.
uint32_t dex_method_idx = MethodHelper(method).FindDexMethodIndexInOtherDexFile(
@@ -1129,6 +1104,10 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType
if (dex_method_idx != DexFile::kDexNoIndex) {
target_method->dex_method_index = dex_method_idx;
} else {
+ if (compiling_boot) {
+ target_method->dex_method_index = method->GetDexMethodIndex();
+ target_method->dex_file = method->GetDeclaringClass()->GetDexCache()->GetDexFile();
+ }
must_use_direct_pointers = true;
}
}
@@ -1153,13 +1132,13 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType
CHECK(!method->IsAbstract());
*type = sharp_type;
*direct_method = reinterpret_cast<uintptr_t>(method);
- *direct_code = compiler_backend_->GetEntryPointOf(method);
+ *direct_code = compiler_->GetEntryPointOf(method);
target_method->dex_file = method->GetDeclaringClass()->GetDexCache()->GetDexFile();
target_method->dex_method_index = method->GetDexMethodIndex();
} else if (!must_use_direct_pointers) {
// Set the code and rely on the dex cache for the method.
*type = sharp_type;
- *direct_code = compiler_backend_->GetEntryPointOf(method);
+ *direct_code = compiler_->GetEntryPointOf(method);
} else {
// Direct pointers were required but none were available.
VLOG(compiler) << "Dex cache devirtualization failed for: " << PrettyMethod(method);
@@ -1254,6 +1233,7 @@ void CompilerDriver::AddCodePatch(const DexFile* dex_file,
uint32_t referrer_method_idx,
InvokeType referrer_invoke_type,
uint32_t target_method_idx,
+ const DexFile* target_dex_file,
InvokeType target_invoke_type,
size_t literal_offset) {
MutexLock mu(Thread::Current(), compiled_methods_lock_);
@@ -1262,6 +1242,7 @@ void CompilerDriver::AddCodePatch(const DexFile* dex_file,
referrer_method_idx,
referrer_invoke_type,
target_method_idx,
+ target_dex_file,
target_invoke_type,
literal_offset));
}
@@ -1270,6 +1251,7 @@ void CompilerDriver::AddRelativeCodePatch(const DexFile* dex_file,
uint32_t referrer_method_idx,
InvokeType referrer_invoke_type,
uint32_t target_method_idx,
+ const DexFile* target_dex_file,
InvokeType target_invoke_type,
size_t literal_offset,
int32_t pc_relative_offset) {
@@ -1279,6 +1261,7 @@ void CompilerDriver::AddRelativeCodePatch(const DexFile* dex_file,
referrer_method_idx,
referrer_invoke_type,
target_method_idx,
+ target_dex_file,
target_invoke_type,
literal_offset,
pc_relative_offset));
@@ -1288,6 +1271,7 @@ void CompilerDriver::AddMethodPatch(const DexFile* dex_file,
uint32_t referrer_method_idx,
InvokeType referrer_invoke_type,
uint32_t target_method_idx,
+ const DexFile* target_dex_file,
InvokeType target_invoke_type,
size_t literal_offset) {
MutexLock mu(Thread::Current(), compiled_methods_lock_);
@@ -1296,6 +1280,7 @@ void CompilerDriver::AddMethodPatch(const DexFile* dex_file,
referrer_method_idx,
referrer_invoke_type,
target_method_idx,
+ target_dex_file,
target_invoke_type,
literal_offset));
}
@@ -1884,19 +1869,21 @@ void CompilerDriver::CompileMethod(const DexFile::CodeItem* code_item, uint32_t
uint64_t start_ns = NanoTime();
if ((access_flags & kAccNative) != 0) {
-#if defined(__x86_64__)
- // leaving this empty will trigger the generic JNI version
-#else
- compiled_method = compiler_backend_->JniCompile(*this, access_flags, method_idx, dex_file);
- CHECK(compiled_method != NULL);
-#endif
+ // Are we interpreting only and do we have support for generic JNI down calls?
+ if ((compiler_options_->GetCompilerFilter() == CompilerOptions::kInterpretOnly) &&
+ (instruction_set_ == kX86_64)) {
+ // Leaving this empty will trigger the generic JNI version
+ } else {
+ compiled_method = compiler_->JniCompile(*this, access_flags, method_idx, dex_file);
+ CHECK(compiled_method != NULL);
+ }
} else if ((access_flags & kAccAbstract) != 0) {
} else {
MethodReference method_ref(&dex_file, method_idx);
bool compile = verification_results_->IsCandidateForCompilation(method_ref, access_flags);
if (compile) {
// NOTE: if the compiler declines to compile this method, it will return NULL.
- compiled_method = compiler_backend_->Compile(
+ compiled_method = compiler_->Compile(
*this, code_item, access_flags, invoke_type, class_def_idx,
method_idx, class_loader, dex_file);
} else if (dex_to_dex_compilation_level != kDontDexToDexCompile) {
@@ -1908,7 +1895,7 @@ void CompilerDriver::CompileMethod(const DexFile::CodeItem* code_item, uint32_t
}
}
uint64_t duration_ns = NanoTime() - start_ns;
- if (duration_ns > MsToNs(compiler_backend_->GetMaximumCompilationTimeBeforeWarning())) {
+ if (duration_ns > MsToNs(compiler_->GetMaximumCompilationTimeBeforeWarning())) {
LOG(WARNING) << "Compilation of " << PrettyMethod(method_idx, dex_file)
<< " took " << PrettyDuration(duration_ns);
}
@@ -1995,7 +1982,7 @@ bool CompilerDriver::WriteElf(const std::string& android_root,
OatWriter* oat_writer,
art::File* file)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return compiler_backend_->WriteElf(file, oat_writer, dex_files, android_root, is_host, *this);
+ return compiler_->WriteElf(file, oat_writer, dex_files, android_root, is_host, *this);
}
void CompilerDriver::InstructionSetToLLVMTarget(InstructionSet instruction_set,
std::string* target_triple,
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 26210c944c..d88b2aaf99 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -26,7 +26,7 @@
#include "class_reference.h"
#include "compiled_class.h"
#include "compiled_method.h"
-#include "compiler_backend.h"
+#include "compiler.h"
#include "dex_file.h"
#include "instruction_set.h"
#include "invoke_type.h"
@@ -99,7 +99,7 @@ class CompilerDriver {
explicit CompilerDriver(const CompilerOptions* compiler_options,
VerificationResults* verification_results,
DexFileToMethodInlinerMap* method_inliner_map,
- CompilerBackend::Kind compiler_backend_kind,
+ Compiler::Kind compiler_kind,
InstructionSet instruction_set,
InstructionSetFeatures instruction_set_features,
bool image, DescriptorSet* image_classes,
@@ -137,8 +137,8 @@ class CompilerDriver {
return *compiler_options_;
}
- CompilerBackend* GetCompilerBackend() const {
- return compiler_backend_.get();
+ Compiler* GetCompiler() const {
+ return compiler_.get();
}
bool ProfilePresent() const {
@@ -287,13 +287,6 @@ class CompilerDriver {
void ProcessedStaticField(bool resolved, bool local);
void ProcessedInvoke(InvokeType invoke_type, int flags);
- // Can we fast path instance field access in a verified accessor?
- // If yes, computes field's offset and volatility and whether the method is static or not.
- static bool ComputeSpecialAccessorInfo(uint32_t field_idx, bool is_put,
- verifier::MethodVerifier* verifier,
- InlineIGetIPutData* result)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
// Can we fast path instance field access? Computes field's offset and volatility.
bool ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit, bool is_put,
MemberOffset* field_offset, bool* is_volatile)
@@ -323,6 +316,7 @@ class CompilerDriver {
uint32_t referrer_method_idx,
InvokeType referrer_invoke_type,
uint32_t target_method_idx,
+ const DexFile* target_dex_file,
InvokeType target_invoke_type,
size_t literal_offset)
LOCKS_EXCLUDED(compiled_methods_lock_);
@@ -331,6 +325,7 @@ class CompilerDriver {
uint32_t referrer_method_idx,
InvokeType referrer_invoke_type,
uint32_t target_method_idx,
+ const DexFile* target_dex_file,
InvokeType target_invoke_type,
size_t literal_offset,
int32_t pc_relative_offset)
@@ -340,6 +335,7 @@ class CompilerDriver {
uint32_t referrer_method_idx,
InvokeType referrer_invoke_type,
uint32_t target_method_idx,
+ const DexFile* target_dex_file,
InvokeType target_invoke_type,
size_t literal_offset)
LOCKS_EXCLUDED(compiled_methods_lock_);
@@ -456,6 +452,9 @@ class CompilerDriver {
uint32_t GetTargetMethodIdx() const {
return target_method_idx_;
}
+ const DexFile* GetTargetDexFile() const {
+ return target_dex_file_;
+ }
InvokeType GetTargetInvokeType() const {
return target_invoke_type_;
}
@@ -479,18 +478,21 @@ class CompilerDriver {
uint32_t referrer_method_idx,
InvokeType referrer_invoke_type,
uint32_t target_method_idx,
+ const DexFile* target_dex_file,
InvokeType target_invoke_type,
size_t literal_offset)
: PatchInformation(dex_file, referrer_class_def_idx,
referrer_method_idx, literal_offset),
referrer_invoke_type_(referrer_invoke_type),
target_method_idx_(target_method_idx),
+ target_dex_file_(target_dex_file),
target_invoke_type_(target_invoke_type) {
}
private:
const InvokeType referrer_invoke_type_;
const uint32_t target_method_idx_;
+ const DexFile* target_dex_file_;
const InvokeType target_invoke_type_;
friend class CompilerDriver;
@@ -512,12 +514,13 @@ class CompilerDriver {
uint32_t referrer_method_idx,
InvokeType referrer_invoke_type,
uint32_t target_method_idx,
+ const DexFile* target_dex_file,
InvokeType target_invoke_type,
size_t literal_offset,
int32_t pc_relative_offset)
: CallPatchInformation(dex_file, referrer_class_def_idx,
- referrer_method_idx, referrer_invoke_type,
- target_method_idx, target_invoke_type, literal_offset),
+ referrer_method_idx, referrer_invoke_type, target_method_idx,
+ target_dex_file, target_invoke_type, literal_offset),
offset_(pc_relative_offset) {
}
@@ -708,7 +711,7 @@ class CompilerDriver {
VerificationResults* const verification_results_;
DexFileToMethodInlinerMap* const method_inliner_map_;
- UniquePtr<CompilerBackend> compiler_backend_;
+ UniquePtr<Compiler> compiler_;
const InstructionSet instruction_set_;
const InstructionSetFeatures instruction_set_features_;
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index f4b507a64a..5078182b63 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -639,14 +639,17 @@ void ImageWriter::FixupMethod(ArtMethod* orig, ArtMethod* copy) {
(!orig->IsStatic() || orig->IsConstructor() || orig->GetDeclaringClass()->IsInitialized())) {
// We have code for a non-static or initialized method, just use the code.
copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(quick_code);
- } else if (quick_code == nullptr && orig->IsNative() && !orig->IsStatic()) {
- // Non-static native method missing compiled code, use generic JNI version.
+ } else if (quick_code == nullptr && orig->IsNative() &&
+ (!orig->IsStatic() || orig->GetDeclaringClass()->IsInitialized())) {
+ // Non-static or initialized native method missing compiled code, use generic JNI version.
copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(GetOatAddress(quick_generic_jni_trampoline_offset_));
} else if (quick_code == nullptr && !orig->IsNative()) {
// We don't have code at all for a non-native method, use the interpreter.
copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(GetOatAddress(quick_to_interpreter_bridge_offset_));
} else {
- // We have code for a static method, but need to go through the resolution stub for class initialization.
+ CHECK(!orig->GetDeclaringClass()->IsInitialized());
+ // We have code for a static method, but need to go through the resolution stub for class
+ // initialization.
copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(GetOatAddress(quick_resolution_trampoline_offset_));
}
const byte* portable_code = GetOatAddress(orig->GetPortableOatCodeOffset());
@@ -754,20 +757,20 @@ static ArtMethod* GetTargetMethod(const CompilerDriver::CallPatchInformation* pa
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
Thread* self = Thread::Current();
- SirtRef<mirror::DexCache> dex_cache(self, class_linker->FindDexCache(patch->GetDexFile()));
+ SirtRef<mirror::DexCache> dex_cache(self, class_linker->FindDexCache(*patch->GetTargetDexFile()));
SirtRef<mirror::ClassLoader> class_loader(self, nullptr);
- ArtMethod* method = class_linker->ResolveMethod(patch->GetDexFile(),
+ ArtMethod* method = class_linker->ResolveMethod(*patch->GetTargetDexFile(),
patch->GetTargetMethodIdx(),
dex_cache,
class_loader,
NULL,
patch->GetTargetInvokeType());
CHECK(method != NULL)
- << patch->GetDexFile().GetLocation() << " " << patch->GetTargetMethodIdx();
+ << patch->GetTargetDexFile()->GetLocation() << " " << patch->GetTargetMethodIdx();
CHECK(!method->IsRuntimeMethod())
- << patch->GetDexFile().GetLocation() << " " << patch->GetTargetMethodIdx();
+ << patch->GetTargetDexFile()->GetLocation() << " " << patch->GetTargetMethodIdx();
CHECK(dex_cache->GetResolvedMethods()->Get(patch->GetTargetMethodIdx()) == method)
- << patch->GetDexFile().GetLocation() << " " << patch->GetReferrerMethodIdx() << " "
+ << patch->GetTargetDexFile()->GetLocation() << " " << patch->GetReferrerMethodIdx() << " "
<< PrettyMethod(dex_cache->GetResolvedMethods()->Get(patch->GetTargetMethodIdx())) << " "
<< PrettyMethod(method);
return method;
@@ -861,7 +864,7 @@ void ImageWriter::SetPatchLocation(const CompilerDriver::PatchInformation* patch
if (kIsDebugBuild) {
if (patch->IsCall()) {
const CompilerDriver::CallPatchInformation* cpatch = patch->AsCall();
- const DexFile::MethodId& id = cpatch->GetDexFile().GetMethodId(cpatch->GetTargetMethodIdx());
+ const DexFile::MethodId& id = cpatch->GetTargetDexFile()->GetMethodId(cpatch->GetTargetMethodIdx());
uint32_t expected = reinterpret_cast<uintptr_t>(&id) & 0xFFFFFFFF;
uint32_t actual = *patch_location;
CHECK(actual == expected || actual == value) << std::hex
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index df5afa223a..31acb69e31 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -591,7 +591,7 @@ jint Java_MyClassNatives_nativeUpCall(JNIEnv* env, jobject thisObj, jint i) {
// Build stack trace
jobject internal = Thread::Current()->CreateInternalStackTrace(soa);
- jobjectArray ste_array = Thread::InternalStackTraceToStackTraceElementArray(env, internal);
+ jobjectArray ste_array = Thread::InternalStackTraceToStackTraceElementArray(soa, internal);
mirror::ObjectArray<mirror::StackTraceElement>* trace_array =
soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(ste_array);
EXPECT_TRUE(trace_array != NULL);
diff --git a/compiler/llvm/compiler_llvm.cc b/compiler/llvm/compiler_llvm.cc
index 4ce714a183..2812700fa3 100644
--- a/compiler/llvm/compiler_llvm.cc
+++ b/compiler/llvm/compiler_llvm.cc
@@ -39,7 +39,7 @@
namespace art {
void CompileOneMethod(CompilerDriver& driver,
- CompilerBackend* compilerBackend,
+ Compiler* compiler,
const DexFile::CodeItem* code_item,
uint32_t access_flags, InvokeType invoke_type,
uint16_t class_def_idx, uint32_t method_idx, jobject class_loader,
@@ -142,7 +142,7 @@ CompileDexMethod(DexCompilationUnit* dex_compilation_unit, InvokeType invoke_typ
cunit->SetCompilerDriver(compiler_driver_);
// TODO: consolidate ArtCompileMethods
CompileOneMethod(*compiler_driver_,
- compiler_driver_->GetCompilerBackend(),
+ compiler_driver_->GetCompiler(),
dex_compilation_unit->GetCodeItem(),
dex_compilation_unit->GetAccessFlags(),
invoke_type,
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 93c35022f2..9cfef12b26 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -15,7 +15,7 @@
*/
#include "common_compiler_test.h"
-#include "compiler/compiler_backend.h"
+#include "compiler/compiler.h"
#include "compiler/oat_writer.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
@@ -84,9 +84,9 @@ TEST_F(OatTest, WriteRead) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
// TODO: make selectable.
- CompilerBackend::Kind compiler_backend = kUsePortableCompiler
- ? CompilerBackend::kPortable
- : CompilerBackend::kQuick;
+ Compiler::Kind compiler_kind = kUsePortableCompiler
+ ? Compiler::kPortable
+ : Compiler::kQuick;
InstructionSet insn_set = kIsTargetBuild ? kThumb2 : kX86;
InstructionSetFeatures insn_features;
@@ -99,7 +99,7 @@ TEST_F(OatTest, WriteRead) {
compiler_driver_.reset(new CompilerDriver(compiler_options_.get(),
verification_results_.get(),
method_inliner_map_.get(),
- compiler_backend, insn_set,
+ compiler_kind, insn_set,
insn_features, false, NULL, 2, true, true,
timer_.get()));
jobject class_loader = NULL;
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index ffd7b417e3..c5219a6f16 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -364,7 +364,7 @@ size_t OatWriter::InitOatCodeMethod(size_t offset, size_t oat_class_index,
OatClass* oat_class = oat_classes_[oat_class_index];
CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
- if (compiled_method != NULL) {
+ if (compiled_method != nullptr) {
const std::vector<uint8_t>* portable_code = compiled_method->GetPortableCode();
const std::vector<uint8_t>* quick_code = compiled_method->GetQuickCode();
if (portable_code != nullptr) {
@@ -495,6 +495,33 @@ size_t OatWriter::InitOatCodeMethod(size_t offset, size_t oat_class_index,
if (compiler_driver_->IsImage()) {
+ // Derive frame size and spill masks for native methods without code:
+ // These are generic JNI methods...
+ if (is_native && compiled_method == nullptr) {
+ // Compute the Sirt size as if putting _every_ reference into it, even null ones.
+ uint32_t s_len;
+ const char* shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx), &s_len);
+ DCHECK(shorty != nullptr);
+ uint32_t refs = 1; // Native method always has "this" or class.
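+ // The first shorty char is the return type, so scan from index 1 for reference ('L') arguments.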
+ for (uint32_t i = 1; i < s_len; ++i) {
+ if (shorty[i] == 'L') {
+ refs++;
+ }
+ }
+ size_t sirt_size = StackIndirectReferenceTable::GetAlignedSirtSize(refs);
+
+ // Get the generic spill masks and base frame size.
+ mirror::ArtMethod* callee_save_method =
+ Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
+
+ frame_size_in_bytes = callee_save_method->GetFrameSizeInBytes() + sirt_size;
+ core_spill_mask = callee_save_method->GetCoreSpillMask();
+ fp_spill_mask = callee_save_method->GetFpSpillMask();
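+ // Generic JNI methods carry no compiled-method metadata, so zero out the mapping, vmap and GC map offsets.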
+ mapping_table_offset = 0;
+ vmap_table_offset = 0;
+ gc_map_offset = 0;
+ }
+
ClassLinker* linker = Runtime::Current()->GetClassLinker();
// Unchecked as we hold mutator_lock_ on entry.
ScopedObjectAccessUnchecked soa(Thread::Current());
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 190c92562f..8c6a8cb19f 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -28,19 +28,25 @@ void HGraphBuilder::InitializeLocals(int count) {
for (int i = 0; i < count; i++) {
HLocal* local = new (arena_) HLocal(i);
entry_block_->AddInstruction(local);
- locals_.Put(0, local);
+ locals_.Put(i, local);
}
}
static bool CanHandleCodeItem(const DexFile::CodeItem& code_item) {
- if (code_item.tries_size_ > 0) return false;
- if (code_item.outs_size_ > 0) return false;
- if (code_item.ins_size_ > 0) return false;
+ if (code_item.tries_size_ > 0) {
+ return false;
+ } else if (code_item.outs_size_ > 0) {
+ return false;
+ } else if (code_item.ins_size_ > 0) {
+ return false;
+ }
return true;
}
HGraph* HGraphBuilder::BuildGraph(const DexFile::CodeItem& code_item) {
- if (!CanHandleCodeItem(code_item)) return nullptr;
+ if (!CanHandleCodeItem(code_item)) {
+ return nullptr;
+ }
const uint16_t* code_ptr = code_item.insns_;
const uint16_t* code_end = code_item.insns_ + code_item.insns_size_in_code_units_;
@@ -78,7 +84,9 @@ HGraph* HGraphBuilder::BuildGraph(const DexFile::CodeItem& code_item) {
void HGraphBuilder::MaybeUpdateCurrentBlock(size_t index) {
HBasicBlock* block = FindBlockStartingAt(index);
- if (block == nullptr) return;
+ if (block == nullptr) {
+ return;
+ }
if (current_block_ != nullptr) {
// Branching instructions clear current_block, so we know
@@ -131,7 +139,9 @@ HBasicBlock* HGraphBuilder::FindBlockStartingAt(int32_t index) const {
}
bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, int32_t dex_offset) {
- if (current_block_ == nullptr) return true; // Dead code
+ if (current_block_ == nullptr) {
+ return true; // Dead code
+ }
switch (instruction.Opcode()) {
case Instruction::CONST_4: {
@@ -140,11 +150,14 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, int32_
UpdateLocal(register_index, constant);
break;
}
- case Instruction::RETURN_VOID:
+
+ case Instruction::RETURN_VOID: {
current_block_->AddInstruction(new (arena_) HReturnVoid());
current_block_->AddSuccessor(exit_block_);
current_block_ = nullptr;
break;
+ }
+
case Instruction::IF_EQ: {
HInstruction* first = LoadLocal(instruction.VRegA());
HInstruction* second = LoadLocal(instruction.VRegB());
@@ -159,6 +172,7 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, int32_
current_block_ = nullptr;
break;
}
+
case Instruction::GOTO:
case Instruction::GOTO_16:
case Instruction::GOTO_32: {
@@ -169,8 +183,18 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, int32_
current_block_ = nullptr;
break;
}
+
+ case Instruction::RETURN: {
+ HInstruction* value = LoadLocal(instruction.VRegA());
+ current_block_->AddInstruction(new (arena_) HReturn(value));
+ current_block_->AddSuccessor(exit_block_);
+ current_block_ = nullptr;
+ break;
+ }
+
case Instruction::NOP:
break;
+
default:
return false;
}
@@ -178,15 +202,27 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, int32_
}
HIntConstant* HGraphBuilder::GetConstant0() {
- if (constant0_ != nullptr) return constant0_;
- HIntConstant* constant = new(arena_) HIntConstant(0);
- entry_block_->AddInstruction(constant);
- return constant;
+ if (constant0_ != nullptr) {
+ return constant0_;
+ }
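+ // Cache the instruction so later requests reuse the same HIntConstant.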
+ constant0_ = new(arena_) HIntConstant(0);
+ entry_block_->AddInstruction(constant0_);
+ return constant0_;
+}
+
+HIntConstant* HGraphBuilder::GetConstant1() {
+ if (constant1_ != nullptr) {
+ return constant1_;
+ }
+ constant1_ = new(arena_) HIntConstant(1);
+ entry_block_->AddInstruction(constant1_);
+ return constant1_;
}
HIntConstant* HGraphBuilder::GetConstant(int constant) {
switch (constant) {
case 0: return GetConstant0();
+ case 1: return GetConstant1();
default: {
HIntConstant* instruction = new (arena_) HIntConstant(constant);
entry_block_->AddInstruction(instruction);
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 399dd63ae0..fff83a1205 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -41,7 +41,8 @@ class HGraphBuilder : public ValueObject {
exit_block_(nullptr),
current_block_(nullptr),
graph_(nullptr),
- constant0_(nullptr) { }
+ constant0_(nullptr),
+ constant1_(nullptr) { }
HGraph* BuildGraph(const DexFile::CodeItem& code);
@@ -58,6 +59,7 @@ class HGraphBuilder : public ValueObject {
HBasicBlock* FindBlockStartingAt(int32_t index) const;
HIntConstant* GetConstant0();
+ HIntConstant* GetConstant1();
HIntConstant* GetConstant(int constant);
void InitializeLocals(int count);
HLocal* GetLocalAt(int register_index) const;
@@ -79,6 +81,7 @@ class HGraphBuilder : public ValueObject {
HGraph* graph_;
HIntConstant* constant0_;
+ HIntConstant* constant1_;
DISALLOW_COPY_AND_ASSIGN(HGraphBuilder);
};
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 01fc23b50c..56342aa684 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -26,9 +26,11 @@
namespace art {
void CodeGenerator::Compile(CodeAllocator* allocator) {
- GenerateFrameEntry();
const GrowableArray<HBasicBlock*>* blocks = graph()->blocks();
- for (size_t i = 0; i < blocks->Size(); i++) {
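+ // Blocks are emitted in insertion order, so the entry block is first and falls through to block 1.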
+ DCHECK(blocks->Get(0) == graph()->entry_block());
+ DCHECK(GoesToNextBlock(graph()->entry_block(), blocks->Get(1)));
+ CompileEntryBlock();
+ for (size_t i = 1; i < blocks->Size(); i++) {
CompileBlock(blocks->Get(i));
}
size_t code_size = assembler_->CodeSize();
@@ -37,17 +39,54 @@ void CodeGenerator::Compile(CodeAllocator* allocator) {
assembler_->FinalizeInstructions(code);
}
+void CodeGenerator::CompileEntryBlock() {
+ HGraphVisitor* location_builder = GetLocationBuilder();
+ // The entry block contains all locals for this method. By visiting the entry block,
+ // we're computing the required frame size.
+ for (HInstructionIterator it(graph()->entry_block()); !it.Done(); it.Advance()) {
+ HInstruction* current = it.Current();
+ // Instructions in the entry block should not generate code.
+ if (kIsDebugBuild) {
+ current->Accept(location_builder);
+ DCHECK(current->locations() == nullptr);
+ }
+ current->Accept(this);
+ }
+ GenerateFrameEntry();
+}
+
void CodeGenerator::CompileBlock(HBasicBlock* block) {
Bind(GetLabelOf(block));
+ HGraphVisitor* location_builder = GetLocationBuilder();
for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
- it.Current()->Accept(this);
+ // For each instruction, we emulate a stack-based machine, where the inputs are popped from
+ // the runtime stack, and the result is pushed on the stack. We currently can do this because
+ // we do not perform any code motion, and the Dex format does not reference individual
+ // instructions but uses registers instead (our equivalent of HLocal).
+ HInstruction* current = it.Current();
+ current->Accept(location_builder);
+ InitLocations(current);
+ current->Accept(this);
+ if (current->locations() != nullptr && current->locations()->Out().IsValid()) {
+ Push(current, current->locations()->Out());
+ }
+ }
+}
+
+void CodeGenerator::InitLocations(HInstruction* instruction) {
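+ // Instructions whose visitor set no LocationSummary (e.g. HLocal, HGoto) need no input moves.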
+ if (instruction->locations() == nullptr) return;
+ for (int i = 0; i < instruction->InputCount(); i++) {
+ Location location = instruction->locations()->InAt(i);
+ if (location.IsValid()) {
+ // Move the input to the desired location.
+ Move(instruction->InputAt(i), location);
+ }
}
}
-bool CodeGenerator::GoesToNextBlock(HGoto* goto_instruction) const {
- HBasicBlock* successor = goto_instruction->GetSuccessor();
+bool CodeGenerator::GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const {
// We currently iterate over the block in insertion order.
- return goto_instruction->block()->block_id() + 1 == successor->block_id();
+ return current->block_id() + 1 == next->block_id();
}
Label* CodeGenerator::GetLabelOf(HBasicBlock* block) const {
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 2a5ae7d751..c406378af4 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -17,6 +17,7 @@
#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_H_
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_H_
+#include "globals.h"
#include "instruction_set.h"
#include "memory_region.h"
#include "nodes.h"
@@ -35,12 +36,82 @@ class CodeAllocator {
DISALLOW_COPY_AND_ASSIGN(CodeAllocator);
};
+/**
+ * A Location is an abstraction over the potential location
+ * of an instruction. It could be in a register or on the stack.
+ */
+class Location : public ValueObject {
+ public:
+ template<typename T>
+ T reg() const { return static_cast<T>(reg_); }
+
+ Location() : reg_(kInvalid) { }
+ explicit Location(uword reg) : reg_(reg) { }
+
+ static Location RegisterLocation(uword reg) {
+ return Location(reg);
+ }
+
+ bool IsValid() const { return reg_ != kInvalid; }
+
+ Location(const Location& other) : reg_(other.reg_) { }
+
+ Location& operator=(const Location& other) {
+ reg_ = other.reg_;
+ return *this;
+ }
+
+ private:
+ // The target register for that location.
+ // TODO: Support stack location.
+ uword reg_;
+ static const uword kInvalid = -1;
+};
+
+/**
+ * The code generator computes LocationSummary for each instruction so that
+ * the instruction itself knows what code to generate: where to find the inputs
+ * and where to place the result.
+ *
+ * The intent is to have the code for generating the instruction independent of
+ * register allocation. A register allocator just has to provide a LocationSummary.
+ */
+class LocationSummary : public ArenaObject {
+ public:
+ explicit LocationSummary(HInstruction* instruction)
+ : inputs(instruction->block()->graph()->arena(), instruction->InputCount()) {
+ inputs.SetSize(instruction->InputCount());
+ for (int i = 0; i < instruction->InputCount(); i++) {
+ inputs.Put(i, Location());
+ }
+ }
+
+ void SetInAt(uint32_t at, Location location) {
+ inputs.Put(at, location);
+ }
+
+ Location InAt(uint32_t at) const {
+ return inputs.Get(at);
+ }
+
+ void SetOut(Location location) {
+ output = Location(location);
+ }
+
+ Location Out() const { return output; }
+
+ private:
+ GrowableArray<Location> inputs;
+ Location output;
+
+ DISALLOW_COPY_AND_ASSIGN(LocationSummary);
+};
+
class CodeGenerator : public HGraphVisitor {
public:
// Compiles the graph to executable instructions. Returns whether the compilation
// succeeded.
- static bool CompileGraph(
- HGraph* graph, InstructionSet instruction_set, CodeAllocator* allocator);
+ static bool CompileGraph(HGraph* graph, InstructionSet instruction_set, CodeAllocator* allocator);
Assembler* assembler() const { return assembler_; }
@@ -54,20 +125,31 @@ class CodeGenerator : public HGraphVisitor {
protected:
CodeGenerator(Assembler* assembler, HGraph* graph)
- : HGraphVisitor(graph), assembler_(assembler), block_labels_(graph->arena(), 0) {
+ : HGraphVisitor(graph),
+ frame_size_(0),
+ assembler_(assembler),
+ block_labels_(graph->arena(), 0) {
block_labels_.SetSize(graph->blocks()->Size());
}
Label* GetLabelOf(HBasicBlock* block) const;
- bool GoesToNextBlock(HGoto* got) const;
+ bool GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const;
+
+ // Frame size required for this method.
+ uint32_t frame_size_;
- private:
virtual void GenerateFrameEntry() = 0;
virtual void GenerateFrameExit() = 0;
virtual void Bind(Label* label) = 0;
+ virtual void Move(HInstruction* instruction, Location location) = 0;
+ virtual void Push(HInstruction* instruction, Location location) = 0;
+ virtual HGraphVisitor* GetLocationBuilder() = 0;
+ private:
+ void InitLocations(HInstruction* instruction);
void Compile(CodeAllocator* allocator);
void CompileBlock(HBasicBlock* block);
+ void CompileEntryBlock();
Assembler* const assembler_;
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 356e909467..62bf7ba4d8 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -24,28 +24,52 @@ namespace art {
namespace arm {
void CodeGeneratorARM::GenerateFrameEntry() {
- RegList registers = (1 << LR) | (1 << FP);
- __ PushList(registers);
+ __ PushList((1 << FP) | (1 << LR));
+ __ mov(FP, ShifterOperand(SP));
+ if (frame_size_ != 0) {
+ __ AddConstant(SP, -frame_size_);
+ }
}
void CodeGeneratorARM::GenerateFrameExit() {
- RegList registers = (1 << PC) | (1 << FP);
- __ PopList(registers);
+ __ mov(SP, ShifterOperand(FP));
+ __ PopList((1 << FP) | (1 << PC));
}
void CodeGeneratorARM::Bind(Label* label) {
__ Bind(label);
}
+void CodeGeneratorARM::Push(HInstruction* instruction, Location location) {
+ __ Push(location.reg<Register>());
+}
+
+void CodeGeneratorARM::Move(HInstruction* instruction, Location location) {
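+ // Constants are materialized at their use site; other values are popped from the emulated value stack.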
+ HIntConstant* constant = instruction->AsIntConstant();
+ if (constant != nullptr) {
+ __ LoadImmediate(location.reg<Register>(), constant->value());
+ } else {
+ __ Pop(location.reg<Register>());
+ }
+}
+
+void LocationsBuilderARM::VisitGoto(HGoto* got) {
+ got->set_locations(nullptr);
+}
+
void CodeGeneratorARM::VisitGoto(HGoto* got) {
HBasicBlock* successor = got->GetSuccessor();
if (graph()->exit_block() == successor) {
GenerateFrameExit();
- } else if (!GoesToNextBlock(got)) {
+ } else if (!GoesToNextBlock(got->block(), successor)) {
__ b(GetLabelOf(successor));
}
}
+void LocationsBuilderARM::VisitExit(HExit* exit) {
+ exit->set_locations(nullptr);
+}
+
void CodeGeneratorARM::VisitExit(HExit* exit) {
if (kIsDebugBuild) {
__ Comment("Unreachable");
@@ -53,33 +77,101 @@ void CodeGeneratorARM::VisitExit(HExit* exit) {
}
}
+void LocationsBuilderARM::VisitIf(HIf* if_instr) {
+ LocationSummary* locations = new (graph()->arena()) LocationSummary(if_instr);
+ locations->SetInAt(0, Location(R0));
+ if_instr->set_locations(locations);
+}
+
void CodeGeneratorARM::VisitIf(HIf* if_instr) {
- LOG(FATAL) << "UNIMPLEMENTED";
+ // TODO: Generate the input as a condition, instead of materializing in a register.
+ __ cmp(if_instr->locations()->InAt(0).reg<Register>(), ShifterOperand(0));
+ __ b(GetLabelOf(if_instr->IfFalseSuccessor()), EQ);
+ if (!GoesToNextBlock(if_instr->block(), if_instr->IfTrueSuccessor())) {
+ __ b(GetLabelOf(if_instr->IfTrueSuccessor()));
+ }
+}
+
+void LocationsBuilderARM::VisitEqual(HEqual* equal) {
+ LocationSummary* locations = new (graph()->arena()) LocationSummary(equal);
+ locations->SetInAt(0, Location(R0));
+ locations->SetInAt(1, Location(R1));
+ locations->SetOut(Location(R0));
+ equal->set_locations(locations);
}
void CodeGeneratorARM::VisitEqual(HEqual* equal) {
- LOG(FATAL) << "UNIMPLEMENTED";
+ LocationSummary* locations = equal->locations();
+ __ teq(locations->InAt(0).reg<Register>(),
+ ShifterOperand(locations->InAt(1).reg<Register>()));
+ __ mov(locations->Out().reg<Register>(), ShifterOperand(1), EQ);
+ __ mov(locations->Out().reg<Register>(), ShifterOperand(0), NE);
+}
+
+void LocationsBuilderARM::VisitLocal(HLocal* local) {
+ local->set_locations(nullptr);
}
void CodeGeneratorARM::VisitLocal(HLocal* local) {
- LOG(FATAL) << "UNIMPLEMENTED";
+ DCHECK_EQ(local->block(), graph()->entry_block());
+ frame_size_ += kWordSize;
+}
+
+void LocationsBuilderARM::VisitLoadLocal(HLoadLocal* load) {
+ LocationSummary* locations = new (graph()->arena()) LocationSummary(load);
+ locations->SetOut(Location(R0));
+ load->set_locations(locations);
+}
+
+static int32_t GetStackSlot(HLocal* local) {
+ // We are currently using FP to access locals, so the offset must be negative.
+ return (local->reg_number() + 1) * -kWordSize;
+}
+
+void CodeGeneratorARM::VisitLoadLocal(HLoadLocal* load) {
+ LocationSummary* locations = load->locations();
+ __ LoadFromOffset(kLoadWord, locations->Out().reg<Register>(),
+ FP, GetStackSlot(load->GetLocal()));
+}
+
+void LocationsBuilderARM::VisitStoreLocal(HStoreLocal* store) {
+ LocationSummary* locations = new (graph()->arena()) LocationSummary(store);
+ locations->SetInAt(1, Location(R0));
+ store->set_locations(locations);
}
-void CodeGeneratorARM::VisitLoadLocal(HLoadLocal* local) {
- LOG(FATAL) << "UNIMPLEMENTED";
+void CodeGeneratorARM::VisitStoreLocal(HStoreLocal* store) {
+ LocationSummary* locations = store->locations();
+ __ StoreToOffset(kStoreWord, locations->InAt(1).reg<Register>(),
+ FP, GetStackSlot(store->GetLocal()));
}
-void CodeGeneratorARM::VisitStoreLocal(HStoreLocal* local) {
- LOG(FATAL) << "UNIMPLEMENTED";
+void LocationsBuilderARM::VisitIntConstant(HIntConstant* constant) {
+ constant->set_locations(nullptr);
}
void CodeGeneratorARM::VisitIntConstant(HIntConstant* constant) {
- LOG(FATAL) << "UNIMPLEMENTED";
+ // Will be generated at use site.
+}
+
+void LocationsBuilderARM::VisitReturnVoid(HReturnVoid* ret) {
+ ret->set_locations(nullptr);
}
void CodeGeneratorARM::VisitReturnVoid(HReturnVoid* ret) {
GenerateFrameExit();
}
+void LocationsBuilderARM::VisitReturn(HReturn* ret) {
+ LocationSummary* locations = new (graph()->arena()) LocationSummary(ret);
+ locations->SetInAt(0, Location(R0));
+ ret->set_locations(locations);
+}
+
+void CodeGeneratorARM::VisitReturn(HReturn* ret) {
+ DCHECK_EQ(ret->locations()->InAt(0).reg<Register>(), R0);
+ GenerateFrameExit();
+}
+
} // namespace arm
} // namespace art
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 27a83b80c7..33d8e624a8 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -27,10 +27,25 @@ class Label;
namespace arm {
+class LocationsBuilderARM : public HGraphVisitor {
+ public:
+ explicit LocationsBuilderARM(HGraph* graph) : HGraphVisitor(graph) { }
+
+#define DECLARE_VISIT_INSTRUCTION(name) \
+ virtual void Visit##name(H##name* instr);
+
+ FOR_EACH_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
+
+#undef DECLARE_VISIT_INSTRUCTION
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(LocationsBuilderARM);
+};
+
class CodeGeneratorARM : public CodeGenerator {
public:
CodeGeneratorARM(Assembler* assembler, HGraph* graph)
- : CodeGenerator(assembler, graph) { }
+ : CodeGenerator(assembler, graph), location_builder_(graph) { }
// Visit functions for instruction classes.
#define DECLARE_VISIT_INSTRUCTION(name) \
@@ -40,10 +55,19 @@ class CodeGeneratorARM : public CodeGenerator {
#undef DECLARE_VISIT_INSTRUCTION
+ protected:
+ virtual void GenerateFrameEntry() OVERRIDE;
+ virtual void GenerateFrameExit() OVERRIDE;
+ virtual void Bind(Label* label) OVERRIDE;
+ virtual void Move(HInstruction* instruction, Location location) OVERRIDE;
+ virtual void Push(HInstruction* instruction, Location location) OVERRIDE;
+
+ virtual HGraphVisitor* GetLocationBuilder() OVERRIDE {
+ return &location_builder_;
+ }
+
private:
- virtual void GenerateFrameEntry();
- virtual void GenerateFrameExit();
- virtual void Bind(Label* label);
+ LocationsBuilderARM location_builder_;
DISALLOW_COPY_AND_ASSIGN(CodeGeneratorARM);
};
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index ab34599c94..81ada4df6f 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -26,6 +26,10 @@ namespace x86 {
void CodeGeneratorX86::GenerateFrameEntry() {
__ pushl(EBP);
__ movl(EBP, ESP);
+
+ if (frame_size_ != 0) {
+ __ subl(ESP, Immediate(frame_size_));
+ }
}
void CodeGeneratorX86::GenerateFrameExit() {
@@ -37,15 +41,36 @@ void CodeGeneratorX86::Bind(Label* label) {
__ Bind(label);
}
+void CodeGeneratorX86::Push(HInstruction* instruction, Location location) {
+ __ pushl(location.reg<Register>());
+}
+
+void CodeGeneratorX86::Move(HInstruction* instruction, Location location) {
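+ // As on ARM: constants are materialized here, other values are popped from the emulated stack.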
+ HIntConstant* constant = instruction->AsIntConstant();
+ if (constant != nullptr) {
+ __ movl(location.reg<Register>(), Immediate(constant->value()));
+ } else {
+ __ popl(location.reg<Register>());
+ }
+}
+
+void LocationsBuilderX86::VisitGoto(HGoto* got) {
+ got->set_locations(nullptr);
+}
+
void CodeGeneratorX86::VisitGoto(HGoto* got) {
HBasicBlock* successor = got->GetSuccessor();
if (graph()->exit_block() == successor) {
GenerateFrameExit();
- } else if (!GoesToNextBlock(got)) {
+ } else if (!GoesToNextBlock(got->block(), successor)) {
__ jmp(GetLabelOf(successor));
}
}
+void LocationsBuilderX86::VisitExit(HExit* exit) {
+ exit->set_locations(nullptr);
+}
+
void CodeGeneratorX86::VisitExit(HExit* exit) {
if (kIsDebugBuild) {
__ Comment("Unreachable");
@@ -53,28 +78,81 @@ void CodeGeneratorX86::VisitExit(HExit* exit) {
}
}
+void LocationsBuilderX86::VisitIf(HIf* if_instr) {
+ LocationSummary* locations = new (graph()->arena()) LocationSummary(if_instr);
+ locations->SetInAt(0, Location(EAX));
+ if_instr->set_locations(locations);
+}
+
void CodeGeneratorX86::VisitIf(HIf* if_instr) {
- LOG(FATAL) << "UNIMPLEMENTED";
+ // TODO: Generate the input as a condition, instead of materializing in a register.
+ __ cmpl(if_instr->locations()->InAt(0).reg<Register>(), Immediate(0));
+ __ j(kEqual, GetLabelOf(if_instr->IfFalseSuccessor()));
+ if (!GoesToNextBlock(if_instr->block(), if_instr->IfTrueSuccessor())) {
+ __ jmp(GetLabelOf(if_instr->IfTrueSuccessor()));
+ }
+}
+
+void LocationsBuilderX86::VisitLocal(HLocal* local) {
+ local->set_locations(nullptr);
}
void CodeGeneratorX86::VisitLocal(HLocal* local) {
- LOG(FATAL) << "UNIMPLEMENTED";
+ DCHECK_EQ(local->block(), graph()->entry_block());
+ frame_size_ += kWordSize;
+}
+
+void LocationsBuilderX86::VisitLoadLocal(HLoadLocal* local) {
+ LocationSummary* locations = new (graph()->arena()) LocationSummary(local);
+ locations->SetOut(Location(EAX));
+ local->set_locations(locations);
+}
+
+static int32_t GetStackSlot(HLocal* local) {
+ // We are currently using EBP to access locals, so the offset must be negative.
+ return (local->reg_number() + 1) * -kWordSize;
+}
+
+void CodeGeneratorX86::VisitLoadLocal(HLoadLocal* load) {
+ __ movl(load->locations()->Out().reg<Register>(),
+ Address(EBP, GetStackSlot(load->GetLocal())));
}
-void CodeGeneratorX86::VisitLoadLocal(HLoadLocal* local) {
- LOG(FATAL) << "UNIMPLEMENTED";
+void LocationsBuilderX86::VisitStoreLocal(HStoreLocal* local) {
+ LocationSummary* locations = new (graph()->arena()) LocationSummary(local);
+ locations->SetInAt(1, Location(EAX));
+ local->set_locations(locations);
}
-void CodeGeneratorX86::VisitStoreLocal(HStoreLocal* local) {
- LOG(FATAL) << "UNIMPLEMENTED";
+void CodeGeneratorX86::VisitStoreLocal(HStoreLocal* store) {
+ __ movl(Address(EBP, GetStackSlot(store->GetLocal())),
+ store->locations()->InAt(1).reg<Register>());
+}
+
+void LocationsBuilderX86::VisitEqual(HEqual* equal) {
+ LocationSummary* locations = new (graph()->arena()) LocationSummary(equal);
+ locations->SetInAt(0, Location(EAX));
+ locations->SetInAt(1, Location(ECX));
+ locations->SetOut(Location(EAX));
+ equal->set_locations(locations);
}
void CodeGeneratorX86::VisitEqual(HEqual* equal) {
- LOG(FATAL) << "UNIMPLEMENTED";
+ __ cmpl(equal->locations()->InAt(0).reg<Register>(),
+ equal->locations()->InAt(1).reg<Register>());
+ __ setb(kEqual, equal->locations()->Out().reg<Register>());
+}
+
+void LocationsBuilderX86::VisitIntConstant(HIntConstant* constant) {
+ constant->set_locations(nullptr);
}
void CodeGeneratorX86::VisitIntConstant(HIntConstant* constant) {
- LOG(FATAL) << "UNIMPLEMENTED";
+ // Will be generated at use site.
+}
+
+void LocationsBuilderX86::VisitReturnVoid(HReturnVoid* ret) {
+ ret->set_locations(nullptr);
}
void CodeGeneratorX86::VisitReturnVoid(HReturnVoid* ret) {
@@ -82,5 +160,17 @@ void CodeGeneratorX86::VisitReturnVoid(HReturnVoid* ret) {
__ ret();
}
+void LocationsBuilderX86::VisitReturn(HReturn* ret) {
+ LocationSummary* locations = new (graph()->arena()) LocationSummary(ret);
+ locations->SetInAt(0, Location(EAX));
+ ret->set_locations(locations);
+}
+
+void CodeGeneratorX86::VisitReturn(HReturn* ret) {
+ DCHECK_EQ(ret->locations()->InAt(0).reg<Register>(), EAX);
+ GenerateFrameExit();
+ __ ret();
+}
+
} // namespace x86
} // namespace art
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 7dae2ab0e5..dd146b8378 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -27,12 +27,26 @@ class Label;
namespace x86 {
+class LocationsBuilderX86 : public HGraphVisitor {
+ public:
+ explicit LocationsBuilderX86(HGraph* graph) : HGraphVisitor(graph) { }
+
+#define DECLARE_VISIT_INSTRUCTION(name) \
+ virtual void Visit##name(H##name* instr);
+
+ FOR_EACH_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
+
+#undef DECLARE_VISIT_INSTRUCTION
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(LocationsBuilderX86);
+};
+
class CodeGeneratorX86 : public CodeGenerator {
public:
CodeGeneratorX86(Assembler* assembler, HGraph* graph)
- : CodeGenerator(assembler, graph) { }
+ : CodeGenerator(assembler, graph), location_builder_(graph) { }
- // Visit functions for instruction classes.
#define DECLARE_VISIT_INSTRUCTION(name) \
virtual void Visit##name(H##name* instr);
@@ -40,10 +54,19 @@ class CodeGeneratorX86 : public CodeGenerator {
#undef DECLARE_VISIT_INSTRUCTION
+ protected:
+ virtual void GenerateFrameEntry() OVERRIDE;
+ virtual void GenerateFrameExit() OVERRIDE;
+ virtual void Bind(Label* label) OVERRIDE;
+ virtual void Move(HInstruction* instruction, Location location) OVERRIDE;
+ virtual void Push(HInstruction* instruction, Location location) OVERRIDE;
+
+ virtual HGraphVisitor* GetLocationBuilder() OVERRIDE {
+ return &location_builder_;
+ }
+
private:
- virtual void GenerateFrameEntry();
- virtual void GenerateFrameExit();
- virtual void Bind(Label* label);
+ LocationsBuilderX86 location_builder_;
DISALLOW_COPY_AND_ASSIGN(CodeGeneratorX86);
};
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index 6d4588dd3d..5020dd0a57 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -45,7 +45,7 @@ class ExecutableMemoryAllocator : public CodeAllocator {
DISALLOW_COPY_AND_ASSIGN(ExecutableMemoryAllocator);
};
-static void TestCode(const uint16_t* data) {
+static void TestCode(const uint16_t* data, bool has_result = false, int32_t expected = 0) {
ArenaPool pool;
ArenaAllocator arena(&pool);
HGraphBuilder builder(&arena);
@@ -54,13 +54,19 @@ static void TestCode(const uint16_t* data) {
ASSERT_NE(graph, nullptr);
ExecutableMemoryAllocator allocator;
CHECK(CodeGenerator::CompileGraph(graph, kX86, &allocator));
- typedef void (*fptr)();
+ typedef int32_t (*fptr)();
#if defined(__i386__)
- reinterpret_cast<fptr>(allocator.memory())();
+ int32_t result = reinterpret_cast<fptr>(allocator.memory())();
+ if (has_result) {
+ CHECK_EQ(result, expected);
+ }
#endif
CHECK(CodeGenerator::CompileGraph(graph, kArm, &allocator));
#if defined(__arm__)
- reinterpret_cast<fptr>(allocator.memory())();
+ int32_t result = reinterpret_cast<fptr>(allocator.memory())();
+ if (has_result) {
+ CHECK_EQ(result, expected);
+ }
#endif
}
@@ -69,7 +75,7 @@ TEST(CodegenTest, ReturnVoid) {
TestCode(data);
}
-TEST(PrettyPrinterTest, CFG1) {
+TEST(CodegenTest, CFG1) {
const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO | 0x100,
Instruction::RETURN_VOID);
@@ -77,7 +83,7 @@ TEST(PrettyPrinterTest, CFG1) {
TestCode(data);
}
-TEST(PrettyPrinterTest, CFG2) {
+TEST(CodegenTest, CFG2) {
const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO | 0x100,
Instruction::GOTO | 0x100,
@@ -86,7 +92,7 @@ TEST(PrettyPrinterTest, CFG2) {
TestCode(data);
}
-TEST(PrettyPrinterTest, CFG3) {
+TEST(CodegenTest, CFG3) {
const uint16_t data1[] = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO | 0x200,
Instruction::RETURN_VOID,
@@ -109,7 +115,7 @@ TEST(PrettyPrinterTest, CFG3) {
TestCode(data3);
}
-TEST(PrettyPrinterTest, CFG4) {
+TEST(CodegenTest, CFG4) {
const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
Instruction::RETURN_VOID,
Instruction::GOTO | 0x100,
@@ -118,4 +124,70 @@ TEST(PrettyPrinterTest, CFG4) {
TestCode(data);
}
+TEST(CodegenTest, CFG5) {
+ const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ Instruction::CONST_4 | 0 | 0,
+ Instruction::IF_EQ, 3,
+ Instruction::GOTO | 0x100,
+ Instruction::RETURN_VOID);
+
+ TestCode(data);
+}
+
+TEST(CodegenTest, IntConstant) {
+ const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ Instruction::CONST_4 | 0 | 0,
+ Instruction::RETURN_VOID);
+
+ TestCode(data);
+}
+
+TEST(CodegenTest, Return1) {
+ const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ Instruction::CONST_4 | 0 | 0,
+ Instruction::RETURN | 0);
+
+ TestCode(data, true, 0);
+}
+
+TEST(CodegenTest, Return2) {
+ const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ Instruction::CONST_4 | 0 | 0,
+ Instruction::CONST_4 | 0 | 1 << 8,
+ Instruction::RETURN | 1 << 8);
+
+ TestCode(data, true, 0);
+}
+
+TEST(CodegenTest, Return3) {
+ const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ Instruction::CONST_4 | 0 | 0,
+ Instruction::CONST_4 | 1 << 8 | 1 << 12,
+ Instruction::RETURN | 1 << 8);
+
+ TestCode(data, true, 1);
+}
+
+TEST(CodegenTest, ReturnIf1) {
+ const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ Instruction::CONST_4 | 0 | 0,
+ Instruction::CONST_4 | 1 << 8 | 1 << 12,
+ Instruction::IF_EQ, 3,
+ Instruction::RETURN | 0 << 8,
+ Instruction::RETURN | 1 << 8);
+
+ TestCode(data, true, 1);
+}
+
+TEST(CodegenTest, ReturnIf2) {
+ const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ Instruction::CONST_4 | 0 | 0,
+ Instruction::CONST_4 | 1 << 8 | 1 << 12,
+ Instruction::IF_EQ | 0 << 4 | 1 << 8, 3,
+ Instruction::RETURN | 0 << 8,
+ Instruction::RETURN | 1 << 8);
+
+ TestCode(data, true, 0);
+}
+
} // namespace art
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 16dfb9465c..a6f3f5ad0b 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -121,6 +121,7 @@ void HGraph::VisitBlockForDominatorTree(HBasicBlock* block,
}
void HBasicBlock::AddInstruction(HInstruction* instruction) {
+ DCHECK(instruction->block() == nullptr);
instruction->set_block(this);
instruction->set_id(graph()->GetNextInstructionId());
if (first_instruction_ == nullptr) {
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index bb08bd0e9a..94185992e2 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -27,6 +27,7 @@ class HBasicBlock;
class HInstruction;
class HIntConstant;
class HGraphVisitor;
+class LocationSummary;
static const int kDefaultNumberOfBlocks = 8;
static const int kDefaultNumberOfSuccessors = 2;
@@ -186,12 +187,18 @@ class HBasicBlock : public ArenaObject {
M(IntConstant) \
M(LoadLocal) \
M(Local) \
+ M(Return) \
M(ReturnVoid) \
M(StoreLocal) \
+#define FORWARD_DECLARATION(type) class H##type;
+FOR_EACH_INSTRUCTION(FORWARD_DECLARATION)
+#undef FORWARD_DECLARATION
+
#define DECLARE_INSTRUCTION(type) \
virtual void Accept(HGraphVisitor* visitor); \
virtual const char* DebugName() const { return #type; } \
+ virtual H##type* As##type() { return this; } \
class HUseListNode : public ArenaObject {
public:
@@ -210,7 +217,14 @@ class HUseListNode : public ArenaObject {
class HInstruction : public ArenaObject {
public:
- HInstruction() : previous_(nullptr), next_(nullptr), block_(nullptr), id_(-1), uses_(nullptr) { }
+ HInstruction()
+ : previous_(nullptr),
+ next_(nullptr),
+ block_(nullptr),
+ id_(-1),
+ uses_(nullptr),
+ locations_(nullptr) { }
+
virtual ~HInstruction() { }
HInstruction* next() const { return next_; }
@@ -236,6 +250,15 @@ class HInstruction : public ArenaObject {
int id() const { return id_; }
void set_id(int id) { id_ = id; }
+ LocationSummary* locations() const { return locations_; }
+ void set_locations(LocationSummary* locations) { locations_ = locations; }
+
+#define INSTRUCTION_TYPE_CHECK(type) \
+ virtual H##type* As##type() { return nullptr; }
+
+ FOR_EACH_INSTRUCTION(INSTRUCTION_TYPE_CHECK)
+#undef INSTRUCTION_TYPE_CHECK
+
private:
HInstruction* previous_;
HInstruction* next_;
@@ -248,6 +271,9 @@ class HInstruction : public ArenaObject {
HUseListNode* uses_;
+ // Set by the code generator.
+ LocationSummary* locations_;
+
friend class HBasicBlock;
DISALLOW_COPY_AND_ASSIGN(HInstruction);
@@ -386,6 +412,20 @@ class HReturnVoid : public HTemplateInstruction<0> {
DISALLOW_COPY_AND_ASSIGN(HReturnVoid);
};
+// Represents dex's RETURN opcodes. A HReturn is a control flow
+// instruction that branches to the exit block.
+class HReturn : public HTemplateInstruction<1> {
+ public:
+ explicit HReturn(HInstruction* value) {
+ SetRawInputAt(0, value);
+ }
+
+ DECLARE_INSTRUCTION(Return)
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HReturn);
+};
+
// The exit instruction is the only instruction of the exit block.
// Instructions aborting the method (HThrow and HReturn) must branch to the
// exit block.
@@ -422,6 +462,14 @@ class HIf : public HTemplateInstruction<1> {
SetRawInputAt(0, input);
}
+ HBasicBlock* IfTrueSuccessor() const {
+ return block()->successors()->Get(0);
+ }
+
+ HBasicBlock* IfFalseSuccessor() const {
+ return block()->successors()->Get(1);
+ }
+
DECLARE_INSTRUCTION(If)
private:
@@ -449,9 +497,11 @@ class HLocal : public HTemplateInstruction<0> {
DECLARE_INSTRUCTION(Local)
+ uint16_t reg_number() const { return reg_number_; }
+
private:
- // The register number in Dex.
- uint16_t reg_number_;
+ // The Dex register number.
+ const uint16_t reg_number_;
DISALLOW_COPY_AND_ASSIGN(HLocal);
};
@@ -463,6 +513,8 @@ class HLoadLocal : public HTemplateInstruction<1> {
SetRawInputAt(0, local);
}
+ HLocal* GetLocal() const { return reinterpret_cast<HLocal*>(InputAt(0)); }
+
DECLARE_INSTRUCTION(LoadLocal)
private:
@@ -478,6 +530,8 @@ class HStoreLocal : public HTemplateInstruction<2> {
SetRawInputAt(1, value);
}
+ HLocal* GetLocal() const { return reinterpret_cast<HLocal*>(InputAt(0)); }
+
DECLARE_INSTRUCTION(StoreLocal)
private:
@@ -490,6 +544,8 @@ class HIntConstant : public HTemplateInstruction<0> {
public:
explicit HIntConstant(int32_t value) : value_(value) { }
+ int32_t value() const { return value_; }
+
DECLARE_INSTRUCTION(IntConstant)
private:
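
A minimal sketch of the downcast pattern the macros above generate (HFoo is a hypothetical stand-in for any concrete node): the base class gets a virtual As##type() returning nullptr for every instruction kind, and DECLARE_INSTRUCTION overrides exactly one of them to return this, giving a cheap checked cast without RTTI:

    class HFoo;

    class HInstructionBase {  // stand-in for HInstruction
     public:
      virtual ~HInstructionBase() {}
      virtual HFoo* AsFoo() { return nullptr; }  // from INSTRUCTION_TYPE_CHECK
    };

    class HFoo : public HInstructionBase {
     public:
      virtual HFoo* AsFoo() { return this; }  // from DECLARE_INSTRUCTION
    };

    void Visit(HInstructionBase* instruction) {
      if (HFoo* foo = instruction->AsFoo()) {
        // instruction is statically known to be an HFoo here.
        (void)foo;
      }
    }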
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
new file mode 100644
index 0000000000..73323a4846
--- /dev/null
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compilers.h"
+
+namespace art {
+
+CompiledMethod* OptimizingCompiler::TryCompile(CompilerDriver& driver,
+ const DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ jobject class_loader,
+ const DexFile& dex_file) const {
+ return nullptr;
+}
+
+} // namespace art
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index bf13a41397..67c4850ca5 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -26,4 +26,7 @@
#define ONE_REGISTER_CODE_ITEM(...) \
{ 1, 0, 0, 0, 0, 0, NUM_INSTRUCTIONS(__VA_ARGS__), 0, __VA_ARGS__ }
+#define TWO_REGISTERS_CODE_ITEM(...) \
+ { 2, 0, 0, 0, 0, 0, NUM_INSTRUCTIONS(__VA_ARGS__), 0, __VA_ARGS__ }
+
#endif // ART_COMPILER_OPTIMIZING_OPTIMIZING_UNIT_TEST_H_
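
Annotated expansion of the new macro, as a sketch; the field meanings are assumptions from the dex code_item header layout (each u4 field occupies two little-endian uint16_t entries), not spelled out in this patch:

    // TWO_REGISTERS_CODE_ITEM(insns...) roughly yields:
    // { 2,      // registers_size
    //   0,      // ins_size
    //   0,      // outs_size
    //   0,      // tries_size
    //   0, 0,   // debug_info_off (u4)
    //   NUM_INSTRUCTIONS(insns...), 0,  // insns_size in code units (u4)
    //   insns... }                      // the instruction stream itself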
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index cc78816546..908d995451 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -30,9 +30,10 @@
#include "base/timing_logger.h"
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
-#include "compiler_backend.h"
+#include "compiler.h"
#include "compiler_callbacks.h"
#include "dex_file-inl.h"
+#include "dex/pass_driver.h"
#include "dex/verification_results.h"
#include "driver/compiler_callbacks_impl.h"
#include "driver/compiler_driver.h"
@@ -147,7 +148,7 @@ static void Usage(const char* fmt, ...) {
UsageError(" Example: --instruction-set-features=div");
UsageError(" Default: default");
UsageError("");
- UsageError(" --compiler-backend=(Quick|QuickGBC|Portable): select compiler backend");
+ UsageError(" --compiler-backend=(Quick|Optimizing|Portable): select compiler backend");
UsageError(" set.");
UsageError(" Example: --compiler-backend=Portable");
UsageError(" Default: Quick");
@@ -203,6 +204,11 @@ static void Usage(const char* fmt, ...) {
UsageError("");
UsageError(" --profile-file=<filename>: specify profiler output file to use for compilation.");
UsageError("");
+ UsageError(" --print-pass-names: print a list of pass names");
+ UsageError("");
+ UsageError(" --disable-passes=<pass-names>: disable one or more passes separated by comma.");
+ UsageError(" Example: --disable-passes=UseCount,BBOptimizations");
+ UsageError("");
std::cerr << "See log for usage error information\n";
exit(EXIT_FAILURE);
}
@@ -212,7 +218,7 @@ class Dex2Oat {
static bool Create(Dex2Oat** p_dex2oat,
const Runtime::Options& runtime_options,
const CompilerOptions& compiler_options,
- CompilerBackend::Kind compiler_backend,
+ Compiler::Kind compiler_kind,
InstructionSet instruction_set,
InstructionSetFeatures instruction_set_features,
VerificationResults* verification_results,
@@ -222,7 +228,7 @@ class Dex2Oat {
CHECK(verification_results != nullptr);
CHECK(method_inliner_map != nullptr);
UniquePtr<Dex2Oat> dex2oat(new Dex2Oat(&compiler_options,
- compiler_backend,
+ compiler_kind,
instruction_set,
instruction_set_features,
verification_results,
@@ -335,7 +341,7 @@ class Dex2Oat {
UniquePtr<CompilerDriver> driver(new CompilerDriver(compiler_options_,
verification_results_,
method_inliner_map_,
- compiler_backend_,
+ compiler_kind_,
instruction_set_,
instruction_set_features_,
image,
@@ -346,7 +352,7 @@ class Dex2Oat {
&compiler_phases_timings,
profile_file));
- driver->GetCompilerBackend()->SetBitcodeFileName(*driver.get(), bitcode_filename);
+ driver->GetCompiler()->SetBitcodeFileName(*driver.get(), bitcode_filename);
driver->CompileAll(class_loader, dex_files, &timings);
@@ -410,14 +416,14 @@ class Dex2Oat {
private:
explicit Dex2Oat(const CompilerOptions* compiler_options,
- CompilerBackend::Kind compiler_backend,
+ Compiler::Kind compiler_kind,
InstructionSet instruction_set,
InstructionSetFeatures instruction_set_features,
VerificationResults* verification_results,
DexFileToMethodInlinerMap* method_inliner_map,
size_t thread_count)
: compiler_options_(compiler_options),
- compiler_backend_(compiler_backend),
+ compiler_kind_(compiler_kind),
instruction_set_(instruction_set),
instruction_set_features_(instruction_set_features),
verification_results_(verification_results),
@@ -482,7 +488,7 @@ class Dex2Oat {
}
const CompilerOptions* const compiler_options_;
- const CompilerBackend::Kind compiler_backend_;
+ const Compiler::Kind compiler_kind_;
const InstructionSet instruction_set_;
const InstructionSetFeatures instruction_set_features_;
@@ -722,9 +728,9 @@ static int dex2oat(int argc, char** argv) {
std::string android_root;
std::vector<const char*> runtime_args;
int thread_count = sysconf(_SC_NPROCESSORS_CONF);
- CompilerBackend::Kind compiler_backend = kUsePortableCompiler
- ? CompilerBackend::kPortable
- : CompilerBackend::kQuick;
+ Compiler::Kind compiler_kind = kUsePortableCompiler
+ ? Compiler::kPortable
+ : Compiler::kQuick;
const char* compiler_filter_string = NULL;
int huge_method_threshold = CompilerOptions::kDefaultHugeMethodThreshold;
int large_method_threshold = CompilerOptions::kDefaultLargeMethodThreshold;
@@ -738,8 +744,12 @@ static int dex2oat(int argc, char** argv) {
#if defined(__arm__)
InstructionSet instruction_set = kThumb2;
+#elif defined(__aarch64__)
+ InstructionSet instruction_set = kArm64;
#elif defined(__i386__)
InstructionSet instruction_set = kX86;
+#elif defined(__x86_64__)
+ InstructionSet instruction_set = kX86_64;
#elif defined(__mips__)
InstructionSet instruction_set = kMips;
#else
@@ -840,9 +850,11 @@ static int dex2oat(int argc, char** argv) {
} else if (option.starts_with("--compiler-backend=")) {
StringPiece backend_str = option.substr(strlen("--compiler-backend=")).data();
if (backend_str == "Quick") {
- compiler_backend = CompilerBackend::kQuick;
+ compiler_kind = Compiler::kQuick;
+ } else if (backend_str == "Optimizing") {
+ compiler_kind = Compiler::kOptimizing;
} else if (backend_str == "Portable") {
- compiler_backend = CompilerBackend::kPortable;
+ compiler_kind = Compiler::kPortable;
}
} else if (option.starts_with("--compiler-filter=")) {
compiler_filter_string = option.substr(strlen("--compiler-filter=")).data();
@@ -908,6 +920,11 @@ static int dex2oat(int argc, char** argv) {
} else if (option == "--no-profile-file") {
LOG(INFO) << "dex2oat: no profile file supplied (explictly)";
// No profile
+ } else if (option == "--print-pass-names") {
+ PassDriver::PrintPassNames();
+ } else if (option.starts_with("--disable-passes=")) {
+ std::string disable_passes = option.substr(strlen("--disable-passes=")).data();
+ PassDriver::CreateDefaultPassList(disable_passes);
} else {
Usage("Unknown argument %s", option.data());
}
@@ -1097,7 +1114,7 @@ static int dex2oat(int argc, char** argv) {
if (!Dex2Oat::Create(&p_dex2oat,
runtime_options,
compiler_options,
- compiler_backend,
+ compiler_kind,
instruction_set,
instruction_set_features,
&verification_results,
diff --git a/disassembler/disassembler.cc b/disassembler/disassembler.cc
index 5c17a830e9..41ee21365e 100644
--- a/disassembler/disassembler.cc
+++ b/disassembler/disassembler.cc
@@ -34,7 +34,9 @@ Disassembler* Disassembler::Create(InstructionSet instruction_set) {
} else if (instruction_set == kMips) {
return new mips::DisassemblerMips();
} else if (instruction_set == kX86) {
- return new x86::DisassemblerX86();
+ return new x86::DisassemblerX86(false);
+ } else if (instruction_set == kX86_64) {
+ return new x86::DisassemblerX86(true);
} else {
UNIMPLEMENTED(FATAL) << "no disassembler for " << instruction_set;
return NULL;
diff --git a/disassembler/disassembler_arm.cc b/disassembler/disassembler_arm.cc
index c35edbbcbf..161a7265ac 100644
--- a/disassembler/disassembler_arm.cc
+++ b/disassembler/disassembler_arm.cc
@@ -27,9 +27,6 @@
namespace art {
namespace arm {
-DisassemblerArm::DisassemblerArm() {
-}
-
size_t DisassemblerArm::Dump(std::ostream& os, const uint8_t* begin) {
if ((reinterpret_cast<intptr_t>(begin) & 1) == 0) {
DumpArm(os, begin);
@@ -720,7 +717,8 @@ size_t DisassemblerArm::DumpThumb32(std::ostream& os, const uint8_t* instr_ptr)
if (Rn.r == 15 && U == 1) {
intptr_t lit_adr = reinterpret_cast<intptr_t>(instr_ptr);
lit_adr = RoundDown(lit_adr, 4) + 4 + (imm8 << 2);
- args << StringPrintf(" ; 0x%" PRIx64, *reinterpret_cast<int64_t*>(lit_adr));
+ typedef const int64_t unaligned_int64_t __attribute__ ((aligned (2)));
+ args << StringPrintf(" ; 0x%" PRIx64, *reinterpret_cast<unaligned_int64_t*>(lit_adr));
}
} else if (Rn.r == 13 && W == 1 && U == L) { // VPUSH/VPOP
opcode << (L == 1 ? "vpop" : "vpush");
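
The aligned(2) typedef above is the load-bearing detail: Thumb code guarantees only 2-byte alignment, so dereferencing the literal pool through a plain int64_t* would be undefined behavior, while lowering the type's assumed alignment makes the compiler emit an access that is safe at unaligned addresses. A standalone sketch of the same trick, with memcpy as the fully portable alternative (function names here are illustrative):

    #include <cstdint>
    #include <cstring>

    // Compiler may only assume 2-byte alignment for this pointee.
    typedef const int64_t unaligned_int64_t __attribute__((aligned(2)));

    int64_t LoadLiteral(const uint8_t* code_ptr) {
      return *reinterpret_cast<unaligned_int64_t*>(code_ptr);
    }

    int64_t LoadLiteralPortable(const uint8_t* code_ptr) {
      int64_t value;
      std::memcpy(&value, code_ptr, sizeof(value));  // never misaligned
      return value;
    }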
diff --git a/disassembler/disassembler_arm.h b/disassembler/disassembler_arm.h
index e34274e126..f6d7fdad52 100644
--- a/disassembler/disassembler_arm.h
+++ b/disassembler/disassembler_arm.h
@@ -24,12 +24,13 @@
namespace art {
namespace arm {
-class DisassemblerArm : public Disassembler {
+class DisassemblerArm FINAL : public Disassembler {
public:
- DisassemblerArm();
+ DisassemblerArm() {
+ }
- virtual size_t Dump(std::ostream& os, const uint8_t* begin);
- virtual void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end);
+ size_t Dump(std::ostream& os, const uint8_t* begin) OVERRIDE;
+ void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) OVERRIDE;
private:
void DumpArm(std::ostream& os, const uint8_t* instr);
diff --git a/disassembler/disassembler_arm64.h b/disassembler/disassembler_arm64.h
index d759df661c..28c0fa7556 100644
--- a/disassembler/disassembler_arm64.h
+++ b/disassembler/disassembler_arm64.h
@@ -25,14 +25,14 @@
namespace art {
namespace arm64 {
-class DisassemblerArm64 : public Disassembler {
+class DisassemblerArm64 FINAL : public Disassembler {
public:
DisassemblerArm64() {
decoder.AppendVisitor(&disasm);
}
- size_t Dump(std::ostream& os, const uint8_t* begin);
- void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end);
+ size_t Dump(std::ostream& os, const uint8_t* begin) OVERRIDE;
+ void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) OVERRIDE;
private:
vixl::Decoder decoder;
diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc
index 25bbae68ef..72ff761426 100644
--- a/disassembler/disassembler_mips.cc
+++ b/disassembler/disassembler_mips.cc
@@ -257,9 +257,6 @@ static void DumpMips(std::ostream& os, const uint8_t* instr_ptr) {
os << StringPrintf("%p: %08x\t%-7s ", instr_ptr, instruction, opcode.c_str()) << args.str() << '\n';
}
-DisassemblerMips::DisassemblerMips() {
-}
-
size_t DisassemblerMips::Dump(std::ostream& os, const uint8_t* begin) {
DumpMips(os, begin);
return 4;
diff --git a/disassembler/disassembler_mips.h b/disassembler/disassembler_mips.h
index d3862676a0..e1fb0340ab 100644
--- a/disassembler/disassembler_mips.h
+++ b/disassembler/disassembler_mips.h
@@ -24,11 +24,13 @@
namespace art {
namespace mips {
-class DisassemblerMips : public Disassembler {
+class DisassemblerMips FINAL : public Disassembler {
public:
- DisassemblerMips();
- virtual size_t Dump(std::ostream& os, const uint8_t* begin);
- virtual void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end);
+ DisassemblerMips() {
+ }
+
+ size_t Dump(std::ostream& os, const uint8_t* begin) OVERRIDE;
+ void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) OVERRIDE;
private:
DISALLOW_COPY_AND_ASSIGN(DisassemblerMips);
diff --git a/disassembler/disassembler_x86.cc b/disassembler/disassembler_x86.cc
index d86ba7b77a..ab0ee52205 100644
--- a/disassembler/disassembler_x86.cc
+++ b/disassembler/disassembler_x86.cc
@@ -25,8 +25,6 @@
namespace art {
namespace x86 {
-DisassemblerX86::DisassemblerX86() {}
-
size_t DisassemblerX86::Dump(std::ostream& os, const uint8_t* begin) {
return DumpInstruction(os, begin);
}
@@ -41,16 +39,21 @@ void DisassemblerX86::Dump(std::ostream& os, const uint8_t* begin, const uint8_t
static const char* gReg8Names[] = { "al", "cl", "dl", "bl", "ah", "ch", "dh", "bh" };
static const char* gReg16Names[] = { "ax", "cx", "dx", "bx", "sp", "bp", "si", "di" };
static const char* gReg32Names[] = { "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi" };
+static const char* gReg64Names[] = {
+ "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
+};
-static void DumpReg0(std::ostream& os, uint8_t /*rex*/, size_t reg,
+static void DumpReg0(std::ostream& os, uint8_t rex, size_t reg,
bool byte_operand, uint8_t size_override) {
- DCHECK_LT(reg, 8u);
- // TODO: combine rex into size
- size_t size = byte_operand ? 1 : (size_override == 0x66 ? 2 : 4);
+ DCHECK_LT(reg, (rex == 0) ? 8u : 16u);
+ bool rex_w = (rex & 0b1000) != 0;
+ size_t size = byte_operand ? 1 : (size_override == 0x66 ? 2 : (rex_w ? 8 : 4));
switch (size) {
case 1: os << gReg8Names[reg]; break;
case 2: os << gReg16Names[reg]; break;
case 4: os << gReg32Names[reg]; break;
+ case 8: os << gReg64Names[reg]; break;
default: LOG(FATAL) << "unexpected size " << size;
}
}
@@ -59,7 +62,8 @@ enum RegFile { GPR, MMX, SSE };
static void DumpReg(std::ostream& os, uint8_t rex, uint8_t reg,
bool byte_operand, uint8_t size_override, RegFile reg_file) {
- size_t reg_num = reg; // TODO: combine with REX.R on 64bit
+ bool rex_r = (rex & 0b0100) != 0;
+ size_t reg_num = rex_r ? (reg + 8) : reg;
if (reg_file == GPR) {
DumpReg0(os, rex, reg_num, byte_operand, size_override);
} else if (reg_file == SSE) {
@@ -70,12 +74,14 @@ static void DumpReg(std::ostream& os, uint8_t rex, uint8_t reg,
}
static void DumpBaseReg(std::ostream& os, uint8_t rex, uint8_t reg) {
- size_t reg_num = reg; // TODO: combine with REX.B on 64bit
+ bool rex_b = (rex & 0b0001) != 0;
+ size_t reg_num = rex_b ? (reg + 8) : reg;
DumpReg0(os, rex, reg_num, false, 0);
}
static void DumpIndexReg(std::ostream& os, uint8_t rex, uint8_t reg) {
- int reg_num = reg; // TODO: combine with REX.X on 64bit
+ bool rex_x = (rex & 0b0010) != 0;
+ uint8_t reg_num = rex_x ? (reg + 8) : reg;
DumpReg0(os, rex, reg_num, false, 0);
}
@@ -138,7 +144,7 @@ size_t DisassemblerX86::DumpInstruction(std::ostream& os, const uint8_t* instr)
instr++;
}
} while (have_prefixes);
- uint8_t rex = (*instr >= 0x40 && *instr <= 0x4F) ? *instr : 0;
+ uint8_t rex = (supports_rex_ && (*instr >= 0x40) && (*instr <= 0x4F)) ? *instr : 0;
bool has_modrm = false;
bool reg_is_opcode = false;
size_t immediate_bytes = 0;
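
For reference, the bit tests introduced above pull apart the four flag bits of a REX prefix byte (0x40-0x4F); a minimal decode sketch of the standard x86-64 semantics, not specific to this patch:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint8_t rex = 0x4C;  // example prefix: REX.W + REX.R
      bool rex_w = (rex & 0b1000) != 0;  // 64-bit operand size
      bool rex_r = (rex & 0b0100) != 0;  // extends ModRM.reg to r8-r15
      bool rex_x = (rex & 0b0010) != 0;  // extends SIB.index
      bool rex_b = (rex & 0b0001) != 0;  // extends ModRM.rm / SIB.base
      std::printf("W=%d R=%d X=%d B=%d\n", rex_w, rex_r, rex_x, rex_b);
      return 0;
    }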
diff --git a/disassembler/disassembler_x86.h b/disassembler/disassembler_x86.h
index 9adaff7048..2565bb1ee7 100644
--- a/disassembler/disassembler_x86.h
+++ b/disassembler/disassembler_x86.h
@@ -22,14 +22,20 @@
namespace art {
namespace x86 {
-class DisassemblerX86 : public Disassembler {
+class DisassemblerX86 FINAL : public Disassembler {
public:
- DisassemblerX86();
+ explicit DisassemblerX86(bool supports_rex) : supports_rex_(supports_rex) {
+ }
+
+ size_t Dump(std::ostream& os, const uint8_t* begin) OVERRIDE;
+ void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) OVERRIDE;
- virtual size_t Dump(std::ostream& os, const uint8_t* begin);
- virtual void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end);
private:
size_t DumpInstruction(std::ostream& os, const uint8_t* instr);
+
+ const bool supports_rex_;
+
+ DISALLOW_COPY_AND_ASSIGN(DisassemblerX86);
};
} // namespace x86
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 98bec85e0a..115363478c 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -52,6 +52,7 @@ LIBART_COMMON_SRC_FILES := \
gc/accounting/mod_union_table.cc \
gc/accounting/space_bitmap.cc \
gc/collector/garbage_collector.cc \
+ gc/collector/immune_region.cc \
gc/collector/mark_sweep.cc \
gc/collector/partial_mark_sweep.cc \
gc/collector/semi_space.cc \
@@ -125,6 +126,7 @@ LIBART_COMMON_SRC_FILES := \
os_linux.cc \
parsed_options.cc \
primitive.cc \
+ quick/inline_method_analyser.cc \
reference_table.cc \
reflection.cc \
runtime.cc \
@@ -137,6 +139,7 @@ LIBART_COMMON_SRC_FILES := \
trace.cc \
transaction.cc \
profiler.cc \
+ fault_handler.cc \
utf.cc \
utils.cc \
verifier/dex_gc_map.cc \
@@ -205,7 +208,8 @@ LIBART_TARGET_SRC_FILES_arm := \
arch/arm/portable_entrypoints_arm.S \
arch/arm/quick_entrypoints_arm.S \
arch/arm/arm_sdiv.S \
- arch/arm/thread_arm.cc
+ arch/arm/thread_arm.cc \
+ arch/arm/fault_handler_arm.cc
LIBART_TARGET_SRC_FILES_x86 := \
arch/x86/context_x86.cc \
@@ -213,7 +217,8 @@ LIBART_TARGET_SRC_FILES_x86 := \
arch/x86/jni_entrypoints_x86.S \
arch/x86/portable_entrypoints_x86.S \
arch/x86/quick_entrypoints_x86.S \
- arch/x86/thread_x86.cc
+ arch/x86/thread_x86.cc \
+ arch/x86/fault_handler_x86.cc
LIBART_TARGET_SRC_FILES_x86_64 := \
arch/x86_64/context_x86_64.cc \
@@ -222,7 +227,8 @@ LIBART_TARGET_SRC_FILES_x86_64 := \
arch/x86_64/portable_entrypoints_x86_64.S \
arch/x86_64/quick_entrypoints_x86_64.S \
arch/x86_64/thread_x86_64.cc \
- monitor_pool.cc
+ monitor_pool.cc \
+ arch/x86_64/fault_handler_x86_64.cc
LIBART_TARGET_SRC_FILES_mips := \
@@ -231,7 +237,8 @@ LIBART_TARGET_SRC_FILES_mips := \
arch/mips/jni_entrypoints_mips.S \
arch/mips/portable_entrypoints_mips.S \
arch/mips/quick_entrypoints_mips.S \
- arch/mips/thread_mips.cc
+ arch/mips/thread_mips.cc \
+ arch/mips/fault_handler_mips.cc
ifeq ($(TARGET_ARCH),arm64)
$(info TODOArm64: $(LOCAL_PATH)/Android.mk Add Arm64 specific runtime files)
@@ -261,6 +268,7 @@ LIBART_HOST_SRC_FILES += \
arch/x86_64/portable_entrypoints_x86_64.S \
arch/x86_64/quick_entrypoints_x86_64.S \
arch/x86_64/thread_x86_64.cc \
+ arch/x86_64/fault_handler_x86_64.cc \
monitor_pool.cc
else
LIBART_HOST_SRC_FILES += \
@@ -269,6 +277,7 @@ LIBART_HOST_SRC_FILES += \
arch/x86/jni_entrypoints_x86.S \
arch/x86/portable_entrypoints_x86.S \
arch/x86/quick_entrypoints_x86.S \
+ arch/x86/fault_handler_x86.cc \
arch/x86/thread_x86.cc
endif
else # HOST_ARCH != x86
@@ -291,6 +300,7 @@ LIBART_ENUM_OPERATOR_OUT_HEADER_FILES := \
lock_word.h \
mirror/class.h \
oat.h \
+ quick/inline_method_analyser.h \
thread.h \
thread_state.h \
verifier/method_verifier.h
diff --git a/runtime/arch/arm/fault_handler_arm.cc b/runtime/arch/arm/fault_handler_arm.cc
new file mode 100644
index 0000000000..c748ce9be7
--- /dev/null
+++ b/runtime/arch/arm/fault_handler_arm.cc
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include "fault_handler.h"
+#include <sys/ucontext.h>
+#include "base/macros.h"
+#include "globals.h"
+#include "base/logging.h"
+#include "base/hex_dump.h"
+#include "thread.h"
+#include "thread-inl.h"
+
+//
+// ARM specific fault handler functions.
+//
+
+namespace art {
+
+extern "C" void art_quick_throw_null_pointer_exception();
+extern "C" void art_quick_test_suspend();
+
+void FaultManager::GetMethodAndReturnPC(void* context, uintptr_t& method, uintptr_t& return_pc) {
+ struct ucontext *uc = (struct ucontext *)context;
+ struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
+ uintptr_t* sp = reinterpret_cast<uintptr_t*>(sc->arm_sp);
+ if (sp == nullptr) {
+ return;
+ }
+
+ // Work out the return PC. This will be the address of the instruction
+ // following the faulting ldr/str instruction. This is in thumb mode so
+ // the instruction might be a 16 or 32 bit one. Also, the GC map always
+ // has the bottom bit of the PC set so we also need to set that.
+
+ // Need to work out the size of the instruction that caused the exception.
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(sc->arm_pc);
+
+ uint16_t instr = ptr[0] | ptr[1] << 8;
+ bool is_32bit = ((instr & 0xF000) == 0xF000) || ((instr & 0xF800) == 0xE800);
+ uint32_t instr_size = is_32bit ? 4 : 2;
+
+ // The method is at the top of the stack.
+ method = sp[0];
+
+ return_pc = (sc->arm_pc + instr_size) | 1;
+}
+
+bool NullPointerHandler::Action(int sig, siginfo_t* info, void* context) {
+ // The code that looks for the catch location needs to know the value of the
+ // ARM PC at the point of call. For Null checks we insert a GC map that is immediately after
+ // the load/store instruction that might cause the fault. However the mapping table has
+ // the low bits set for thumb mode so we need to set the bottom bit for the LR
+ // register in order to find the mapping.
+
+ // Need to work out the size of the instruction that caused the exception.
+ struct ucontext *uc = (struct ucontext *)context;
+ struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(sc->arm_pc);
+
+ uint16_t instr = ptr[0] | ptr[1] << 8;
+ bool is_32bit = ((instr & 0xF000) == 0xF000) || ((instr & 0xF800) == 0xE800);
+ uint32_t instr_size = is_32bit ? 4 : 2;
+ sc->arm_lr = (sc->arm_pc + instr_size) | 1; // LR needs to point to gc map location
+ sc->arm_pc = reinterpret_cast<uintptr_t>(art_quick_throw_null_pointer_exception);
+ LOG(DEBUG) << "Generating null pointer exception";
+ return true;
+}
+
+// A suspend check is done using the following instruction sequence:
+// 0xf723c0b2: f8d902c0 ldr.w r0, [r9, #704] ; suspend_trigger_
+// .. possibly some intervening instructions
+// 0xf723c0b6: 6800 ldr r0, [r0, #0]
+
+// The offset from r9 is Thread::ThreadSuspendTriggerOffset().
+// To check for a suspend check, we examine the instructions that caused
+// the fault (at PC-4 and PC).
+bool SuspensionHandler::Action(int sig, siginfo_t* info, void* context) {
+ // These are the instructions to check for. The first one is the ldr r0,[r9,#xxx]
+ // where xxx is the offset of the suspend trigger.
+ uint32_t checkinst1 = 0xf8d90000 + Thread::ThreadSuspendTriggerOffset().Int32Value();
+ uint16_t checkinst2 = 0x6800;
+
+ struct ucontext *uc = (struct ucontext *)context;
+ struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
+ uint8_t* ptr2 = reinterpret_cast<uint8_t*>(sc->arm_pc);
+ uint8_t* ptr1 = ptr2 - 4;
+ LOG(DEBUG) << "checking suspend";
+
+ uint16_t inst2 = ptr2[0] | ptr2[1] << 8;
+ LOG(DEBUG) << "inst2: " << std::hex << inst2 << " checkinst2: " << checkinst2;
+ if (inst2 != checkinst2) {
+ // Second instruction is not good, not ours.
+ return false;
+ }
+
+ // The first instruction can be a little further up the instruction stream due to load
+ // hoisting in the compiler.
+ uint8_t* limit = ptr1 - 40; // Compiler will hoist to a max of 20 instructions.
+ bool found = false;
+ while (ptr1 > limit) {
+ uint32_t inst1 = ((ptr1[0] | ptr1[1] << 8) << 16) | (ptr1[2] | ptr1[3] << 8);
+ LOG(DEBUG) << "inst1: " << std::hex << inst1 << " checkinst1: " << checkinst1;
+ if (inst1 == checkinst1) {
+ found = true;
+ break;
+ }
+ ptr1 -= 2; // Min instruction size is 2 bytes.
+ }
+ if (found) {
+ LOG(DEBUG) << "suspend check match";
+ // This is a suspend check. Arrange for the signal handler to return to
+ // art_quick_test_suspend. Also set LR so that after the suspend check it
+ // will resume the instruction (current PC + 2). PC points to the
+ // ldr r0,[r0,#0] instruction (r0 will be 0, set by the trigger).
+
+ // NB: remember that we need to set the bottom bit of the LR register
+ // to switch to thumb mode.
+ LOG(DEBUG) << "arm lr: " << std::hex << sc->arm_lr;
+ LOG(DEBUG) << "arm pc: " << std::hex << sc->arm_pc;
+ sc->arm_lr = sc->arm_pc + 3; // +2 + 1 (for thumb)
+ sc->arm_pc = reinterpret_cast<uintptr_t>(art_quick_test_suspend);
+
+ // Now remove the suspend trigger that caused this fault.
+ Thread::Current()->RemoveSuspendTrigger();
+ LOG(DEBUG) << "removed suspend trigger invoking test suspend";
+ return true;
+ }
+ return false;
+}
+
+bool StackOverflowHandler::Action(int sig, siginfo_t* info, void* context) {
+ return false;
+}
+} // namespace art
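
The 16-versus-32-bit width test appears twice in this file; as a standalone sketch, a Thumb-2 instruction is 32 bits wide exactly when its first halfword has 0b11101, 0b11110 or 0b11111 in the top five bits, which is what the two masks cover:

    #include <cstdint>

    uint32_t ThumbInstructionSize(const uint8_t* pc) {
      uint16_t instr = pc[0] | (pc[1] << 8);  // little-endian halfword
      bool is_32bit = ((instr & 0xF000) == 0xF000) || ((instr & 0xF800) == 0xE800);
      return is_32bit ? 4 : 2;
    }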
diff --git a/runtime/arch/mips/fault_handler_mips.cc b/runtime/arch/mips/fault_handler_mips.cc
new file mode 100644
index 0000000000..8d494c165a
--- /dev/null
+++ b/runtime/arch/mips/fault_handler_mips.cc
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include "fault_handler.h"
+#include <sys/ucontext.h>
+#include "base/macros.h"
+#include "globals.h"
+#include "base/logging.h"
+#include "base/hex_dump.h"
+
+
+//
+// Mips specific fault handler functions.
+//
+
+namespace art {
+
+void FaultManager::GetMethodAndReturnPC(void* context, uintptr_t& method, uintptr_t& return_pc) {
+}
+
+bool NullPointerHandler::Action(int sig, siginfo_t* info, void* context) {
+ return false;
+}
+
+bool SuspensionHandler::Action(int sig, siginfo_t* info, void* context) {
+ return false;
+}
+
+bool StackOverflowHandler::Action(int sig, siginfo_t* info, void* context) {
+ return false;
+}
+} // namespace art
diff --git a/runtime/arch/x86/fault_handler_x86.cc b/runtime/arch/x86/fault_handler_x86.cc
new file mode 100644
index 0000000000..171a541bb4
--- /dev/null
+++ b/runtime/arch/x86/fault_handler_x86.cc
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include "fault_handler.h"
+#include <sys/ucontext.h>
+#include "base/macros.h"
+#include "globals.h"
+#include "base/logging.h"
+#include "base/hex_dump.h"
+
+
+//
+// X86 specific fault handler functions.
+//
+
+namespace art {
+
+void FaultManager::GetMethodAndReturnPC(void* context, uintptr_t& method, uintptr_t& return_pc) {
+}
+
+bool NullPointerHandler::Action(int sig, siginfo_t* info, void* context) {
+ return false;
+}
+
+bool SuspensionHandler::Action(int sig, siginfo_t* info, void* context) {
+ return false;
+}
+
+bool StackOverflowHandler::Action(int sig, siginfo_t* info, void* context) {
+ return false;
+}
+} // namespace art
diff --git a/runtime/arch/x86_64/fault_handler_x86_64.cc b/runtime/arch/x86_64/fault_handler_x86_64.cc
new file mode 100644
index 0000000000..3ef19fbb1b
--- /dev/null
+++ b/runtime/arch/x86_64/fault_handler_x86_64.cc
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include "fault_handler.h"
+#include <sys/ucontext.h>
+#include "base/macros.h"
+#include "globals.h"
+#include "base/logging.h"
+#include "base/hex_dump.h"
+
+
+//
+// X86_64 specific fault handler functions.
+//
+
+namespace art {
+
+void FaultManager::GetMethodAndReturnPC(void* context, uintptr_t& method, uintptr_t& return_pc) {
+}
+
+bool NullPointerHandler::Action(int sig, siginfo_t* info, void* context) {
+ return false;
+}
+
+bool SuspensionHandler::Action(int sig, siginfo_t* info, void* context) {
+ return false;
+}
+
+bool StackOverflowHandler::Action(int sig, siginfo_t* info, void* context) {
+ return false;
+}
+} // namespace art
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 81d5f4c501..5fbf8cb748 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -421,7 +421,7 @@ DEFINE_FUNCTION art_quick_invoke_static_stub
.Lreturn_float_quick2:
movss %xmm0, (%r8) // Store the floating point result.
ret
-END_FUNCTION art_quick_invoke_stub
+END_FUNCTION art_quick_invoke_static_stub
MACRO3(NO_ARG_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name, 0)
@@ -646,7 +646,43 @@ UNIMPLEMENTED art_quick_set_obj_static
UNIMPLEMENTED art_quick_get32_static
UNIMPLEMENTED art_quick_get64_static
UNIMPLEMENTED art_quick_get_obj_static
-UNIMPLEMENTED art_quick_proxy_invoke_handler
+
+DEFINE_FUNCTION art_quick_proxy_invoke_handler
+ // Save callee and GPR args, mixed together to agree with core spills bitmap of ref. and args
+ // callee save frame.
+ PUSH r15 // Callee save.
+ PUSH r14 // Callee save.
+ PUSH r13 // Callee save.
+ PUSH r12 // Callee save.
+ PUSH r9 // Quick arg 5.
+ PUSH r8 // Quick arg 4.
+ PUSH rsi // Quick arg 1.
+ PUSH rbp // Callee save.
+ PUSH rbx // Callee save.
+ PUSH rdx // Quick arg 2.
+ PUSH rcx // Quick arg 3.
+ // Create space for FPR args plus 2 slots: 1 of padding and 1 for the ArtMethod*.
+ subq LITERAL(80), %rsp
+ CFI_ADJUST_CFA_OFFSET(80)
+ // Save FPRs.
+ movq %xmm0, 16(%rsp)
+ movq %xmm1, 24(%rsp)
+ movq %xmm2, 32(%rsp)
+ movq %xmm3, 40(%rsp)
+ movq %xmm4, 48(%rsp)
+ movq %xmm5, 56(%rsp)
+ movq %xmm6, 64(%rsp)
+ movq %xmm7, 72(%rsp)
+ // Store proxy method to bottom of stack.
+ movq %rdi, 0(%rsp)
+ movq %gs:THREAD_SELF_OFFSET, %rdx // Pass Thread::Current().
+ movq %rsp, %rcx // Pass SP.
+ call PLT_SYMBOL(artQuickProxyInvokeHandler) // (proxy method, receiver, Thread*, SP)
+ movq %rax, %xmm0 // Copy return value in case of float returns.
+ addq LITERAL(168), %rsp // Pop arguments.
+ CFI_ADJUST_CFA_OFFSET(-168)
+ RETURN_OR_DELIVER_PENDING_EXCEPTION
+END_FUNCTION art_quick_proxy_invoke_handler
/*
* Called to resolve an imt conflict.
@@ -659,6 +695,7 @@ DEFINE_FUNCTION art_quick_resolution_trampoline
movq %rsp, %rcx
call PLT_SYMBOL(artQuickResolutionTrampoline) // (called, receiver, Thread*, SP)
movq %rax, %r10 // Remember returned code pointer in R10.
+ movq (%rsp), %rdi // Load called method into RDI.
RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
testq %r10, %r10 // If code pointer is NULL goto deliver pending exception.
jz 1f
@@ -674,6 +711,13 @@ END_FUNCTION art_quick_resolution_trampoline
* | |
* | caller method... |
* #-------------------# <--- SP on entry
+ *
+ * |
+ * V
+ *
+ * #-------------------#
+ * | caller method... |
+ * #-------------------#
* | Return |
* | R15 | callee save
* | R14 | callee save
@@ -697,22 +741,7 @@ END_FUNCTION art_quick_resolution_trampoline
* | Padding |
* | RDI/Method* | <- sp
* #-------------------#
- * | local ref cookie | // 4B
- * | padding | // 4B
- * #----------#--------#
- * | | | |
- * | Temp/ | SIRT | | Scratch frame is 4k
- * | Scratch | v |
- * | Frame #--------|
- * | |
- * | #--------|
- * | | ^ |
- * | | JNI | |
- * | | Stack| |
- * #----------#--------# <--- SP on native call (needs alignment?)
- * | |
- * | Stack for Regs | The trampoline assembly will pop these values
- * | | into registers for native call
+ * | Scratch Alloca | 5K scratch space
* #---------#---------#
* | | sp* |
* | Tramp. #---------#
@@ -720,6 +749,35 @@ END_FUNCTION art_quick_resolution_trampoline
* | Tramp. #---------#
* | | method |
* #-------------------# <--- SP on artQuickGenericJniTrampoline
+ *
+ * |
+ * v artQuickGenericJniTrampoline
+ *
+ * #-------------------#
+ * | caller method... |
+ * #-------------------#
+ * | Return |
+ * | Callee-Save Data |
+ * #-------------------#
+ * | SIRT |
+ * #-------------------#
+ * | Method* | <--- (1)
+ * #-------------------#
+ * | local ref cookie | // 4B
+ * | SIRT size | // 4B TODO: roll into call stack alignment?
+ * #-------------------#
+ * | JNI Call Stack |
+ * #-------------------# <--- SP on native call
+ * | |
+ * | Stack for Regs | The trampoline assembly will pop these values
+ * | | into registers for native call
+ * #-------------------#
+ * | Native code ptr |
+ * #-------------------#
+ * | Free scratch |
+ * #-------------------#
+ * | Ptr to (1) | <--- RSP
+ * #-------------------#
*/
/*
* Called to do a generic JNI down-call
@@ -752,7 +810,8 @@ DEFINE_FUNCTION art_quick_generic_jni_trampoline
// Store native ArtMethod* to bottom of stack.
movq %rdi, 0(%rsp)
movq %rsp, %rbp // save SP at callee-save frame
- CFI_DEF_CFA_REGISTER(rbp)
+ movq %rsp, %rbx
+ CFI_DEF_CFA_REGISTER(rbx)
//
// reserve a lot of space
//
@@ -778,12 +837,19 @@ DEFINE_FUNCTION art_quick_generic_jni_trampoline
movq %gs:THREAD_SELF_OFFSET, %rdi
movq %rbp, %rsi
call PLT_SYMBOL(artQuickGenericJniTrampoline) // (Thread*, sp)
- test %rax, %rax // Check for error, negative value.
+
+ // At the bottom of the alloca we now have the pointer to the method (the bottom of the
+ // callee-save frame); pop it to get the adjusted frame pointer.
+ popq %rbp
+
+ // Check for error, negative value.
+ test %rax, %rax
js .Lentry_error
- // release part of the alloca
+
+ // release part of the alloca, get the code pointer
addq %rax, %rsp
- // get the code pointer
popq %rax
+
// pop from the register-passing alloca region
// what's the right layout?
popq %rdi
@@ -816,7 +882,7 @@ DEFINE_FUNCTION art_quick_generic_jni_trampoline
call PLT_SYMBOL(artQuickGenericJniEndTrampoline)
// Tear down the alloca.
- movq %rbp, %rsp
+ movq %rbx, %rsp
CFI_DEF_CFA_REGISTER(rsp)
// Pending exceptions possible.
@@ -854,12 +920,35 @@ DEFINE_FUNCTION art_quick_generic_jni_trampoline
movq %rax, %xmm0
ret
.Lentry_error:
- movq %rbp, %rsp
+ movq %rbx, %rsp
+ CFI_DEF_CFA_REGISTER(rsp)
.Lexception_in_native:
- CFI_REL_OFFSET(rsp,176)
// TODO: the SIRT contains the this pointer which is used by the debugger for exception
// delivery.
- RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ movq 16(%rsp), %xmm0 // Restore FPRs.
+ movq 24(%rsp), %xmm1
+ movq 32(%rsp), %xmm2
+ movq 40(%rsp), %xmm3
+ movq 48(%rsp), %xmm4
+ movq 56(%rsp), %xmm5
+ movq 64(%rsp), %xmm6
+ movq 72(%rsp), %xmm7
+ // Pop the 80 bytes of FPR spill space.
+ addq LITERAL(80), %rsp
+ CFI_ADJUST_CFA_OFFSET(-80)
+ // Restore callee-save and GPR arg registers, mixed together to agree with the core spills bitmap.
+ POP rcx // Arg.
+ POP rdx // Arg.
+ POP rbx // Callee save.
+ POP rbp // Callee save.
+ POP rsi // Arg.
+ POP r8 // Arg.
+ POP r9 // Arg.
+ POP r12 // Callee save.
+ POP r13 // Callee save.
+ POP r14 // Callee save.
+ POP r15 // Callee save.
+
DELIVER_PENDING_EXCEPTION
END_FUNCTION art_quick_generic_jni_trampoline
diff --git a/runtime/base/hex_dump_test.cc b/runtime/base/hex_dump_test.cc
index d950961213..3d782b267c 100644
--- a/runtime/base/hex_dump_test.cc
+++ b/runtime/base/hex_dump_test.cc
@@ -24,11 +24,18 @@
namespace art {
+#if defined(__LP64__)
+#define ZEROPREFIX "00000000"
+#else
+#define ZEROPREFIX
+#endif
+
TEST(HexDump, OneLine) {
const char* test_text = "0123456789abcdef";
std::ostringstream oss;
oss << HexDump(test_text, strlen(test_text), false, "");
EXPECT_STREQ(oss.str().c_str(),
+ ZEROPREFIX
"00000000: 30 31 32 33 34 35 36 37 38 39 61 62 63 64 65 66 0123456789abcdef");
}
@@ -37,7 +44,9 @@ TEST(HexDump, MultiLine) {
std::ostringstream oss;
oss << HexDump(test_text, strlen(test_text), false, "");
EXPECT_STREQ(oss.str().c_str(),
+ ZEROPREFIX
"00000000: 30 31 32 33 34 35 36 37 38 39 61 62 63 64 65 66 0123456789abcdef\n"
+ ZEROPREFIX
"00000010: 30 31 32 33 34 35 36 37 38 39 41 42 43 44 45 46 0123456789ABCDEF");
}
@@ -56,7 +65,7 @@ TEST(HexDump, Prefix) {
std::ostringstream oss;
oss << HexDump(test_text, strlen(test_text), false, "test prefix: ");
EXPECT_STREQ(oss.str().c_str(),
- "test prefix: 00000000: 30 31 32 33 34 35 36 37 38 39 61 62 63 64 65 66 "
+ "test prefix: " ZEROPREFIX "00000000: 30 31 32 33 34 35 36 37 38 39 61 62 63 64 65 66 "
"0123456789abcdef");
}
diff --git a/runtime/base/macros.h b/runtime/base/macros.h
index b193ff18d4..81755144b9 100644
--- a/runtime/base/macros.h
+++ b/runtime/base/macros.h
@@ -22,7 +22,7 @@
#define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
// C++11 final and override keywords that were introduced in GCC version 4.7.
-#if GCC_VERSION >= 40700
+#if defined(__clang__) || GCC_VERSION >= 40700
#define OVERRIDE override
#define FINAL final
#else
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 6255c8c4f0..cef99543b5 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -504,13 +504,6 @@ void ClassLinker::FinishInit(Thread* self) {
CHECK_STREQ(fh.GetName(), "zombie");
CHECK_STREQ(fh.GetTypeDescriptor(), "Ljava/lang/Object;");
- gc::Heap* heap = Runtime::Current()->GetHeap();
- heap->SetReferenceOffsets(referent->GetOffset(),
- queue->GetOffset(),
- queueNext->GetOffset(),
- pendingNext->GetOffset(),
- zombie->GetOffset());
-
// ensure all class_roots_ are initialized
for (size_t i = 0; i < kClassRootsMax; i++) {
ClassRoot class_root = static_cast<ClassRoot>(i);
@@ -573,6 +566,38 @@ bool ClassLinker::GenerateOatFile(const char* dex_filename,
argv.push_back("-classpath");
argv.push_back("--runtime-arg");
argv.push_back(Runtime::Current()->GetClassPathString());
+
+ argv.push_back("--runtime-arg");
+ std::string checkstr = "-implicit-checks";
+
+ int nchecks = 0;
+ char checksep = ':';
+
+ if (!Runtime::Current()->ExplicitNullChecks()) {
+ checkstr += checksep;
+ checksep = ',';
+ checkstr += "null";
+ ++nchecks;
+ }
+ if (!Runtime::Current()->ExplicitSuspendChecks()) {
+ checkstr += checksep;
+ checksep = ',';
+ checkstr += "suspend";
+ ++nchecks;
+ }
+
+ if (!Runtime::Current()->ExplicitStackOverflowChecks()) {
+ checkstr += checksep;
+ checksep = ',';
+ checkstr += "stack";
+ ++nchecks;
+ }
+
+ if (nchecks == 0) {
+ checkstr += ":none";
+ }
+ argv.push_back(checkstr);
+
if (!kIsTargetBuild) {
argv.push_back("--host");
}
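
As a worked example of the string built above: with all three explicit checks turned off, the loop pushes "--runtime-arg" followed by "-implicit-checks:null,suspend,stack" onto the child dex2oat's argv; with every explicit check left enabled it pushes "-implicit-checks:none" instead.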
@@ -1597,7 +1622,10 @@ const void* ClassLinker::GetQuickOatCodeFor(mirror::ArtMethod* method) {
}
const void* result = GetOatMethodFor(method).GetQuickCode();
if (result == nullptr) {
- if (method->IsPortableCompiled()) {
+ if (method->IsNative()) {
+ // No code and native? Use generic trampoline.
+ result = GetQuickGenericJniTrampoline();
+ } else if (method->IsPortableCompiled()) {
// No code? Do we expect portable code?
result = GetQuickToPortableBridge();
} else {
@@ -1707,12 +1735,12 @@ void ClassLinker::FixupStaticTrampolines(mirror::Class* klass) {
bool have_portable_code = false;
if (enter_interpreter) {
// Use interpreter entry point.
-
- // check whether the method is native, in which case it's generic JNI
- portable_code = GetPortableToInterpreterBridge();
+ // Check whether the method is native, in which case it's generic JNI.
if (quick_code == nullptr && portable_code == nullptr && method->IsNative()) {
quick_code = GetQuickGenericJniTrampoline();
+ portable_code = GetPortableToQuickBridge();
} else {
+ portable_code = GetPortableToInterpreterBridge();
quick_code = GetQuickToInterpreterBridge();
}
} else {
@@ -1732,7 +1760,7 @@ void ClassLinker::FixupStaticTrampolines(mirror::Class* klass) {
}
static void LinkCode(const SirtRef<mirror::ArtMethod>& method, const OatFile::OatClass* oat_class,
- uint32_t method_index)
+ const DexFile& dex_file, uint32_t dex_method_index, uint32_t method_index)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Method shouldn't have already been linked.
DCHECK(method->GetEntryPointFromQuickCompiledCode() == nullptr);
@@ -1787,6 +1815,35 @@ static void LinkCode(const SirtRef<mirror::ArtMethod>& method, const OatFile::Oa
if (method->IsNative()) {
// Unregistering restores the dlsym lookup stub.
method->UnregisterNative(Thread::Current());
+
+ if (enter_interpreter) {
+ // We have a native method here without code. Then it should have either the GenericJni
+ // trampoline as entrypoint (non-static), or the Resolution trampoline (static).
+ DCHECK(method->GetEntryPointFromQuickCompiledCode() ==
+ GetQuickResolutionTrampoline(runtime->GetClassLinker())
+ || method->GetEntryPointFromQuickCompiledCode() == GetQuickGenericJniTrampoline());
+
+ DCHECK_EQ(method->GetFrameSizeInBytes<false>(), 0U);
+
+ // Fix up method metadata if necessary.
+ uint32_t s_len;
+ const char* shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(dex_method_index), &s_len);
+ uint32_t refs = 1; // Native method always has "this" or class.
+ for (uint32_t i = 1; i < s_len; ++i) {
+ if (shorty[i] == 'L') {
+ refs++;
+ }
+ }
+ size_t sirt_size = StackIndirectReferenceTable::GetAlignedSirtSize(refs);
+
+ // Get the generic spill masks and base frame size.
+ mirror::ArtMethod* callee_save_method =
+ Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
+
+ method->SetFrameSizeInBytes(callee_save_method->GetFrameSizeInBytes() + sirt_size);
+ method->SetCoreSpillMask(callee_save_method->GetCoreSpillMask());
+ method->SetFpSpillMask(callee_save_method->GetFpSpillMask());
+ }
}
// Allow instrumentation its chance to hijack code.
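
A minimal sketch of the reference count the shorty scan above computes (index 0 of a shorty is the return type, hence the loop starting at 1, and the implicit this/class reference is counted up front; the function name here is illustrative):

    #include <cstdint>
    #include <cstring>

    uint32_t CountNativeMethodReferences(const char* shorty) {
      uint32_t refs = 1;  // "this" for instance methods, the class for static ones.
      for (uint32_t i = 1; i < std::strlen(shorty); ++i) {
        if (shorty[i] == 'L') {  // 'L' marks a reference-typed argument.
          ++refs;
        }
      }
      return refs;
    }

    // e.g. CountNativeMethodReferences("VLIL") == 3: this/class plus two
    // reference arguments; the SIRT is then sized for 3 entries.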
@@ -1899,7 +1956,7 @@ void ClassLinker::LoadClass(const DexFile& dex_file,
}
klass->SetDirectMethod(i, method.get());
if (oat_class.get() != NULL) {
- LinkCode(method, oat_class.get(), class_def_method_index);
+ LinkCode(method, oat_class.get(), dex_file, it.GetMemberIndex(), class_def_method_index);
}
method->SetMethodIndex(class_def_method_index);
class_def_method_index++;
@@ -1913,7 +1970,7 @@ void ClassLinker::LoadClass(const DexFile& dex_file,
klass->SetVirtualMethod(i, method.get());
DCHECK_EQ(class_def_method_index, it.NumDirectMethods() + i);
if (oat_class.get() != NULL) {
- LinkCode(method, oat_class.get(), class_def_method_index);
+ LinkCode(method, oat_class.get(), dex_file, it.GetMemberIndex(), class_def_method_index);
}
class_def_method_index++;
}
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index d0555fff0f..7eb7b01e65 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -32,6 +32,7 @@
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/proxy.h"
+#include "mirror/reference.h"
#include "mirror/stack_trace_element.h"
#include "sirt_ref.h"
@@ -624,6 +625,25 @@ struct DexCacheOffsets : public CheckOffsets<mirror::DexCache> {
};
};
+struct ReferenceOffsets : public CheckOffsets<mirror::Reference> {
+ ReferenceOffsets() : CheckOffsets<mirror::Reference>(false, "Ljava/lang/ref/Reference;") {
+ // alphabetical references
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Reference, pending_next_), "pendingNext"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Reference, queue_), "queue"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Reference, queue_next_), "queueNext"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Reference, referent_), "referent"));
+ };
+};
+
+struct FinalizerReferenceOffsets : public CheckOffsets<mirror::FinalizerReference> {
+ FinalizerReferenceOffsets() : CheckOffsets<mirror::FinalizerReference>(false, "Ljava/lang/ref/FinalizerReference;") {
+ // alphabetical references
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::FinalizerReference, next_), "next"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::FinalizerReference, prev_), "prev"));
+ offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::FinalizerReference, zombie_), "zombie"));
+ };
+};
+
// C++ fields must exactly match the fields in the Java classes. If this fails,
// reorder the fields in the C++ class. Managed class fields are ordered by
// ClassLinker::LinkFields.
@@ -639,6 +659,8 @@ TEST_F(ClassLinkerTest, ValidateFieldOrderOfJavaCppUnionClasses) {
EXPECT_TRUE(ClassLoaderOffsets().Check());
EXPECT_TRUE(ProxyOffsets().Check());
EXPECT_TRUE(DexCacheOffsets().Check());
+ EXPECT_TRUE(ReferenceOffsets().Check());
+ EXPECT_TRUE(FinalizerReferenceOffsets().Check());
EXPECT_TRUE(ClassClassOffsets().Check());
EXPECT_TRUE(StringClassOffsets().Check());
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 7e2dfd2766..9808869de9 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -28,7 +28,6 @@
#include "gc/accounting/card_table-inl.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
-#include "invoke_arg_array_builder.h"
#include "jdwp/object_registry.h"
#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
@@ -39,6 +38,7 @@
#include "mirror/object_array-inl.h"
#include "mirror/throwable.h"
#include "object_utils.h"
+#include "reflection.h"
#include "safe_map.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
@@ -222,7 +222,8 @@ static bool IsBreakpoint(const mirror::ArtMethod* m, uint32_t dex_pc)
return false;
}
-static bool IsSuspendedForDebugger(ScopedObjectAccessUnchecked& soa, Thread* thread) {
+static bool IsSuspendedForDebugger(ScopedObjectAccessUnchecked& soa, Thread* thread)
+ LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) {
MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
// A thread may be suspended for GC; in this code, we really want to know whether
// there's a debugger suspension active.
@@ -743,8 +744,7 @@ JDWP::JdwpError Dbg::GetMonitorInfo(JDWP::ObjectId object_id, JDWP::ExpandBuf* r
JDWP::JdwpError Dbg::GetOwnedMonitors(JDWP::ObjectId thread_id,
std::vector<JDWP::ObjectId>& monitors,
- std::vector<uint32_t>& stack_depths)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ std::vector<uint32_t>& stack_depths) {
ScopedObjectAccessUnchecked soa(Thread::Current());
MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
Thread* thread;
@@ -793,8 +793,8 @@ JDWP::JdwpError Dbg::GetOwnedMonitors(JDWP::ObjectId thread_id,
return JDWP::ERR_NONE;
}
-JDWP::JdwpError Dbg::GetContendedMonitor(JDWP::ObjectId thread_id, JDWP::ObjectId& contended_monitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+JDWP::JdwpError Dbg::GetContendedMonitor(JDWP::ObjectId thread_id,
+ JDWP::ObjectId& contended_monitor) {
ScopedObjectAccessUnchecked soa(Thread::Current());
MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
Thread* thread;
@@ -1025,6 +1025,9 @@ JDWP::JdwpError Dbg::GetSourceFile(JDWP::RefTypeId class_id, std::string& result
if (c == NULL) {
return status;
}
+ if (c->IsProxyClass()) {
+ return JDWP::ERR_ABSENT_INFORMATION;
+ }
result = ClassHelper(c).GetSourceFile();
return JDWP::ERR_NONE;
}
@@ -1705,22 +1708,19 @@ JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* p
if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
// Zombie threads are in the null group.
expandBufAddObjectId(pReply, JDWP::ObjectId(0));
- return JDWP::ERR_NONE;
+ error = JDWP::ERR_NONE;
+ } else if (error == JDWP::ERR_NONE) {
+ mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
+ CHECK(c != nullptr);
+ mirror::ArtField* f = c->FindInstanceField("group", "Ljava/lang/ThreadGroup;");
+ CHECK(f != nullptr);
+ mirror::Object* group = f->GetObject(thread_object);
+ CHECK(group != nullptr);
+ JDWP::ObjectId thread_group_id = gRegistry->Add(group);
+ expandBufAddObjectId(pReply, thread_group_id);
}
- if (error != JDWP::ERR_NONE) {
- return error;
- }
- mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
- CHECK(c != nullptr);
- mirror::ArtField* f = c->FindInstanceField("group", "Ljava/lang/ThreadGroup;");
- CHECK(f != NULL);
- mirror::Object* group = f->GetObject(thread_object);
- CHECK(group != NULL);
- JDWP::ObjectId thread_group_id = gRegistry->Add(group);
soa.Self()->EndAssertNoThreadSuspension(old_cause);
-
- expandBufAddObjectId(pReply, thread_group_id);
- return JDWP::ERR_NONE;
+ return error;
}
std::string Dbg::GetThreadGroupName(JDWP::ObjectId thread_group_id) {
@@ -1798,7 +1798,8 @@ JDWP::JdwpThreadStatus Dbg::ToJdwpThreadStatus(ThreadState state) {
return JDWP::TS_ZOMBIE;
}
-JDWP::JdwpError Dbg::GetThreadStatus(JDWP::ObjectId thread_id, JDWP::JdwpThreadStatus* pThreadStatus, JDWP::JdwpSuspendStatus* pSuspendStatus) {
+JDWP::JdwpError Dbg::GetThreadStatus(JDWP::ObjectId thread_id, JDWP::JdwpThreadStatus* pThreadStatus,
+ JDWP::JdwpSuspendStatus* pSuspendStatus) {
ScopedObjectAccess soa(Thread::Current());
*pSuspendStatus = JDWP::SUSPEND_STATUS_NOT_SUSPENDED;
@@ -2607,6 +2608,7 @@ void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location) {
class ScopedThreadSuspension {
public:
ScopedThreadSuspension(Thread* self, JDWP::ObjectId thread_id)
+ LOCKS_EXCLUDED(Locks::thread_list_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
thread_(NULL),
error_(JDWP::ERR_NONE),
@@ -3050,10 +3052,8 @@ void Dbg::ExecuteMethod(DebugInvokeReq* pReq) {
CHECK_EQ(sizeof(jvalue), sizeof(uint64_t));
- MethodHelper mh(m.get());
- ArgArray arg_array(mh.GetShorty(), mh.GetShortyLength());
- arg_array.BuildArgArray(soa, pReq->receiver, reinterpret_cast<jvalue*>(pReq->arg_values));
- InvokeWithArgArray(soa, m.get(), &arg_array, &pReq->result_value, mh.GetShorty());
+ pReq->result_value = InvokeWithJValues(soa, pReq->receiver, soa.EncodeMethod(pReq->method),
+ reinterpret_cast<jvalue*>(pReq->arg_values));
mirror::Throwable* exception = soa.Self()->GetException(NULL);
soa.Self()->ClearException();
diff --git a/runtime/debugger.h b/runtime/debugger.h
index 6c44bdea8f..6610347ab8 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -220,8 +220,11 @@ class Dbg {
static JDWP::JdwpError GetOwnedMonitors(JDWP::ObjectId thread_id,
std::vector<JDWP::ObjectId>& monitors,
std::vector<uint32_t>& stack_depths)
+ LOCKS_EXCLUDED(Locks::thread_list_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static JDWP::JdwpError GetContendedMonitor(JDWP::ObjectId thread_id, JDWP::ObjectId& contended_monitor)
+ static JDWP::JdwpError GetContendedMonitor(JDWP::ObjectId thread_id,
+ JDWP::ObjectId& contended_monitor)
+ LOCKS_EXCLUDED(Locks::thread_list_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
//
@@ -301,7 +304,8 @@ class Dbg {
static JDWP::JdwpError GetThreadName(JDWP::ObjectId thread_id, std::string& name)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
LOCKS_EXCLUDED(Locks::thread_list_lock_);
- static JDWP::JdwpError GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply);
+ static JDWP::JdwpError GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply)
+ LOCKS_EXCLUDED(Locks::thread_list_lock_);
static std::string GetThreadGroupName(JDWP::ObjectId thread_group_id);
static JDWP::ObjectId GetThreadGroupParent(JDWP::ObjectId thread_group_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -310,8 +314,14 @@ class Dbg {
static JDWP::ObjectId GetMainThreadGroupId();
static JDWP::JdwpThreadStatus ToJdwpThreadStatus(ThreadState state);
- static JDWP::JdwpError GetThreadStatus(JDWP::ObjectId thread_id, JDWP::JdwpThreadStatus* pThreadStatus, JDWP::JdwpSuspendStatus* pSuspendStatus);
- static JDWP::JdwpError GetThreadDebugSuspendCount(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply);
+ static JDWP::JdwpError GetThreadStatus(JDWP::ObjectId thread_id,
+ JDWP::JdwpThreadStatus* pThreadStatus,
+ JDWP::JdwpSuspendStatus* pSuspendStatus)
+ LOCKS_EXCLUDED(Locks::thread_list_lock_);
+ static JDWP::JdwpError GetThreadDebugSuspendCount(JDWP::ObjectId thread_id,
+ JDWP::ExpandBuf* pReply)
+ LOCKS_EXCLUDED(Locks::thread_list_lock_,
+ Locks::thread_suspend_count_lock_);
// static void WaitForSuspend(JDWP::ObjectId thread_id);
// Fills 'thread_ids' with the threads in the given thread group. If thread_group_id == 0,
@@ -321,9 +331,11 @@ class Dbg {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void GetChildThreadGroups(JDWP::ObjectId thread_group_id, std::vector<JDWP::ObjectId>& child_thread_group_ids);
- static JDWP::JdwpError GetThreadFrameCount(JDWP::ObjectId thread_id, size_t& result);
+ static JDWP::JdwpError GetThreadFrameCount(JDWP::ObjectId thread_id, size_t& result)
+ LOCKS_EXCLUDED(Locks::thread_list_lock_);
static JDWP::JdwpError GetThreadFrames(JDWP::ObjectId thread_id, size_t start_frame,
size_t frame_count, JDWP::ExpandBuf* buf)
+ LOCKS_EXCLUDED(Locks::thread_list_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static JDWP::ObjectId GetThreadSelfId()
@@ -350,12 +362,15 @@ class Dbg {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void GetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int slot,
JDWP::JdwpTag tag, uint8_t* buf, size_t expectedLen)
+ LOCKS_EXCLUDED(Locks::thread_list_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void SetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int slot,
JDWP::JdwpTag tag, uint64_t value, size_t width)
+ LOCKS_EXCLUDED(Locks::thread_list_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static JDWP::JdwpError Interrupt(JDWP::ObjectId thread_id);
+ static JDWP::JdwpError Interrupt(JDWP::ObjectId thread_id)
+ LOCKS_EXCLUDED(Locks::thread_list_lock_);
/*
* Debugger notification
@@ -413,6 +428,7 @@ class Dbg {
JDWP::JdwpStepDepth depth)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void UnconfigureStep(JDWP::ObjectId thread_id)
+ LOCKS_EXCLUDED(Locks::thread_list_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static JDWP::JdwpError InvokeMethod(JDWP::ObjectId thread_id, JDWP::ObjectId object_id,
@@ -431,7 +447,8 @@ class Dbg {
*/
static void DdmSendThreadNotification(Thread* t, uint32_t type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static void DdmSetThreadNotification(bool enable);
+ static void DdmSetThreadNotification(bool enable)
+ LOCKS_EXCLUDED(Locks::thread_list_lock_);
static bool DdmHandlePacket(JDWP::Request& request, uint8_t** pReplyBuf, int* pReplyLen);
static void DdmConnected() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void DdmDisconnected() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -446,17 +463,21 @@ class Dbg {
* Recent allocation tracking support.
*/
static void RecordAllocation(mirror::Class* type, size_t byte_count)
+ LOCKS_EXCLUDED(alloc_tracker_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static void SetAllocTrackingEnabled(bool enabled);
+ static void SetAllocTrackingEnabled(bool enabled) LOCKS_EXCLUDED(alloc_tracker_lock_);
static bool IsAllocTrackingEnabled() {
return recent_allocation_records_ != nullptr;
}
- static jbyteArray GetRecentAllocations() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static jbyteArray GetRecentAllocations()
+ LOCKS_EXCLUDED(alloc_tracker_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static size_t HeadIndex() EXCLUSIVE_LOCKS_REQUIRED(alloc_tracker_lock_);
- static void DumpRecentAllocations();
+ static void DumpRecentAllocations() LOCKS_EXCLUDED(alloc_tracker_lock_);
// Updates the stored direct object pointers (called from SweepSystemWeaks).
static void UpdateObjectPointers(IsMarkedCallback* callback, void* arg)
+ LOCKS_EXCLUDED(alloc_tracker_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
enum HpifWhen {
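These LOCKS_EXCLUDED / SHARED_LOCKS_REQUIRED attributes feed Clang's -Wthread-safety analysis. A minimal self-contained sketch of how such annotations read (macro definitions assumed; the real ones live in base/macros.h and base/mutex.h, which this hunk does not show):

    #define LOCKS_EXCLUDED(...) __attribute__((locks_excluded(__VA_ARGS__)))
    #define SHARED_LOCKS_REQUIRED(...) __attribute__((shared_locks_required(__VA_ARGS__)))

    struct __attribute__((lockable)) FakeMutex {};
    FakeMutex thread_list_lock_;

    // The function acquires thread_list_lock_ itself, so callers must not hold it.
    void GetThreadFrameCount() LOCKS_EXCLUDED(thread_list_lock_);
    // Callers must already hold the lock, at least for reading.
    void GetThreadFrames() SHARED_LOCKS_REQUIRED(thread_list_lock_);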
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index fe338063bb..0c8a4f044b 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -835,7 +835,7 @@ bool ElfFile::Load(bool executable, std::string* error_msg) {
if ((program_header.p_flags & PF_R) != 0) {
prot |= PROT_READ;
}
- int flags = MAP_FIXED;
+ int flags = 0;
if (writable_) {
prot |= PROT_WRITE;
flags |= MAP_SHARED;
@@ -853,7 +853,7 @@ bool ElfFile::Load(bool executable, std::string* error_msg) {
program_header.p_memsz,
prot, flags, file_->Fd(),
program_header.p_offset,
- true,
+ true, // implies MAP_FIXED
file_->GetPath().c_str(),
error_msg));
if (segment.get() == nullptr) {
@@ -978,7 +978,11 @@ void ElfFile::GdbJITSupport() {
!check_section_name(all, 11, ".debug_str")) {
return;
}
-
+#ifdef __LP64__
+ if (true) {
+ return; // No ELF debug support in 64-bit.
+ }
+#endif
// This is not needed if we have no .text segment.
uint32_t text_start_addr = 0;
for (uint32_t i = 0; i < segments_.size(); i++) {
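The MAP_FIXED change pairs with the annotated `true` argument above: the fixed-address requirement now flows through the mapping helper's boolean rather than the raw flags. A sketch of that split in plain mmap terms (hypothetical helper, not ART's MemMap API):

    #include <sys/mman.h>
    #include <sys/types.h>

    void* MapSegment(void* addr, size_t len, int prot, int flags, int fd,
                     off_t offset, bool fixed) {
      if (fixed) {
        flags |= MAP_FIXED;  // The mapping must land exactly at 'addr'.
      }
      return mmap(addr, len, prot, flags, fd, offset);
    }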
diff --git a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
index 2067a455af..a0ba6b9846 100644
--- a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
+++ b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
@@ -16,10 +16,10 @@
#include "class_linker.h"
#include "interpreter/interpreter.h"
-#include "invoke_arg_array_builder.h"
#include "mirror/art_method-inl.h"
#include "mirror/object-inl.h"
#include "object_utils.h"
+#include "reflection.h"
#include "runtime.h"
#include "stack.h"
@@ -46,9 +46,7 @@ extern "C" void artInterpreterToCompiledCodeBridge(Thread* self, MethodHelper& m
}
uint16_t arg_offset = (code_item == NULL) ? 0 : code_item->registers_size_ - code_item->ins_size_;
if (kUsePortableCompiler) {
- ArgArray arg_array(mh.GetShorty(), mh.GetShortyLength());
- arg_array.BuildArgArrayFromFrame(shadow_frame, arg_offset);
- method->Invoke(self, arg_array.GetArray(), arg_array.GetNumBytes(), result, mh.GetShorty());
+ InvokeWithShadowFrame(self, shadow_frame, arg_offset, mh, result);
} else {
method->Invoke(self, shadow_frame->GetVRegArgs(arg_offset),
(shadow_frame->NumberOfVRegs() - arg_offset) * sizeof(uint32_t),
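The arg_offset line above reads easiest with concrete numbers (illustrative):

    // registers_size_ = 5, ins_size_ = 2  =>  arg_offset = 5 - 2 = 3.
    // Incoming arguments occupy the highest vregs (v3..v4 here), and that
    // slice of the shadow frame is what the invoke path receives.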
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 08de95f255..184e5e9879 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -21,13 +21,13 @@
#include "entrypoints/entrypoint_utils.h"
#include "gc/accounting/card_table-inl.h"
#include "interpreter/interpreter.h"
-#include "invoke_arg_array_builder.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "object_utils.h"
#include "runtime.h"
+#include "scoped_thread_state_change.h"
namespace art {
@@ -434,38 +434,7 @@ class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor {
uint32_t shorty_len, ShadowFrame* sf, size_t first_arg_reg) :
QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {}
- void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE {
- Primitive::Type type = GetParamPrimitiveType();
- switch (type) {
- case Primitive::kPrimLong: // Fall-through.
- case Primitive::kPrimDouble:
- if (IsSplitLongOrDouble()) {
- sf_->SetVRegLong(cur_reg_, ReadSplitLongParam());
- } else {
- sf_->SetVRegLong(cur_reg_, *reinterpret_cast<jlong*>(GetParamAddress()));
- }
- ++cur_reg_;
- break;
- case Primitive::kPrimNot: {
- StackReference<mirror::Object>* stack_ref =
- reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
- sf_->SetVRegReference(cur_reg_, stack_ref->AsMirrorPtr());
- }
- break;
- case Primitive::kPrimBoolean: // Fall-through.
- case Primitive::kPrimByte: // Fall-through.
- case Primitive::kPrimChar: // Fall-through.
- case Primitive::kPrimShort: // Fall-through.
- case Primitive::kPrimInt: // Fall-through.
- case Primitive::kPrimFloat:
- sf_->SetVReg(cur_reg_, *reinterpret_cast<jint*>(GetParamAddress()));
- break;
- case Primitive::kPrimVoid:
- LOG(FATAL) << "UNREACHABLE";
- break;
- }
- ++cur_reg_;
- }
+ void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
private:
ShadowFrame* const sf_;
@@ -474,6 +443,39 @@ class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor {
DISALLOW_COPY_AND_ASSIGN(BuildQuickShadowFrameVisitor);
};
+void BuildQuickShadowFrameVisitor::Visit() {
+ Primitive::Type type = GetParamPrimitiveType();
+ switch (type) {
+ case Primitive::kPrimLong: // Fall-through.
+ case Primitive::kPrimDouble:
+ if (IsSplitLongOrDouble()) {
+ sf_->SetVRegLong(cur_reg_, ReadSplitLongParam());
+ } else {
+ sf_->SetVRegLong(cur_reg_, *reinterpret_cast<jlong*>(GetParamAddress()));
+ }
+ ++cur_reg_;
+ break;
+ case Primitive::kPrimNot: {
+ StackReference<mirror::Object>* stack_ref =
+ reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
+ sf_->SetVRegReference(cur_reg_, stack_ref->AsMirrorPtr());
+ }
+ break;
+ case Primitive::kPrimBoolean: // Fall-through.
+ case Primitive::kPrimByte: // Fall-through.
+ case Primitive::kPrimChar: // Fall-through.
+ case Primitive::kPrimShort: // Fall-through.
+ case Primitive::kPrimInt: // Fall-through.
+ case Primitive::kPrimFloat:
+ sf_->SetVReg(cur_reg_, *reinterpret_cast<jint*>(GetParamAddress()));
+ break;
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "UNREACHABLE";
+ break;
+ }
+ ++cur_reg_;
+}
+
extern "C" uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Thread* self,
mirror::ArtMethod** sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
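This hunk establishes the pattern applied to all three visitors in this file: the override is declared in the class and defined out-of-line after it. A generic sketch of the refactor (names hypothetical):

    struct ExampleVisitor {
      void Visit();  // Declared in-class: the class body stays scannable.
    };

    // Defined out-of-line in the same translation unit; behavior is unchanged,
    // only the source layout moves.
    void ExampleVisitor::Visit() {
      // ... switch over the parameter types, as in the visitors above ...
    }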
@@ -532,56 +534,61 @@ class BuildQuickArgumentVisitor FINAL : public QuickArgumentVisitor {
std::vector<jvalue>* args) :
QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {}
- void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE {
- jvalue val;
- Primitive::Type type = GetParamPrimitiveType();
- switch (type) {
- case Primitive::kPrimNot: {
- StackReference<mirror::Object>* stack_ref =
- reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
- val.l = soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
- references_.push_back(std::make_pair(val.l, stack_ref));
- break;
- }
- case Primitive::kPrimLong: // Fall-through.
- case Primitive::kPrimDouble:
- if (IsSplitLongOrDouble()) {
- val.j = ReadSplitLongParam();
- } else {
- val.j = *reinterpret_cast<jlong*>(GetParamAddress());
- }
- break;
- case Primitive::kPrimBoolean: // Fall-through.
- case Primitive::kPrimByte: // Fall-through.
- case Primitive::kPrimChar: // Fall-through.
- case Primitive::kPrimShort: // Fall-through.
- case Primitive::kPrimInt: // Fall-through.
- case Primitive::kPrimFloat:
- val.i = *reinterpret_cast<jint*>(GetParamAddress());
- break;
- case Primitive::kPrimVoid:
- LOG(FATAL) << "UNREACHABLE";
- val.j = 0;
- break;
- }
- args_->push_back(val);
- }
+ void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
- void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // Fixup any references which may have changed.
- for (const auto& pair : references_) {
- pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
- }
- }
+ void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
- ScopedObjectAccessUnchecked* soa_;
- std::vector<jvalue>* args_;
+ ScopedObjectAccessUnchecked* const soa_;
+ std::vector<jvalue>* const args_;
// References which we must update when exiting in case the GC moved the objects.
std::vector<std::pair<jobject, StackReference<mirror::Object>*> > references_;
+
DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor);
};
+void BuildQuickArgumentVisitor::Visit() {
+ jvalue val;
+ Primitive::Type type = GetParamPrimitiveType();
+ switch (type) {
+ case Primitive::kPrimNot: {
+ StackReference<mirror::Object>* stack_ref =
+ reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
+ val.l = soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
+ references_.push_back(std::make_pair(val.l, stack_ref));
+ break;
+ }
+ case Primitive::kPrimLong: // Fall-through.
+ case Primitive::kPrimDouble:
+ if (IsSplitLongOrDouble()) {
+ val.j = ReadSplitLongParam();
+ } else {
+ val.j = *reinterpret_cast<jlong*>(GetParamAddress());
+ }
+ break;
+ case Primitive::kPrimBoolean: // Fall-through.
+ case Primitive::kPrimByte: // Fall-through.
+ case Primitive::kPrimChar: // Fall-through.
+ case Primitive::kPrimShort: // Fall-through.
+ case Primitive::kPrimInt: // Fall-through.
+ case Primitive::kPrimFloat:
+ val.i = *reinterpret_cast<jint*>(GetParamAddress());
+ break;
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "UNREACHABLE";
+ val.j = 0;
+ break;
+ }
+ args_->push_back(val);
+}
+
+void BuildQuickArgumentVisitor::FixupReferences() {
+ // Fixup any references which may have changed.
+ for (const auto& pair : references_) {
+ pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
+ }
+}
+
// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method
// which is responsible for recording callee save registers. We explicitly place into jobjects the
// incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a
@@ -644,30 +651,35 @@ class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor {
uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) :
QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}
- void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE {
- if (IsParamAReference()) {
- StackReference<mirror::Object>* stack_ref =
- reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
- jobject reference =
- soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
- references_.push_back(std::make_pair(reference, stack_ref));
- }
- }
+ void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
- void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // Fixup any references which may have changed.
- for (const auto& pair : references_) {
- pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
- }
- }
+ void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
- ScopedObjectAccessUnchecked* soa_;
+ ScopedObjectAccessUnchecked* const soa_;
// References which we must update when exiting in case the GC moved the objects.
std::vector<std::pair<jobject, StackReference<mirror::Object>*> > references_;
DISALLOW_COPY_AND_ASSIGN(RememberForGcArgumentVisitor);
};
+void RememberForGcArgumentVisitor::Visit() {
+ if (IsParamAReference()) {
+ StackReference<mirror::Object>* stack_ref =
+ reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
+ jobject reference =
+ soa_->AddLocalReference<jobject>(stack_ref->AsMirrorPtr());
+ references_.push_back(std::make_pair(reference, stack_ref));
+ }
+}
+
+void RememberForGcArgumentVisitor::FixupReferences() {
+ // Fixup any references which may have changed.
+ for (const auto& pair : references_) {
+ pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
+ }
+}
+
// Lazily resolve a method for quick. Called by stub code.
extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called,
mirror::Object* receiver,
@@ -856,9 +868,10 @@ extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called,
*
* void PushStack(uintptr_t): Push a value to the stack.
*
- * uintptr_t PushSirt(mirror::Object* ref): Add a reference to the Sirt. Is guaranteed != nullptr.
+ * uintptr_t PushSirt(mirror::Object* ref): Add a reference to the Sirt. The ref may be nullptr,
+ *                    since null references must still be represented for null initialization.
* Must return the jobject, that is, the reference to the
- * entry in the Sirt.
+ * entry in the Sirt (nullptr if necessary).
*
*/
template <class T> class BuildGenericJniFrameStateMachine {
@@ -948,12 +961,7 @@ template <class T> class BuildGenericJniFrameStateMachine {
}
void AdvanceSirt(mirror::Object* ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- uintptr_t sirtRef;
- if (ptr != nullptr) {
- sirtRef = PushSirt(ptr);
- } else {
- sirtRef = reinterpret_cast<uintptr_t>(nullptr);
- }
+ uintptr_t sirtRef = PushSirt(ptr);
if (HaveSirtGpr()) {
gpr_index_--;
PushGpr(sirtRef);
@@ -1032,7 +1040,6 @@ template <class T> class BuildGenericJniFrameStateMachine {
return fpr_index_ > 0;
}
- // TODO: please review this bit representation retrieving.
template <typename U, typename V> V convert(U in) {
CHECK_LE(sizeof(U), sizeof(V));
union { U u; V v; } tmp;
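The convert helper shown in context is a union-based bit copy into a wider type. A self-contained sketch of the idea (the zeroing step is assumed, since the hunk elides the body):

    #include <cstdint>

    template <typename U, typename V>
    V Convert(U in) {  // hypothetical stand-in for the member above
      static_assert(sizeof(U) <= sizeof(V), "widening only");
      union { U u; V v; } tmp;
      tmp.v = 0;   // Zero the destination so the high bits are defined.
      tmp.u = in;
      return tmp.v;
    }

    // Convert<float, uint64_t>(1.5f) == 0x3FC00000: the bit pattern, not the value 1.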
@@ -1155,49 +1162,49 @@ class ComputeGenericJniFrameSize FINAL {
public:
ComputeGenericJniFrameSize() : num_sirt_references_(0), num_stack_entries_(0) {}
- // (negative) offset from SP to top of Sirt.
- uint32_t GetSirtOffset() {
- return 8;
- }
-
- uint32_t GetFirstSirtEntryOffset() {
- return GetSirtOffset() + sizeof(StackReference<mirror::Object>);
- }
-
- uint32_t GetNumSirtReferences() {
- return num_sirt_references_;
- }
-
uint32_t GetStackSize() {
return num_stack_entries_ * sizeof(uintptr_t);
}
- void ComputeLayout(bool is_static, const char* shorty, uint32_t shorty_len, void* sp,
- StackReference<mirror::Object>** start_sirt, StackIndirectReferenceTable** table,
- uint32_t* sirt_entries, uintptr_t** start_stack, uintptr_t** start_gpr,
- uint32_t** start_fpr, void** code_return, size_t* overall_size)
+ // WARNING: After this, *sp won't be pointing to the method anymore!
+ void ComputeLayout(mirror::ArtMethod*** m, bool is_static, const char* shorty, uint32_t shorty_len,
+ void* sp, StackIndirectReferenceTable** table, uint32_t* sirt_entries,
+ uintptr_t** start_stack, uintptr_t** start_gpr, uint32_t** start_fpr,
+ void** code_return, size_t* overall_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ComputeAll(is_static, shorty, shorty_len);
+ mirror::ArtMethod* method = **m;
+
uint8_t* sp8 = reinterpret_cast<uint8_t*>(sp);
- *start_sirt = reinterpret_cast<StackReference<mirror::Object>*>(sp8-GetFirstSirtEntryOffset());
-
- // Add padding entries if necessary for alignment.
- if (sizeof(uintptr_t) < sizeof(uint64_t)) {
- uint32_t size = sizeof(uintptr_t) * num_sirt_references_;
- uint32_t rem = size % 8;
- if (rem != 0) {
- DCHECK_EQ(rem, 4U);
- num_sirt_references_++;
- }
- }
+
+ // First, fix up the layout of the callee-save frame.
+ // We have to squeeze in the Sirt, and relocate the method pointer.
+
+ // "Free" the slot for the method.
+ sp8 += kPointerSize;
+
+ // Add the Sirt.
*sirt_entries = num_sirt_references_;
- size_t sirt_size = StackIndirectReferenceTable::SizeOf(num_sirt_references_);
- sp8 -= GetSirtOffset() + sirt_size;
+ size_t sirt_size = StackIndirectReferenceTable::GetAlignedSirtSize(num_sirt_references_);
+ sp8 -= sirt_size;
*table = reinterpret_cast<StackIndirectReferenceTable*>(sp8);
+ (*table)->SetNumberOfReferences(num_sirt_references_);
+ // Add a slot for the method pointer, and fill it. Fix the pointer-pointer given to us.
+ sp8 -= kPointerSize;
+ uint8_t* method_pointer = sp8;
+ *(reinterpret_cast<mirror::ArtMethod**>(method_pointer)) = method;
+ *m = reinterpret_cast<mirror::ArtMethod**>(method_pointer);
+
+ // Reference cookie and padding.
+ sp8 -= 8;
+ // Store the Sirt size.
+ *reinterpret_cast<uint32_t*>(sp8) = static_cast<uint32_t>(sirt_size & 0xFFFFFFFF);
+
+ // Next comes the native call stack.
sp8 -= GetStackSize();
- // Now align the call stack under the Sirt. This aligns by 16.
+ // Now align the call stack below. This aligns by 16, as AArch64 seems to require.
uintptr_t mask = ~0x0F;
sp8 = reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(sp8) & mask);
*start_stack = reinterpret_cast<uintptr_t*>(sp8);
@@ -1212,10 +1219,14 @@ class ComputeGenericJniFrameSize FINAL {
*start_gpr = reinterpret_cast<uintptr_t*>(sp8);
// reserve space for the code pointer
- sp8 -= sizeof(void*);
+ sp8 -= kPointerSize;
*code_return = reinterpret_cast<void*>(sp8);
*overall_size = reinterpret_cast<uint8_t*>(sp) - sp8;
+
+ // The new SP is stored at the end of the alloca, so it can be immediately popped.
+ sp8 = reinterpret_cast<uint8_t*>(sp) - 5 * KB;
+ *(reinterpret_cast<uint8_t**>(sp8)) = method_pointer;
}
void ComputeSirtOffset() { } // nothing to do, static right now
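Putting the pointer arithmetic of the two hunks above together, the frame ComputeLayout now builds looks roughly like this (S = incoming sp, P = kPointerSize; a sketch derived from this diff, offsets illustrative):

    //  high addresses
    //  S + P              end of the Sirt (reuses the freed ArtMethod* slot)
    //  S + P - sirt_size  StackIndirectReferenceTable header (*table)
    //  S - sirt_size      relocated ArtMethod* slot (the new *m, and the new sp)
    //  S - sirt_size - 8  cookie slot plus padding, holding sirt_size
    //  ...                native call stack (16-byte aligned), then FPRs, GPRs
    //  code_return        slot for the native code pointer
    //  S - 5 KB           copy of the new sp, popped first by the assembly stub
    //  low addresses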
@@ -1291,85 +1302,30 @@ class ComputeGenericJniFrameSize FINAL {
// of transitioning into native code.
class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
public:
- BuildGenericJniFrameVisitor(mirror::ArtMethod** sp, bool is_static, const char* shorty,
+ BuildGenericJniFrameVisitor(mirror::ArtMethod*** sp, bool is_static, const char* shorty,
uint32_t shorty_len, Thread* self) :
- QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sm_(this) {
+ QuickArgumentVisitor(*sp, is_static, shorty, shorty_len), sm_(this) {
ComputeGenericJniFrameSize fsc;
- fsc.ComputeLayout(is_static, shorty, shorty_len, sp, &cur_sirt_entry_, &sirt_,
- &sirt_expected_refs_, &cur_stack_arg_, &cur_gpr_reg_, &cur_fpr_reg_,
- &code_return_, &alloca_used_size_);
+ fsc.ComputeLayout(sp, is_static, shorty, shorty_len, *sp, &sirt_, &sirt_expected_refs_,
+ &cur_stack_arg_, &cur_gpr_reg_, &cur_fpr_reg_, &code_return_,
+ &alloca_used_size_);
sirt_number_of_references_ = 0;
- top_of_sirt_ = cur_sirt_entry_;
+ cur_sirt_entry_ = reinterpret_cast<StackReference<mirror::Object>*>(GetFirstSirtEntry());
// jni environment is always first argument
sm_.AdvancePointer(self->GetJniEnv());
if (is_static) {
- sm_.AdvanceSirt((*sp)->GetDeclaringClass());
+ sm_.AdvanceSirt((**sp)->GetDeclaringClass());
}
}
- void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE {
- Primitive::Type type = GetParamPrimitiveType();
- switch (type) {
- case Primitive::kPrimLong: {
- jlong long_arg;
- if (IsSplitLongOrDouble()) {
- long_arg = ReadSplitLongParam();
- } else {
- long_arg = *reinterpret_cast<jlong*>(GetParamAddress());
- }
- sm_.AdvanceLong(long_arg);
- break;
- }
- case Primitive::kPrimDouble: {
- uint64_t double_arg;
- if (IsSplitLongOrDouble()) {
- // Read into union so that we don't case to a double.
- double_arg = ReadSplitLongParam();
- } else {
- double_arg = *reinterpret_cast<uint64_t*>(GetParamAddress());
- }
- sm_.AdvanceDouble(double_arg);
- break;
- }
- case Primitive::kPrimNot: {
- StackReference<mirror::Object>* stack_ref =
- reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
- sm_.AdvanceSirt(stack_ref->AsMirrorPtr());
- break;
- }
- case Primitive::kPrimFloat:
- sm_.AdvanceFloat(*reinterpret_cast<float*>(GetParamAddress()));
- break;
- case Primitive::kPrimBoolean: // Fall-through.
- case Primitive::kPrimByte: // Fall-through.
- case Primitive::kPrimChar: // Fall-through.
- case Primitive::kPrimShort: // Fall-through.
- case Primitive::kPrimInt: // Fall-through.
- sm_.AdvanceInt(*reinterpret_cast<jint*>(GetParamAddress()));
- break;
- case Primitive::kPrimVoid:
- LOG(FATAL) << "UNREACHABLE";
- break;
- }
- }
+ void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
- void FinalizeSirt(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // Initialize padding entries.
- while (sirt_number_of_references_ < sirt_expected_refs_) {
- *cur_sirt_entry_ = StackReference<mirror::Object>();
- cur_sirt_entry_--;
- sirt_number_of_references_++;
- }
- sirt_->SetNumberOfReferences(sirt_expected_refs_);
- DCHECK_NE(sirt_expected_refs_, 0U);
- // Install Sirt.
- self->PushSirt(sirt_);
- }
+ void FinalizeSirt(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- jobject GetFirstSirtEntry() {
- return reinterpret_cast<jobject>(top_of_sirt_);
+ jobject GetFirstSirtEntry() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return reinterpret_cast<jobject>(sirt_->GetStackReference(0));
}
void PushGpr(uintptr_t val) {
@@ -1394,9 +1350,15 @@ class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
}
uintptr_t PushSirt(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- *cur_sirt_entry_ = StackReference<mirror::Object>::FromMirrorPtr(ref);
- uintptr_t tmp = reinterpret_cast<uintptr_t>(cur_sirt_entry_);
- cur_sirt_entry_--;
+ uintptr_t tmp;
+ if (ref == nullptr) {
+ *cur_sirt_entry_ = StackReference<mirror::Object>();
+ tmp = reinterpret_cast<uintptr_t>(nullptr);
+ } else {
+ *cur_sirt_entry_ = StackReference<mirror::Object>::FromMirrorPtr(ref);
+ tmp = reinterpret_cast<uintptr_t>(cur_sirt_entry_);
+ }
+ cur_sirt_entry_++;
sirt_number_of_references_++;
return tmp;
}
@@ -1418,7 +1380,7 @@ class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
uintptr_t* cur_gpr_reg_;
uint32_t* cur_fpr_reg_;
uintptr_t* cur_stack_arg_;
- StackReference<mirror::Object>* top_of_sirt_;
+ // StackReference<mirror::Object>* top_of_sirt_;
void* code_return_;
size_t alloca_used_size_;
@@ -1427,25 +1389,88 @@ class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
DISALLOW_COPY_AND_ASSIGN(BuildGenericJniFrameVisitor);
};
+void BuildGenericJniFrameVisitor::Visit() {
+ Primitive::Type type = GetParamPrimitiveType();
+ switch (type) {
+ case Primitive::kPrimLong: {
+ jlong long_arg;
+ if (IsSplitLongOrDouble()) {
+ long_arg = ReadSplitLongParam();
+ } else {
+ long_arg = *reinterpret_cast<jlong*>(GetParamAddress());
+ }
+ sm_.AdvanceLong(long_arg);
+ break;
+ }
+ case Primitive::kPrimDouble: {
+ uint64_t double_arg;
+ if (IsSplitLongOrDouble()) {
+ // Read into a union so that we don't cast to a double.
+ double_arg = ReadSplitLongParam();
+ } else {
+ double_arg = *reinterpret_cast<uint64_t*>(GetParamAddress());
+ }
+ sm_.AdvanceDouble(double_arg);
+ break;
+ }
+ case Primitive::kPrimNot: {
+ StackReference<mirror::Object>* stack_ref =
+ reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
+ sm_.AdvanceSirt(stack_ref->AsMirrorPtr());
+ break;
+ }
+ case Primitive::kPrimFloat:
+ sm_.AdvanceFloat(*reinterpret_cast<float*>(GetParamAddress()));
+ break;
+ case Primitive::kPrimBoolean: // Fall-through.
+ case Primitive::kPrimByte: // Fall-through.
+ case Primitive::kPrimChar: // Fall-through.
+ case Primitive::kPrimShort: // Fall-through.
+ case Primitive::kPrimInt: // Fall-through.
+ sm_.AdvanceInt(*reinterpret_cast<jint*>(GetParamAddress()));
+ break;
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "UNREACHABLE";
+ break;
+ }
+}
+
+void BuildGenericJniFrameVisitor::FinalizeSirt(Thread* self) {
+ // Initialize padding entries.
+ while (sirt_number_of_references_ < sirt_expected_refs_) {
+ *cur_sirt_entry_ = StackReference<mirror::Object>();
+ cur_sirt_entry_++;
+ sirt_number_of_references_++;
+ }
+ sirt_->SetNumberOfReferences(sirt_expected_refs_);
+ DCHECK_NE(sirt_expected_refs_, 0U);
+ // Install Sirt.
+ self->PushSirt(sirt_);
+}
+
+extern "C" void* artFindNativeMethod();
+
/*
* Initializes an alloca region assumed to be directly below sp for a native call:
* Create a Sirt and call stack and fill a mini stack with values to be pushed to registers.
* The final element on the stack is a pointer to the native code.
*
+ * On entry, the stack has a standard callee-save frame above sp, and an alloca below it.
+ * We need to fix this, as the Sirt needs to go into the callee-save frame.
+ *
* The return of this function denotes:
* 1) How many bytes of the alloca can be released, if the value is non-negative.
* 2) An error, if the value is negative.
*/
extern "C" ssize_t artQuickGenericJniTrampoline(Thread* self, mirror::ArtMethod** sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
mirror::ArtMethod* called = *sp;
- DCHECK(called->IsNative());
+ DCHECK(called->IsNative()) << PrettyMethod(called, true);
// run the visitor
MethodHelper mh(called);
- BuildGenericJniFrameVisitor visitor(sp, called->IsStatic(), mh.GetShorty(), mh.GetShortyLength(),
+ BuildGenericJniFrameVisitor visitor(&sp, called->IsStatic(), mh.GetShorty(), mh.GetShortyLength(),
self);
visitor.VisitArguments();
visitor.FinalizeSirt(self);
@@ -1455,7 +1480,7 @@ extern "C" ssize_t artQuickGenericJniTrampoline(Thread* self, mirror::ArtMethod*
self->VerifyStack();
- // start JNI, save the cookie
+ // Start JNI, save the cookie.
uint32_t cookie;
if (called->IsSynchronized()) {
cookie = JniMethodStartSynchronized(visitor.GetFirstSirtEntry(), self);
@@ -1467,21 +1492,35 @@ extern "C" ssize_t artQuickGenericJniTrampoline(Thread* self, mirror::ArtMethod*
} else {
cookie = JniMethodStart(self);
}
+ uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
*(sp32 - 1) = cookie;
- // retrieve native code
+ // Retrieve the stored native code.
const void* nativeCode = called->GetNativeMethod();
- if (nativeCode == nullptr) {
- // TODO: is this really an error, or do we need to try to find native code?
- LOG(FATAL) << "Finding native code not implemented yet.";
+
+ // There are two cases for the content of nativeCode:
+ // 1) Pointer to the native function.
+ // 2) Pointer to the trampoline for native code binding.
+ // In the second case, we need to execute the binding and continue with the actual native function
+ // pointer.
+ DCHECK(nativeCode != nullptr);
+ if (nativeCode == GetJniDlsymLookupStub()) {
+ nativeCode = artFindNativeMethod();
+
+ if (nativeCode == nullptr) {
+ DCHECK(self->IsExceptionPending()); // There should be an exception pending now.
+ return -1;
+ }
+ // Note that the native code pointer will be automatically set by artFindNativeMethod().
}
+ // Store the native code pointer in the stack at the right location.
uintptr_t* code_pointer = reinterpret_cast<uintptr_t*>(visitor.GetCodeReturn());
- size_t window_size = visitor.GetAllocaUsedSize();
*code_pointer = reinterpret_cast<uintptr_t>(nativeCode);
- // 5K reserved, window_size used.
- return (5 * KB) - window_size;
+ // 5K reserved, window_size + frame pointer used.
+ size_t window_size = visitor.GetAllocaUsedSize();
+ return (5 * KB) - window_size - kPointerSize;
}
/*
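Worked example of the new return value (illustrative numbers): with the 5 KB reserved alloca, GetAllocaUsedSize() reporting 200 bytes, and kPointerSize == 8, the trampoline returns 5 * 1024 - 200 - 8 = 4912, the number of alloca bytes the assembly stub may release; the extra kPointerSize covers the stack-pointer copy stored at the bottom of the alloca.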
@@ -1501,10 +1540,10 @@ extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, mirror::ArtMet
if (return_shorty_char == 'L') {
// the only special ending call
if (called->IsSynchronized()) {
- ComputeGenericJniFrameSize fsc;
- fsc.ComputeSirtOffset();
- uint32_t offset = fsc.GetFirstSirtEntryOffset();
- jobject tmp = reinterpret_cast<jobject>(reinterpret_cast<uint8_t*>(sp) - offset);
+ StackIndirectReferenceTable* table =
+ reinterpret_cast<StackIndirectReferenceTable*>(
+ reinterpret_cast<uint8_t*>(sp) + kPointerSize);
+ jobject tmp = reinterpret_cast<jobject>(table->GetStackReference(0));
return reinterpret_cast<uint64_t>(JniMethodEndWithReferenceSynchronized(result.l, cookie, tmp,
self));
@@ -1513,10 +1552,10 @@ extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, mirror::ArtMet
}
} else {
if (called->IsSynchronized()) {
- ComputeGenericJniFrameSize fsc;
- fsc.ComputeSirtOffset();
- uint32_t offset = fsc.GetFirstSirtEntryOffset();
- jobject tmp = reinterpret_cast<jobject>(reinterpret_cast<uint8_t*>(sp) - offset);
+ StackIndirectReferenceTable* table =
+ reinterpret_cast<StackIndirectReferenceTable*>(
+ reinterpret_cast<uint8_t*>(sp) + kPointerSize);
+ jobject tmp = reinterpret_cast<jobject>(table->GetStackReference(0));
JniMethodEndSynchronized(cookie, tmp, self);
} else {
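Both synchronized paths above recover the lock object the same way, which matches the layout sketched earlier: the relocated ArtMethod* sits at the new sp, so the StackIndirectReferenceTable begins exactly kPointerSize above it, and GetStackReference(0) is the synchronization object that JniMethodStartSynchronized saved on entry (a reading of this diff's arithmetic, not an independent claim).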
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index 5e3f5043c1..9c76a1480f 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -76,7 +76,7 @@ class ExceptionTest : public CommonRuntimeTest {
method_f_ = my_klass_->FindVirtualMethod("f", "()I");
ASSERT_TRUE(method_f_ != NULL);
- method_f_->SetFrameSizeInBytes(kStackAlignment);
+ method_f_->SetFrameSizeInBytes(4 * kPointerSize);
method_f_->SetEntryPointFromQuickCompiledCode(&fake_code_[sizeof(code_size)]);
method_f_->SetMappingTable(&fake_mapping_data_.GetData()[0]);
method_f_->SetVmapTable(&fake_vmap_table_data_.GetData()[0]);
@@ -84,7 +84,7 @@ class ExceptionTest : public CommonRuntimeTest {
method_g_ = my_klass_->FindVirtualMethod("g", "(I)V");
ASSERT_TRUE(method_g_ != NULL);
- method_g_->SetFrameSizeInBytes(kStackAlignment);
+ method_g_->SetFrameSizeInBytes(4 * kPointerSize);
method_g_->SetEntryPointFromQuickCompiledCode(&fake_code_[sizeof(code_size)]);
method_g_->SetMappingTable(&fake_mapping_data_.GetData()[0]);
method_g_->SetVmapTable(&fake_vmap_table_data_.GetData()[0]);
@@ -151,7 +151,7 @@ TEST_F(ExceptionTest, StackTraceElement) {
std::vector<uintptr_t> fake_stack;
ASSERT_EQ(kStackAlignment, 16U);
- ASSERT_EQ(sizeof(uintptr_t), sizeof(uint32_t));
+ // ASSERT_EQ(sizeof(uintptr_t), sizeof(uint32_t));
if (!kUsePortableCompiler) {
// Create two fake stack frames with mapping data created in SetUp. We map offset 3 in the code
@@ -201,7 +201,7 @@ TEST_F(ExceptionTest, StackTraceElement) {
jobject internal = thread->CreateInternalStackTrace(soa);
ASSERT_TRUE(internal != NULL);
- jobjectArray ste_array = Thread::InternalStackTraceToStackTraceElementArray(env, internal);
+ jobjectArray ste_array = Thread::InternalStackTraceToStackTraceElementArray(soa, internal);
ASSERT_TRUE(ste_array != NULL);
mirror::ObjectArray<mirror::StackTraceElement>* trace_array =
soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(ste_array);
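The new frame size is pointer-width aware; the arithmetic (illustrative):

    // 32-bit: 4 * kPointerSize = 4 * 4 = 16 bytes
    // 64-bit: 4 * kPointerSize = 4 * 8 = 32 bytes
    // Both are multiples of the 16-byte kStackAlignment the tests assert.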
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
new file mode 100644
index 0000000000..f9f3e2576e
--- /dev/null
+++ b/runtime/fault_handler.cc
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "fault_handler.h"
+#include <sys/mman.h>
+#include <sys/ucontext.h>
+#include "base/macros.h"
+#include "globals.h"
+#include "base/logging.h"
+#include "base/hex_dump.h"
+#include "thread.h"
+#include "mirror/art_method-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/dex_cache.h"
+#include "mirror/object_array-inl.h"
+#include "mirror/object-inl.h"
+#include "object_utils.h"
+#include "scoped_thread_state_change.h"
+#include "verify_object-inl.h"
+
+namespace art {
+// Static fault manager object accessed by the signal handler.
+FaultManager fault_manager;
+
+// Signal handler called on SIGSEGV.
+static void art_fault_handler(int sig, siginfo_t* info, void* context) {
+ fault_manager.HandleFault(sig, info, context);
+}
+
+FaultManager::FaultManager() {
+ sigaction(SIGSEGV, nullptr, &oldaction_);
+}
+
+FaultManager::~FaultManager() {
+ sigaction(SIGSEGV, &oldaction_, nullptr); // Restore old handler.
+}
+
+void FaultManager::Init() {
+ struct sigaction action;
+ action.sa_sigaction = art_fault_handler;
+ sigemptyset(&action.sa_mask);
+ action.sa_flags = SA_SIGINFO | SA_ONSTACK;
+#if !defined(__mips__)
+ action.sa_restorer = nullptr;
+#endif
+ sigaction(SIGSEGV, &action, &oldaction_);
+}
+
+void FaultManager::HandleFault(int sig, siginfo_t* info, void* context) {
+ bool handled = false;
+ if (IsInGeneratedCode(context)) {
+ for (auto& handler : handlers_) {
+ handled = handler->Action(sig, info, context);
+ if (handled) {
+ return;
+ }
+ }
+ }
+
+ if (!handled) {
+ LOG(INFO) << "Caught unknown SIGSEGV in ART fault handler";
+ oldaction_.sa_sigaction(sig, info, context);
+ }
+}
+
+void FaultManager::AddHandler(FaultHandler* handler) {
+ handlers_.push_back(handler);
+}
+
+void FaultManager::RemoveHandler(FaultHandler* handler) {
+ for (Handlers::iterator i = handlers_.begin(); i != handlers_.end(); ++i) {
+ FaultHandler* h = *i;
+ if (h == handler) {
+ handlers_.erase(i);
+ return;
+ }
+ }
+}
+
+
+// This function is called within the signal handler. It checks that
+// the mutator_lock is held (shared). No annotalysis is done.
+bool FaultManager::IsInGeneratedCode(void* context) {
+ // We can only be running Java code in the current thread if it
+ // is in Runnable state.
+ Thread* thread = Thread::Current();
+ if (thread == nullptr) {
+ return false;
+ }
+
+ ThreadState state = thread->GetState();
+ if (state != kRunnable) {
+ return false;
+ }
+
+ // Current thread is runnable.
+ // Make sure it has the mutator lock.
+ if (!Locks::mutator_lock_->IsSharedHeld(thread)) {
+ return false;
+ }
+
+ uintptr_t potential_method = 0;
+ uintptr_t return_pc = 0;
+
+ // Get the architecture specific method address and return address. These
+ // are in architecture specific files in arch/<arch>/fault_handler_<arch>.cc
+ GetMethodAndReturnPC(context, /*out*/potential_method, /*out*/return_pc);
+
+ // If we don't have a potential method, we're outta here.
+ if (potential_method == 0) {
+ return false;
+ }
+
+ // Verify that the potential method is indeed a method.
+ // TODO: check the GC maps to make sure it's an object.
+
+ mirror::Object* method_obj =
+ reinterpret_cast<mirror::Object*>(potential_method);
+
+ // Check that the class pointer inside the object is not null and is aligned.
+ mirror::Class* cls = method_obj->GetClass<kVerifyNone>();
+ if (cls == nullptr) {
+ return false;
+ }
+ if (!IsAligned<kObjectAlignment>(cls)) {
+ return false;
+ }
+
+ if (!VerifyClassClass(cls)) {
+ return false;
+ }
+
+ // Now make sure the class is a mirror::ArtMethod.
+ if (!cls->IsArtMethodClass()) {
+ return false;
+ }
+
+ // We can be certain that this is a method now. Check if we have a GC map
+ // at the return PC address.
+ mirror::ArtMethod* method =
+ reinterpret_cast<mirror::ArtMethod*>(potential_method);
+ return method->ToDexPc(return_pc, false) != DexFile::kDexNoIndex;
+}
+
+//
+// Null pointer fault handler
+//
+
+NullPointerHandler::NullPointerHandler(FaultManager* manager) : FaultHandler(manager) {
+ manager->AddHandler(this);
+}
+
+//
+// Suspension fault handler
+//
+
+SuspensionHandler::SuspensionHandler(FaultManager* manager) : FaultHandler(manager) {
+ manager->AddHandler(this);
+}
+
+//
+// Stack overflow fault handler
+//
+
+StackOverflowHandler::StackOverflowHandler(FaultManager* manager) : FaultHandler(manager) {
+ manager->AddHandler(this);
+}
+} // namespace art
+
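The concrete Action() implementations live in the per-architecture fault_handler_<arch>.cc files listed at the top of this change; a hypothetical handler wired into this manager would look like the following (sketch only, using the API declared in fault_handler.h below):

    class HypotheticalHandler FINAL : public FaultHandler {
     public:
      explicit HypotheticalHandler(FaultManager* manager) : FaultHandler(manager) {
        manager->AddHandler(this);
      }

      bool Action(int sig, siginfo_t* siginfo, void* context) OVERRIDE {
        // Return true only when the fault is recognized and fully handled;
        // false lets FaultManager consult the next registered handler.
        return false;
      }
    };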
diff --git a/runtime/fault_handler.h b/runtime/fault_handler.h
new file mode 100644
index 0000000000..9fe6e9a58a
--- /dev/null
+++ b/runtime/fault_handler.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef ART_RUNTIME_FAULT_HANDLER_H_
+#define ART_RUNTIME_FAULT_HANDLER_H_
+
+#include <setjmp.h>
+#include <signal.h>
+#include <stdint.h>
+
+#include <vector>
+
+#include "base/mutex.h" // For annotalysis.
+
+namespace art {
+class FaultHandler;
+
+class FaultManager {
+ public:
+ FaultManager();
+ ~FaultManager();
+
+ void Init();
+
+ void HandleFault(int sig, siginfo_t* info, void* context);
+ void AddHandler(FaultHandler* handler);
+ void RemoveHandler(FaultHandler* handler);
+
+ private:
+ bool IsInGeneratedCode(void* context) NO_THREAD_SAFETY_ANALYSIS;
+ void GetMethodAndReturnPC(void* context, uintptr_t& method, uintptr_t& return_pc);
+
+ typedef std::vector<FaultHandler*> Handlers;
+ Handlers handlers_;
+ struct sigaction oldaction_;
+};
+
+class FaultHandler {
+ public:
+ FaultHandler() : manager_(nullptr) {}
+ explicit FaultHandler(FaultManager* manager) : manager_(manager) {}
+ virtual ~FaultHandler() {}
+
+ virtual bool Action(int sig, siginfo_t* siginfo, void* context) = 0;
+ protected:
+ FaultManager* const manager_;
+};
+
+class NullPointerHandler FINAL : public FaultHandler {
+ public:
+ NullPointerHandler() {}
+ explicit NullPointerHandler(FaultManager* manager);
+
+ bool Action(int sig, siginfo_t* siginfo, void* context) OVERRIDE;
+};
+
+class SuspensionHandler FINAL : public FaultHandler {
+ public:
+ SuspensionHandler() {}
+ explicit SuspensionHandler(FaultManager* manager);
+
+ bool Action(int sig, siginfo_t* siginfo, void* context) OVERRIDE;
+};
+
+class StackOverflowHandler FINAL : public FaultHandler {
+ public:
+ StackOverflowHandler() {}
+ explicit StackOverflowHandler(FaultManager* manager);
+
+ bool Action(int sig, siginfo_t* siginfo, void* context) OVERRIDE;
+};
+
+// Statically allocated so that the signal handler can access it.
+extern FaultManager fault_manager;
+
+} // namespace art
+#endif // ART_RUNTIME_FAULT_HANDLER_H_
+
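An assumed initialization sequence (the actual call sites are elsewhere in the runtime, not in this change):

    fault_manager.Init();  // Install the SIGSEGV handler, saving the old disposition.
    // Each handler constructor registers itself with the manager via AddHandler().
    static NullPointerHandler null_handler(&fault_manager);
    static SuspensionHandler suspension_handler(&fault_manager);
    static StackOverflowHandler stack_overflow_handler(&fault_manager);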
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index e13bd715c2..ace9f9e425 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -48,8 +48,8 @@ RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity,
bulk_free_lock_("rosalloc bulk free lock", kRosAllocBulkFreeLock),
page_release_mode_(page_release_mode),
page_release_size_threshold_(page_release_size_threshold) {
- DCHECK(RoundUp(capacity, kPageSize) == capacity);
- DCHECK(RoundUp(max_capacity, kPageSize) == max_capacity);
+ DCHECK_EQ(RoundUp(capacity, kPageSize), capacity);
+ DCHECK_EQ(RoundUp(max_capacity, kPageSize), max_capacity);
CHECK_LE(capacity, max_capacity);
CHECK(IsAligned<kPageSize>(page_release_size_threshold_));
if (!initialized_) {
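The motivation for the DCHECK_EQ/_LT/_GT forms used throughout this file: on failure they log both operands, where plain DCHECK only logs the expression. Illustrative difference (output format is a sketch):

    DCHECK(RoundUp(capacity, kPageSize) == capacity);
    // Failure: "Check failed: RoundUp(capacity, kPageSize) == capacity" (no values).
    DCHECK_EQ(RoundUp(capacity, kPageSize), capacity);
    // Failure: "Check failed: ... (8192 vs. 8000)" (both operands printed).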
@@ -151,7 +151,7 @@ void* RosAlloc::AllocPages(Thread* self, size_t num_pages, byte page_map_type) {
if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
// There is a free page run at the end.
DCHECK(last_free_page_run->IsFree());
- DCHECK(page_map_[ToPageMapIndex(last_free_page_run)] == kPageMapEmpty);
+ DCHECK_EQ(page_map_[ToPageMapIndex(last_free_page_run)], kPageMapEmpty);
last_free_page_run_size = last_free_page_run->ByteSize(this);
} else {
// There is no free page run at the end.
@@ -176,7 +176,7 @@ void* RosAlloc::AllocPages(Thread* self, size_t num_pages, byte page_map_type) {
DCHECK_EQ(last_free_page_run_size, last_free_page_run->ByteSize(this));
last_free_page_run->SetByteSize(this, last_free_page_run_size + increment);
DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
- DCHECK(last_free_page_run->End(this) == base_ + new_footprint);
+ DCHECK_EQ(last_free_page_run->End(this), base_ + new_footprint);
} else {
// Otherwise, insert a new free page run at the end.
FreePageRun* new_free_page_run = reinterpret_cast<FreePageRun*>(base_ + footprint_);
@@ -186,7 +186,7 @@ void* RosAlloc::AllocPages(Thread* self, size_t num_pages, byte page_map_type) {
new_free_page_run->SetByteSize(this, increment);
DCHECK_EQ(new_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
free_page_runs_.insert(new_free_page_run);
- DCHECK(*free_page_runs_.rbegin() == new_free_page_run);
+ DCHECK_EQ(*free_page_runs_.rbegin(), new_free_page_run);
if (kTraceRosAlloc) {
LOG(INFO) << "RosAlloc::AlloPages() : Grew the heap by inserting run 0x"
<< std::hex << reinterpret_cast<intptr_t>(new_free_page_run)
@@ -240,7 +240,7 @@ void* RosAlloc::AllocPages(Thread* self, size_t num_pages, byte page_map_type) {
// Update the page map.
size_t page_map_idx = ToPageMapIndex(res);
for (size_t i = 0; i < num_pages; i++) {
- DCHECK(page_map_[page_map_idx + i] == kPageMapEmpty);
+ DCHECK_EQ(page_map_[page_map_idx + i], kPageMapEmpty);
}
switch (page_map_type) {
case kPageMapRun:
@@ -282,7 +282,7 @@ void* RosAlloc::AllocPages(Thread* self, size_t num_pages, byte page_map_type) {
void RosAlloc::FreePages(Thread* self, void* ptr) {
lock_.AssertHeld(self);
size_t pm_idx = ToPageMapIndex(ptr);
- DCHECK(pm_idx < page_map_size_);
+ DCHECK_LT(pm_idx, page_map_size_);
byte pm_type = page_map_[pm_idx];
DCHECK(pm_type == kPageMapRun || pm_type == kPageMapLargeObject);
byte pm_part_type;
@@ -425,7 +425,7 @@ void RosAlloc::FreePages(Thread* self, void* ptr) {
}
void* RosAlloc::AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated) {
- DCHECK(size > kLargeSizeThreshold);
+ DCHECK_GT(size, kLargeSizeThreshold);
size_t num_pages = RoundUp(size, kPageSize) / kPageSize;
void* r;
{
@@ -461,13 +461,14 @@ void* RosAlloc::AllocLargeObject(Thread* self, size_t size, size_t* bytes_alloca
}
void RosAlloc::FreeInternal(Thread* self, void* ptr) {
- DCHECK(base_ <= ptr && ptr < base_ + footprint_);
+ DCHECK_LE(base_, ptr);
+ DCHECK_LT(ptr, base_ + footprint_);
size_t pm_idx = RoundDownToPageMapIndex(ptr);
bool free_from_run = false;
Run* run = NULL;
{
MutexLock mu(self, lock_);
- DCHECK(pm_idx < page_map_size_);
+ DCHECK_LT(pm_idx, page_map_size_);
byte page_map_entry = page_map_[pm_idx];
if (kTraceRosAlloc) {
LOG(INFO) << "RosAlloc::FreeInternal() : " << std::hex << ptr << ", pm_idx=" << std::dec << pm_idx
@@ -491,11 +492,11 @@ void RosAlloc::FreeInternal(Thread* self, void* ptr) {
// Find the beginning of the run.
while (page_map_[pi] != kPageMapRun) {
pi--;
- DCHECK(pi < capacity_ / kPageSize);
+ DCHECK_LT(pi, capacity_ / kPageSize);
}
- DCHECK(page_map_[pi] == kPageMapRun);
+ DCHECK_EQ(page_map_[pi], kPageMapRun);
run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
- DCHECK(run->magic_num_ == kMagicNum);
+ DCHECK_EQ(run->magic_num_, kMagicNum);
break;
}
default:
@@ -551,13 +552,13 @@ RosAlloc::Run* RosAlloc::RefillRun(Thread* self, size_t idx) {
}
void* RosAlloc::AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated) {
- DCHECK(size <= kLargeSizeThreshold);
+ DCHECK_LE(size, kLargeSizeThreshold);
size_t bracket_size;
size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
DCHECK_EQ(idx, SizeToIndex(size));
DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
DCHECK_EQ(bracket_size, bracketSizes[idx]);
- DCHECK(size <= bracket_size);
+ DCHECK_LE(size, bracket_size);
DCHECK(size > 512 || bracket_size - size < 16);
void* slot_addr;
@@ -693,8 +694,9 @@ void* RosAlloc::AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated)
}
void RosAlloc::FreeFromRun(Thread* self, void* ptr, Run* run) {
- DCHECK(run->magic_num_ == kMagicNum);
- DCHECK(run < ptr && ptr < run->End());
+ DCHECK_EQ(run->magic_num_, kMagicNum);
+ DCHECK_LT(run, ptr);
+ DCHECK_LT(ptr, run->End());
size_t idx = run->size_bracket_idx_;
MutexLock mu(self, *size_bracket_locks_[idx]);
bool run_was_full = false;
@@ -858,11 +860,11 @@ inline void RosAlloc::Run::FreeSlot(void* ptr) {
- (reinterpret_cast<byte*>(this) + headerSizes[idx]);
DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
size_t slot_idx = offset_from_slot_base / bracketSizes[idx];
- DCHECK(slot_idx < numOfSlots[idx]);
+ DCHECK_LT(slot_idx, numOfSlots[idx]);
size_t vec_idx = slot_idx / 32;
if (kIsDebugBuild) {
size_t num_vec = RoundUp(numOfSlots[idx], 32) / 32;
- DCHECK(vec_idx < num_vec);
+ DCHECK_LT(vec_idx, num_vec);
}
size_t vec_off = slot_idx % 32;
uint32_t* vec = &alloc_bit_map_[vec_idx];
@@ -960,11 +962,11 @@ inline void RosAlloc::Run::MarkFreeBitMapShared(void* ptr, uint32_t* free_bit_ma
- (reinterpret_cast<byte*>(this) + headerSizes[idx]);
DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
size_t slot_idx = offset_from_slot_base / bracketSizes[idx];
- DCHECK(slot_idx < numOfSlots[idx]);
+ DCHECK_LT(slot_idx, numOfSlots[idx]);
size_t vec_idx = slot_idx / 32;
if (kIsDebugBuild) {
size_t num_vec = RoundUp(numOfSlots[idx], 32) / 32;
- DCHECK(vec_idx < num_vec);
+ DCHECK_LT(vec_idx, num_vec);
}
size_t vec_off = slot_idx % 32;
uint32_t* vec = &free_bit_map_base[vec_idx];
@@ -997,11 +999,13 @@ inline bool RosAlloc::Run::IsFull() {
size_t num_vec = RoundUp(num_slots, 32) / 32;
size_t slots = 0;
for (size_t v = 0; v < num_vec; v++, slots += 32) {
- DCHECK(num_slots >= slots);
+ DCHECK_GE(num_slots, slots);
uint32_t vec = alloc_bit_map_[v];
uint32_t mask = (num_slots - slots >= 32) ? static_cast<uint32_t>(-1)
: (1 << (num_slots - slots)) - 1;
- DCHECK(num_slots - slots >= 32 ? mask == static_cast<uint32_t>(-1) : true);
+ if ((num_slots - slots) >= 32) {
+ DCHECK_EQ(mask, static_cast<uint32_t>(-1));
+ }
if (vec != mask) {
return false;
}
@@ -1052,7 +1056,7 @@ void RosAlloc::Run::InspectAllSlots(void (*handler)(void* start, void* end, size
size_t num_vec = RoundUp(num_slots, 32) / 32;
size_t slots = 0;
for (size_t v = 0; v < num_vec; v++, slots += 32) {
- DCHECK(num_slots >= slots);
+ DCHECK_GE(num_slots, slots);
uint32_t vec = alloc_bit_map_[v];
size_t end = std::min(num_slots - slots, static_cast<size_t>(32));
for (size_t i = 0; i < end; ++i) {
@@ -1094,7 +1098,8 @@ void RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
for (size_t i = 0; i < num_ptrs; i++) {
void* ptr = ptrs[i];
ptrs[i] = NULL;
- DCHECK(base_ <= ptr && ptr < base_ + footprint_);
+ DCHECK_LE(base_, ptr);
+ DCHECK_LT(ptr, base_ + footprint_);
size_t pm_idx = RoundDownToPageMapIndex(ptr);
Run* run = NULL;
if (kReadPageMapEntryWithoutLockInBulkFree) {
@@ -1107,18 +1112,18 @@ void RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
}
if (LIKELY(page_map_entry == kPageMapRun)) {
run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
- DCHECK(run->magic_num_ == kMagicNum);
+ DCHECK_EQ(run->magic_num_, kMagicNum);
} else if (LIKELY(page_map_entry == kPageMapRunPart)) {
size_t pi = pm_idx;
DCHECK(page_map_[pi] == kPageMapRun || page_map_[pi] == kPageMapRunPart);
// Find the beginning of the run.
while (page_map_[pi] != kPageMapRun) {
pi--;
- DCHECK(pi < capacity_ / kPageSize);
+ DCHECK_LT(pi, capacity_ / kPageSize);
}
- DCHECK(page_map_[pi] == kPageMapRun);
+ DCHECK_EQ(page_map_[pi], kPageMapRun);
run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
- DCHECK(run->magic_num_ == kMagicNum);
+ DCHECK_EQ(run->magic_num_, kMagicNum);
} else if (page_map_entry == kPageMapLargeObject) {
MutexLock mu(self, lock_);
FreePages(self, ptr);
@@ -1142,7 +1147,7 @@ void RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
bool free_from_run = false;
{
MutexLock mu(self, lock_);
- DCHECK(pm_idx < page_map_size_);
+ DCHECK_LT(pm_idx, page_map_size_);
byte page_map_entry = page_map_[pm_idx];
if (kTraceRosAlloc) {
LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
@@ -1152,7 +1157,7 @@ void RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
if (LIKELY(page_map_entry == kPageMapRun)) {
free_from_run = true;
run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
- DCHECK(run->magic_num_ == kMagicNum);
+ DCHECK_EQ(run->magic_num_, kMagicNum);
} else if (LIKELY(page_map_entry == kPageMapRunPart)) {
free_from_run = true;
size_t pi = pm_idx;
@@ -1160,11 +1165,11 @@ void RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
// Find the beginning of the run.
while (page_map_[pi] != kPageMapRun) {
pi--;
- DCHECK(pi < capacity_ / kPageSize);
+ DCHECK_LT(pi, capacity_ / kPageSize);
}
- DCHECK(page_map_[pi] == kPageMapRun);
+ DCHECK_EQ(page_map_[pi], kPageMapRun);
run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
- DCHECK(run->magic_num_ == kMagicNum);
+ DCHECK_EQ(run->magic_num_, kMagicNum);
} else if (page_map_entry == kPageMapLargeObject) {
FreePages(self, ptr);
} else {
@@ -1393,7 +1398,8 @@ std::string RosAlloc::DumpPageMap() {
}
size_t RosAlloc::UsableSize(void* ptr) {
- DCHECK(base_ <= ptr && ptr < base_ + footprint_);
+ DCHECK_LE(base_, ptr);
+ DCHECK_LT(ptr, base_ + footprint_);
size_t pm_idx = RoundDownToPageMapIndex(ptr);
MutexLock mu(Thread::Current(), lock_);
switch (page_map_[pm_idx]) {
@@ -1420,11 +1426,11 @@ size_t RosAlloc::UsableSize(void* ptr) {
// Find the beginning of the run.
while (page_map_[pm_idx] != kPageMapRun) {
pm_idx--;
- DCHECK(pm_idx < capacity_ / kPageSize);
+ DCHECK_LT(pm_idx, capacity_ / kPageSize);
}
- DCHECK(page_map_[pm_idx] == kPageMapRun);
+ DCHECK_EQ(page_map_[pm_idx], kPageMapRun);
Run* run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
- DCHECK(run->magic_num_ == kMagicNum);
+ DCHECK_EQ(run->magic_num_, kMagicNum);
size_t idx = run->size_bracket_idx_;
size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
- (reinterpret_cast<byte*>(run) + headerSizes[idx]);
@@ -1446,7 +1452,7 @@ bool RosAlloc::Trim() {
if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
// Remove the last free page run, if any.
DCHECK(last_free_page_run->IsFree());
- DCHECK(page_map_[ToPageMapIndex(last_free_page_run)] == kPageMapEmpty);
+ DCHECK_EQ(page_map_[ToPageMapIndex(last_free_page_run)], kPageMapEmpty);
DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
DCHECK_EQ(last_free_page_run->End(this), base_ + footprint_);
free_page_runs_.erase(last_free_page_run);
@@ -1547,7 +1553,7 @@ void RosAlloc::InspectAll(void (*handler)(void* start, void* end, size_t used_by
case kPageMapRun: {
// The start of a run.
Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
- DCHECK(run->magic_num_ == kMagicNum);
+ DCHECK_EQ(run->magic_num_, kMagicNum);
run->InspectAllSlots(handler, arg);
size_t num_pages = numOfPages[run->size_bracket_idx_];
if (kIsDebugBuild) {
@@ -1656,7 +1662,7 @@ void RosAlloc::Initialize() {
} else if (i == kNumOfSizeBrackets - 2) {
bracketSizes[i] = 1 * KB;
} else {
- DCHECK(i == kNumOfSizeBrackets - 1);
+ DCHECK_EQ(i, kNumOfSizeBrackets - 1);
bracketSizes[i] = 2 * KB;
}
if (kTraceRosAlloc) {
@@ -1674,10 +1680,10 @@ void RosAlloc::Initialize() {
} else if (i < 32) {
numOfPages[i] = 8;
} else if (i == 32) {
- DCHECK(i = kNumOfSizeBrackets - 2);
+ DCHECK_EQ(i, kNumOfSizeBrackets - 2);
numOfPages[i] = 16;
} else {
- DCHECK(i = kNumOfSizeBrackets - 1);
+ DCHECK_EQ(i, kNumOfSizeBrackets - 1);
numOfPages[i] = 32;
}
if (kTraceRosAlloc) {
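The two hunks above fix more than style: the old checks used = where == was meant, so they assigned and then tested the result for non-zero. A minimal sketch of the bug class:

    #include <cassert>
    unsigned i = 40;
    assert(i = 33);   // BUG: assigns 33, then tests non-zero; always passes.
    assert(i == 33);  // Real comparison (DCHECK_EQ additionally logs both values).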
@@ -1726,7 +1732,7 @@ void RosAlloc::Initialize() {
DCHECK(num_of_slots > 0 && header_size > 0 && bulk_free_bit_map_offset > 0);
// Add the padding for the alignment remainder.
header_size += run_size % bracket_size;
- DCHECK(header_size + num_of_slots * bracket_size == run_size);
+ DCHECK_EQ(header_size + num_of_slots * bracket_size, run_size);
numOfSlots[i] = num_of_slots;
headerSizes[i] = header_size;
bulkFreeBitMapOffsets[i] = bulk_free_bit_map_offset;
@@ -1773,7 +1779,7 @@ void RosAlloc::Verify() {
case kPageMapEmpty: {
// The start of a free page run.
FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
- DCHECK(fpr->magic_num_ == kMagicNumFree) << "Bad magic number : " << fpr->magic_num_;
+ DCHECK_EQ(fpr->magic_num_, kMagicNumFree);
CHECK(free_page_runs_.find(fpr) != free_page_runs_.end())
<< "An empty page must belong to the free page run set";
size_t fpr_size = fpr->ByteSize(this);
@@ -1805,7 +1811,7 @@ void RosAlloc::Verify() {
void* start = base_ + i * kPageSize;
mirror::Object* obj = reinterpret_cast<mirror::Object*>(start);
size_t obj_size = obj->SizeOf();
- CHECK(obj_size > kLargeSizeThreshold)
+ CHECK_GT(obj_size, kLargeSizeThreshold)
<< "A rosalloc large object size must be > " << kLargeSizeThreshold;
CHECK_EQ(num_pages, RoundUp(obj_size, kPageSize) / kPageSize)
<< "A rosalloc large object size " << obj_size
@@ -1822,9 +1828,9 @@ void RosAlloc::Verify() {
case kPageMapRun: {
// The start of a run.
Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
- DCHECK(run->magic_num_ == kMagicNum) << "Bad magic number" << run->magic_num_;
+ DCHECK_EQ(run->magic_num_, kMagicNum);
size_t idx = run->size_bracket_idx_;
- CHECK(idx < kNumOfSizeBrackets) << "Out of range size bracket index : " << idx;
+ CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << idx;
size_t num_pages = numOfPages[idx];
CHECK_GT(num_pages, static_cast<uintptr_t>(0))
<< "Run size must be > 0 : " << num_pages;
@@ -1858,9 +1864,9 @@ void RosAlloc::Verify() {
}
void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc) {
- DCHECK(magic_num_ == kMagicNum) << "Bad magic number : " << Dump();
+ DCHECK_EQ(magic_num_, kMagicNum) << "Bad magic number : " << Dump();
size_t idx = size_bracket_idx_;
- CHECK(idx < kNumOfSizeBrackets) << "Out of range size bracket index : " << Dump();
+ CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << Dump();
byte* slot_base = reinterpret_cast<byte*>(this) + headerSizes[idx];
size_t num_slots = numOfSlots[idx];
size_t bracket_size = IndexToBracketSize(idx);
@@ -1951,7 +1957,7 @@ void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc) {
size_t num_vec = RoundUp(num_slots, 32) / 32;
size_t slots = 0;
for (size_t v = 0; v < num_vec; v++, slots += 32) {
- DCHECK(num_slots >= slots) << "Out of bounds";
+ DCHECK_GE(num_slots, slots) << "Out of bounds";
uint32_t vec = alloc_bit_map_[v];
uint32_t thread_local_free_vec = ThreadLocalFreeBitMap()[v];
size_t end = std::min(num_slots - slots, static_cast<size_t>(32));
diff --git a/runtime/gc/collector/immune_region.cc b/runtime/gc/collector/immune_region.cc
new file mode 100644
index 0000000000..9e6538456f
--- /dev/null
+++ b/runtime/gc/collector/immune_region.cc
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "immune_region.h"
+
+#include "gc/space/space-inl.h"
+#include "mirror/object.h"
+
+namespace art {
+namespace gc {
+namespace collector {
+
+ImmuneRegion::ImmuneRegion() {
+ Reset();
+}
+
+void ImmuneRegion::Reset() {
+ begin_ = nullptr;
+ end_ = nullptr;
+}
+
+bool ImmuneRegion::AddContinuousSpace(space::ContinuousSpace* space) {
+ // Bind live to mark bitmap if necessary.
+ if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
+ CHECK(space->IsContinuousMemMapAllocSpace());
+ space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
+ }
+ mirror::Object* space_begin = reinterpret_cast<mirror::Object*>(space->Begin());
+ mirror::Object* space_limit = reinterpret_cast<mirror::Object*>(space->Limit());
+ if (IsEmpty()) {
+ begin_ = space_begin;
+ end_ = space_limit;
+ } else {
+ if (space_limit <= begin_) { // Space is before the immune region.
+ begin_ = space_begin;
+ } else if (space_begin >= end_) { // Space is after the immune region.
+ end_ = space_limit;
+ } else {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool ImmuneRegion::ContainsSpace(const space::ContinuousSpace* space) const {
+ return
+ begin_ <= reinterpret_cast<mirror::Object*>(space->Begin()) &&
+ end_ >= reinterpret_cast<mirror::Object*>(space->Limit());
+}
+
+} // namespace collector
+} // namespace gc
+} // namespace art
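A usage sketch of the merge logic above (space names hypothetical, caller holds heap_bitmap_lock_): a space adjacent to or beyond the region extends it, while a space overlapping the region's interior is rejected so the caller can fall back to bitmap tests.

    ImmuneRegion region;
    region.AddContinuousSpace(image_space);    // First space: region = [begin, limit).
    region.AddContinuousSpace(zygote_space);   // Before/after the region: extended.
    if (!region.AddContinuousSpace(other_space)) {
      // Overlaps the region's interior: keep testing this space's mark bitmap.
    }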
diff --git a/runtime/gc/collector/immune_region.h b/runtime/gc/collector/immune_region.h
new file mode 100644
index 0000000000..21d0b43f95
--- /dev/null
+++ b/runtime/gc/collector/immune_region.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_COLLECTOR_IMMUNE_REGION_H_
+#define ART_RUNTIME_GC_COLLECTOR_IMMUNE_REGION_H_
+
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "gc/space/space-inl.h"
+
+namespace art {
+namespace mirror {
+class Object;
+} // namespace mirror
+namespace gc {
+namespace space {
+class ContinuousSpace;
+} // namespace space
+
+namespace collector {
+
+// An immune region is a continuous region of memory for which all objects contained are assumed to
+// be marked. This is used as an optimization in the GC to avoid needing to test the mark bitmap of
+// the zygote, image spaces, and sometimes non-moving spaces. Doing the ContainsObject check is
+// faster than doing a bitmap read. There is no support for discontinuous spaces and you need to be
+// careful that your immune region doesn't contain any large objects.
+class ImmuneRegion {
+ public:
+ ImmuneRegion();
+ void Reset();
+ bool AddContinuousSpace(space::ContinuousSpace* space)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ bool ContainsSpace(const space::ContinuousSpace* space) const;
+ // Returns true if an object is inside the immune region (assumed to be marked).
+ bool ContainsObject(const mirror::Object* obj) const ALWAYS_INLINE {
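+ // A freshly Reset() region has begin_ == end_ == nullptr, so this is false for any object.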
+ return obj >= begin_ && obj < end_;
+ }
+
+ private:
+ bool IsEmpty() const {
+ return begin_ == end_;
+ }
+
+ mirror::Object* begin_;
+ mirror::Object* end_;
+};
+
+} // namespace collector
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_COLLECTOR_IMMUNE_REGION_H_
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 71424bd886..4f3ad32546 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -44,6 +44,7 @@
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
+#include "mirror/reference-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
@@ -88,51 +89,13 @@ static constexpr bool kCountJavaLangRefs = false;
static constexpr bool kCheckLocks = kDebugLocking;
static constexpr bool kVerifyRoots = kIsDebugBuild;
-void MarkSweep::ImmuneSpace(space::ContinuousSpace* space) {
- // Bind live to mark bitmap if necessary.
- if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
- CHECK(space->IsContinuousMemMapAllocSpace());
- space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
- }
-
- // Add the space to the immune region.
- // TODO: Use space limits instead of current end_ since the end_ can be changed by dlmalloc
- // callbacks.
- if (immune_begin_ == NULL) {
- DCHECK(immune_end_ == NULL);
- SetImmuneRange(reinterpret_cast<Object*>(space->Begin()),
- reinterpret_cast<Object*>(space->End()));
- } else {
- const space::ContinuousSpace* prev_space = nullptr;
- // Find out if the previous space is immune.
- for (const space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) {
- if (cur_space == space) {
- break;
- }
- prev_space = cur_space;
- }
- // If previous space was immune, then extend the immune region. Relies on continuous spaces
- // being sorted by Heap::AddContinuousSpace.
- if (prev_space != nullptr && IsImmuneSpace(prev_space)) {
- immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
- immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
- }
- }
-}
-
-bool MarkSweep::IsImmuneSpace(const space::ContinuousSpace* space) const {
- return
- immune_begin_ <= reinterpret_cast<Object*>(space->Begin()) &&
- immune_end_ >= reinterpret_cast<Object*>(space->End());
-}
-
void MarkSweep::BindBitmaps() {
timings_.StartSplit("BindBitmaps");
WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
// Mark all of the spaces we never collect as immune.
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
- ImmuneSpace(space);
+ CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
}
}
timings_.EndSplit();
@@ -144,8 +107,6 @@ MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_pre
(is_concurrent ? "concurrent mark sweep": "mark sweep")),
current_mark_bitmap_(NULL),
mark_stack_(NULL),
- immune_begin_(NULL),
- immune_end_(NULL),
live_stack_freeze_size_(0),
gc_barrier_(new Barrier(0)),
large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock),
@@ -158,7 +119,7 @@ void MarkSweep::InitializePhase() {
TimingLogger::ScopedSplit split("InitializePhase", &timings_);
mark_stack_ = heap_->mark_stack_.get();
DCHECK(mark_stack_ != nullptr);
- SetImmuneRange(nullptr, nullptr);
+ immune_region_.Reset();
class_count_ = 0;
array_count_ = 0;
other_count_ = 0;
@@ -298,7 +259,7 @@ void MarkSweep::MarkingPhase() {
void MarkSweep::UpdateAndMarkModUnion() {
for (const auto& space : heap_->GetContinuousSpaces()) {
- if (IsImmuneSpace(space)) {
+ if (immune_region_.ContainsSpace(space)) {
const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
"UpdateAndMarkImageModUnionTable";
TimingLogger::ScopedSplit split(name, &timings_);
@@ -385,11 +346,6 @@ void MarkSweep::ReclaimPhase() {
}
}
-void MarkSweep::SetImmuneRange(Object* begin, Object* end) {
- immune_begin_ = begin;
- immune_end_ = end;
-}
-
void MarkSweep::FindDefaultMarkBitmap() {
TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
@@ -442,7 +398,7 @@ mirror::Object* MarkSweep::MarkObjectCallback(mirror::Object* obj, void* arg) {
}
inline void MarkSweep::UnMarkObjectNonNull(const Object* obj) {
- DCHECK(!IsImmune(obj));
+ DCHECK(!immune_region_.ContainsObject(obj));
if (kUseBrooksPointer) {
// Verify all the objects have the correct Brooks pointer installed.
@@ -474,7 +430,7 @@ inline void MarkSweep::MarkObjectNonNull(const Object* obj) {
obj->AssertSelfBrooksPointer();
}
- if (IsImmune(obj)) {
+ if (immune_region_.ContainsObject(obj)) {
DCHECK(IsMarked(obj));
return;
}
@@ -541,7 +497,7 @@ inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
obj->AssertSelfBrooksPointer();
}
- if (IsImmune(obj)) {
+ if (immune_region_.ContainsObject(obj)) {
DCHECK(IsMarked(obj));
return false;
}
@@ -1109,7 +1065,8 @@ void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitma
std::vector<space::ContinuousSpace*> sweep_spaces;
space::ContinuousSpace* non_moving_space = nullptr;
for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
- if (space->IsAllocSpace() && !IsImmuneSpace(space) && space->GetLiveBitmap() != nullptr) {
+ if (space->IsAllocSpace() && !immune_region_.ContainsSpace(space) &&
+ space->GetLiveBitmap() != nullptr) {
if (space == heap_->GetNonMovingSpace()) {
non_moving_space = space;
} else {
@@ -1233,9 +1190,7 @@ void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
// the heap for later processing.
void MarkSweep::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
DCHECK(klass != nullptr);
- DCHECK(klass->IsReferenceClass());
- DCHECK(obj != NULL);
- heap_->DelayReferenceReferent(klass, obj, IsMarkedCallback, this);
+ heap_->DelayReferenceReferent(klass, obj->AsReference(), IsMarkedCallback, this);
}
class MarkObjectVisitor {
@@ -1330,7 +1285,7 @@ void MarkSweep::ProcessMarkStack(bool paused) {
inline bool MarkSweep::IsMarked(const Object* object) const
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
- if (IsImmune(object)) {
+ if (immune_region_.ContainsObject(object)) {
return true;
}
DCHECK(current_mark_bitmap_ != NULL);
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 8d40c34f28..df19f8868f 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -22,6 +22,7 @@
#include "base/macros.h"
#include "base/mutex.h"
#include "garbage_collector.h"
+#include "immune_region.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "UniquePtr.h"
@@ -108,15 +109,6 @@ class MarkSweep : public GarbageCollector {
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Make a space immune, immune spaces have all live objects marked - that is the mark and
- // live bitmaps are bound together.
- void ImmuneSpace(space::ContinuousSpace* space)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- bool IsImmuneSpace(const space::ContinuousSpace* space) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
// Bind the live bits to the mark bits of bitmaps for spaces that are never collected, ie
// the image. Mark that portion of the heap as immune.
virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -165,9 +157,6 @@ class MarkSweep : public GarbageCollector {
void ScanObjectVisit(mirror::Object* obj, const MarkVisitor& visitor)
NO_THREAD_SAFETY_ANALYSIS;
- // Everything inside the immune range is assumed to be marked.
- void SetImmuneRange(mirror::Object* begin, mirror::Object* end);
-
void SweepSystemWeaks()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
@@ -266,11 +255,6 @@ class MarkSweep : public GarbageCollector {
// whether or not we care about pauses.
size_t GetThreadCount(bool paused) const;
- // Returns true if an object is inside of the immune region (assumed to be marked).
- bool IsImmune(const mirror::Object* obj) const ALWAYS_INLINE {
- return obj >= immune_begin_ && obj < immune_end_;
- }
-
static void VerifyRootCallback(const mirror::Object* root, void* arg, size_t vreg,
const StackVisitor *visitor);
@@ -354,8 +338,7 @@ class MarkSweep : public GarbageCollector {
accounting::ObjectStack* mark_stack_;
- // Immune range, every object inside the immune range is assumed to be marked.
- mirror::Object* immune_begin_;
- mirror::Object* immune_end_;
+ // Immune region, every object inside the immune region is assumed to be marked.
+ ImmuneRegion immune_region_;
// Parallel finger.
AtomicInteger atomic_finger_;
diff --git a/runtime/gc/collector/partial_mark_sweep.cc b/runtime/gc/collector/partial_mark_sweep.cc
index 8ec28f3174..15f782aea8 100644
--- a/runtime/gc/collector/partial_mark_sweep.cc
+++ b/runtime/gc/collector/partial_mark_sweep.cc
@@ -39,7 +39,7 @@ void PartialMarkSweep::BindBitmaps() {
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
CHECK(space->IsZygoteSpace());
- ImmuneSpace(space);
+ CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
}
}
}
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 2da360f3a0..23b155cffc 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -44,6 +44,7 @@
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
+#include "mirror/reference-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
@@ -64,40 +65,6 @@ static constexpr bool kProtectFromSpace = true;
static constexpr bool kClearFromSpace = true;
static constexpr bool kStoreStackTraces = false;
-// TODO: Unduplicate logic.
-void SemiSpace::ImmuneSpace(space::ContinuousSpace* space) {
- // Bind live to mark bitmap if necessary.
- if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
- CHECK(space->IsContinuousMemMapAllocSpace());
- space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
- }
- // Add the space to the immune region.
- if (immune_begin_ == nullptr) {
- DCHECK(immune_end_ == nullptr);
- immune_begin_ = reinterpret_cast<Object*>(space->Begin());
- immune_end_ = reinterpret_cast<Object*>(space->End());
- } else {
- const space::ContinuousSpace* prev_space = nullptr;
- // Find out if the previous space is immune.
- for (space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) {
- if (cur_space == space) {
- break;
- }
- prev_space = cur_space;
- }
- // If previous space was immune, then extend the immune region. Relies on continuous spaces
- // being sorted by Heap::AddContinuousSpace.
- if (prev_space != nullptr && IsImmuneSpace(prev_space)) {
- immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
- // Use Limit() instead of End() because otherwise if the
- // generational mode is enabled, the alloc space might expand
- // due to promotion and the sense of immunity may change in the
- // middle of a GC.
- immune_end_ = std::max(reinterpret_cast<Object*>(space->Limit()), immune_end_);
- }
- }
-}
-
void SemiSpace::BindBitmaps() {
timings_.StartSplit("BindBitmaps");
WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
@@ -115,7 +82,7 @@ void SemiSpace::BindBitmaps() {
|| (generational_ && !whole_heap_collection_ &&
(space == GetHeap()->GetNonMovingSpace() ||
space == GetHeap()->GetPrimaryFreeListSpace()))) {
- ImmuneSpace(space);
+ CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
}
}
}
@@ -130,8 +97,6 @@ SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_pref
: GarbageCollector(heap,
name_prefix + (name_prefix.empty() ? "" : " ") + "marksweep + semispace"),
mark_stack_(nullptr),
- immune_begin_(nullptr),
- immune_end_(nullptr),
is_large_object_space_immune_(false),
to_space_(nullptr),
to_space_live_bitmap_(nullptr),
@@ -150,8 +115,7 @@ void SemiSpace::InitializePhase() {
TimingLogger::ScopedSplit split("InitializePhase", &timings_);
mark_stack_ = heap_->mark_stack_.get();
DCHECK(mark_stack_ != nullptr);
- immune_begin_ = nullptr;
- immune_end_ = nullptr;
+ immune_region_.Reset();
is_large_object_space_immune_ = false;
saved_bytes_ = 0;
self_ = Thread::Current();
@@ -238,16 +202,10 @@ void SemiSpace::MarkingPhase() {
MarkReachableObjects();
}
-bool SemiSpace::IsImmuneSpace(const space::ContinuousSpace* space) const {
- return
- immune_begin_ <= reinterpret_cast<Object*>(space->Begin()) &&
- immune_end_ >= reinterpret_cast<Object*>(space->End());
-}
-
void SemiSpace::UpdateAndMarkModUnion() {
for (auto& space : heap_->GetContinuousSpaces()) {
// If the space is immune then we need to mark the references to other spaces.
- if (IsImmuneSpace(space)) {
+ if (immune_region_.ContainsSpace(space)) {
accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
if (table != nullptr) {
// TODO: Improve naming.
@@ -295,7 +253,8 @@ void SemiSpace::MarkReachableObjects() {
// enabled,) then we need to scan its live bitmap as roots
// (including the objects on the live stack which have just marked
// in the live bitmap above in MarkAllocStackAsLive().)
- if (IsImmuneSpace(space) && heap_->FindModUnionTableFromSpace(space) == nullptr) {
+ if (immune_region_.ContainsSpace(space) &&
+ heap_->FindModUnionTableFromSpace(space) == nullptr) {
DCHECK(generational_ && !whole_heap_collection_ &&
(space == GetHeap()->GetNonMovingSpace() || space == GetHeap()->GetPrimaryFreeListSpace()));
accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
@@ -556,7 +515,7 @@ Object* SemiSpace::MarkObject(Object* obj) {
}
}
Object* forward_address = obj;
- if (obj != nullptr && !IsImmune(obj)) {
+ if (obj != nullptr && !immune_region_.ContainsObject(obj)) {
if (from_space_->HasAddress(obj)) {
forward_address = GetForwardingAddressInFromSpace(obj);
// If the object has already been moved, return the new forward address.
@@ -634,7 +593,7 @@ void SemiSpace::SweepSystemWeaks() {
}
bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
- return space != from_space_ && space != to_space_ && !IsImmuneSpace(space);
+ return space != from_space_ && space != to_space_ && !immune_region_.ContainsSpace(space);
}
void SemiSpace::Sweep(bool swap_bitmaps) {
@@ -675,7 +634,7 @@ void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void SemiSpace::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
- heap_->DelayReferenceReferent(klass, obj, MarkedForwardingAddressCallback, this);
+ heap_->DelayReferenceReferent(klass, obj->AsReference(), MarkedForwardingAddressCallback, this);
}
class SemiSpaceMarkObjectVisitor {
@@ -744,7 +703,7 @@ void SemiSpace::ProcessMarkStack() {
inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
// All immune objects are assumed marked.
- if (IsImmune(obj)) {
+ if (immune_region_.ContainsObject(obj)) {
return obj;
}
if (from_space_->HasAddress(obj)) {
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index c164c5fc09..be7ec05f2a 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -18,10 +18,10 @@
#define ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_H_
#include "atomic.h"
-#include "barrier.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "garbage_collector.h"
+#include "immune_region.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "UniquePtr.h"
@@ -104,12 +104,6 @@ class SemiSpace : public GarbageCollector {
void MarkRoots()
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
- // Make a space immune, immune spaces have all live objects marked - that is the mark and
- // live bitmaps are bound together.
- void ImmuneSpace(space::ContinuousSpace* space)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
// Bind the live bits to the mark bits of bitmaps for spaces that are never collected, ie
// the image. Mark that portion of the heap as immune.
virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -174,13 +168,6 @@ class SemiSpace : public GarbageCollector {
// Returns true if we should sweep the space.
virtual bool ShouldSweepSpace(space::ContinuousSpace* space) const;
- // Returns true if an object is inside of the immune region (assumed to be marked).
- bool IsImmune(const mirror::Object* obj) const ALWAYS_INLINE {
- return obj >= immune_begin_ && obj < immune_end_;
- }
-
- bool IsImmuneSpace(const space::ContinuousSpace* space) const;
-
static void VerifyRootCallback(const mirror::Object* root, void* arg, size_t vreg,
const StackVisitor *visitor);
@@ -257,9 +244,8 @@ class SemiSpace : public GarbageCollector {
// object.
accounting::ObjectStack* mark_stack_;
- // Immune range, every object inside the immune range is assumed to be marked.
- mirror::Object* immune_begin_;
- mirror::Object* immune_end_;
+ // Immune region, every object inside the immune region is assumed to be marked.
+ ImmuneRegion immune_region_;
// If true, the large object space is immune.
bool is_large_object_space_immune_;
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index d962f3ce55..45904ffca0 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -48,14 +48,15 @@
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "heap-inl.h"
#include "image.h"
-#include "invoke_arg_array_builder.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
+#include "mirror/reference-inl.h"
#include "object_utils.h"
#include "os.h"
+#include "reflection.h"
#include "runtime.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
@@ -92,7 +93,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
background_collector_type_(background_collector_type),
desired_collector_type_(collector_type_),
heap_trim_request_lock_(nullptr),
- heap_trim_target_time_(0),
+ last_trim_time_(0),
heap_transition_target_time_(0),
heap_trim_request_pending_(false),
parallel_gc_threads_(parallel_gc_threads),
@@ -103,11 +104,6 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
ignore_max_footprint_(ignore_max_footprint),
have_zygote_space_(false),
large_object_threshold_(std::numeric_limits<size_t>::max()), // Starts out disabled.
- soft_reference_queue_(this),
- weak_reference_queue_(this),
- finalizer_reference_queue_(this),
- phantom_reference_queue_(this),
- cleared_references_(this),
collector_type_running_(kCollectorTypeNone),
last_gc_type_(collector::kGcTypeNone),
next_gc_type_(collector::kGcTypePartial),
@@ -144,11 +140,6 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
current_non_moving_allocator_(kAllocatorTypeNonMoving),
bump_pointer_space_(nullptr),
temp_space_(nullptr),
- reference_referent_offset_(0),
- reference_queue_offset_(0),
- reference_queueNext_offset_(0),
- reference_pendingNext_offset_(0),
- finalizer_reference_zombie_offset_(0),
min_free_(min_free),
max_free_(max_free),
target_utilization_(target_utilization),
@@ -484,10 +475,11 @@ void Heap::UpdateProcessState(ProcessState process_state) {
process_state_ = process_state;
if (process_state_ == kProcessStateJankPerceptible) {
// Transition back to foreground right away to prevent jank.
- RequestHeapTransition(post_zygote_collector_type_, 0);
+ RequestCollectorTransition(post_zygote_collector_type_, 0);
} else {
// Don't delay for debug builds since we may want to stress test the GC.
- RequestHeapTransition(background_collector_type_, kIsDebugBuild ? 0 : kHeapTransitionWait);
+ RequestCollectorTransition(background_collector_type_, kIsDebugBuild ? 0 :
+ kCollectorTransitionWait);
}
}
}
@@ -791,29 +783,12 @@ void Heap::ProcessReferences(TimingLogger& timings, bool clear_soft,
timings.EndSplit();
}
-bool Heap::IsEnqueued(mirror::Object* ref) const {
- // Since the references are stored as cyclic lists it means that once enqueued, the pending next
- // will always be non-null.
- return ref->GetFieldObject<mirror::Object>(GetReferencePendingNextOffset(), false) != nullptr;
-}
-
-bool Heap::IsEnqueuable(mirror::Object* ref) const {
- DCHECK(ref != nullptr);
- const mirror::Object* queue =
- ref->GetFieldObject<mirror::Object>(GetReferenceQueueOffset(), false);
- const mirror::Object* queue_next =
- ref->GetFieldObject<mirror::Object>(GetReferenceQueueNextOffset(), false);
- return queue != nullptr && queue_next == nullptr;
-}
-
// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
-void Heap::DelayReferenceReferent(mirror::Class* klass, mirror::Object* obj,
+void Heap::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
IsMarkedCallback is_marked_callback, void* arg) {
- DCHECK(klass != nullptr);
- DCHECK(klass->IsReferenceClass());
- DCHECK(obj != nullptr);
- mirror::Object* referent = GetReferenceReferent(obj);
+ DCHECK_EQ(klass, ref->GetClass());
+ mirror::Object* referent = ref->GetReferent();
if (referent != nullptr) {
mirror::Object* forward_address = is_marked_callback(referent, arg);
// Null means that the object is not currently marked.
@@ -823,20 +798,20 @@ void Heap::DelayReferenceReferent(mirror::Class* klass, mirror::Object* obj,
// We need to check that the references haven't already been enqueued since we can end up
// scanning the same reference multiple times due to dirty cards.
if (klass->IsSoftReferenceClass()) {
- soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
+ soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
} else if (klass->IsWeakReferenceClass()) {
- weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
+ weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
} else if (klass->IsFinalizerReferenceClass()) {
- finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
+ finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
} else if (klass->IsPhantomReferenceClass()) {
- phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, obj);
+ phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
} else {
LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) << " " << std::hex
<< klass->GetAccessFlags();
}
} else if (referent != forward_address) {
// Referent is already marked and we need to update it.
- SetReferenceReferent(obj, forward_address);
+ ref->SetReferent<false>(forward_address);
}
}
}
@@ -903,7 +878,8 @@ void Heap::DoPendingTransitionOrTrim() {
ScopedThreadStateChange tsc(self, kSleeping);
usleep(wait_time / 1000); // Usleep takes microseconds.
}
- // Transition the heap if the desired collector type is nto the same as the current collector type.
+ // Transition the collector if the desired collector type is not the same as the current
+ // collector type.
TransitionCollector(desired_collector_type);
// Do a heap trim if it is needed.
Trim();
@@ -913,9 +889,10 @@ void Heap::Trim() {
Thread* self = Thread::Current();
{
MutexLock mu(self, *heap_trim_request_lock_);
- if (!heap_trim_request_pending_ || NanoTime() < heap_trim_target_time_) {
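+ // Rate-limit trimming: bail out if the last trim happened less than kHeapTrimWait ago.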
+ if (!heap_trim_request_pending_ || last_trim_time_ + kHeapTrimWait >= NanoTime()) {
return;
}
+ last_trim_time_ = NanoTime();
heap_trim_request_pending_ = false;
}
{
@@ -1804,7 +1781,7 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus
collector->Run(gc_cause, clear_soft_references);
total_objects_freed_ever_ += collector->GetFreedObjects();
total_bytes_freed_ever_ += collector->GetFreedBytes();
- RequestHeapTrim(Heap::kHeapTrimWait);
+ RequestHeapTrim();
// Enqueue cleared references.
EnqueueClearedReferences();
// Grow the heap so that we know when to perform the next GC.
@@ -2010,8 +1987,9 @@ class VerifyObjectVisitor {
VerifyReferenceVisitor visitor(heap_);
// The class doesn't count as a reference but we should verify it anyway.
collector::MarkSweep::VisitObjectReferences(obj, visitor, true);
- if (obj->GetClass()->IsReferenceClass()) {
- visitor(obj, heap_->GetReferenceReferent(obj), MemberOffset(0), false);
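+ // The referent field is handled specially by the GC (via DelayReferenceReferent) rather
+ // than during normal scanning, so verify it explicitly here.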
+ if (obj->IsReferenceInstance()) {
+ mirror::Reference* ref = obj->AsReference();
+ visitor(obj, ref->GetReferent(), mirror::Reference::ReferentOffset(), false);
}
failed_ = failed_ || visitor.Failed();
}
@@ -2473,42 +2451,12 @@ void Heap::ClearGrowthLimit() {
non_moving_space_->ClearGrowthLimit();
}
-void Heap::SetReferenceOffsets(MemberOffset reference_referent_offset,
- MemberOffset reference_queue_offset,
- MemberOffset reference_queueNext_offset,
- MemberOffset reference_pendingNext_offset,
- MemberOffset finalizer_reference_zombie_offset) {
- reference_referent_offset_ = reference_referent_offset;
- reference_queue_offset_ = reference_queue_offset;
- reference_queueNext_offset_ = reference_queueNext_offset;
- reference_pendingNext_offset_ = reference_pendingNext_offset;
- finalizer_reference_zombie_offset_ = finalizer_reference_zombie_offset;
- CHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
- CHECK_NE(reference_queue_offset_.Uint32Value(), 0U);
- CHECK_NE(reference_queueNext_offset_.Uint32Value(), 0U);
- CHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);
- CHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
-}
-
-void Heap::SetReferenceReferent(mirror::Object* reference, mirror::Object* referent) {
- DCHECK(reference != NULL);
- DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
- reference->SetFieldObject<false, false>(reference_referent_offset_, referent, true);
-}
-
-mirror::Object* Heap::GetReferenceReferent(mirror::Object* reference) {
- DCHECK(reference != NULL);
- DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
- return reference->GetFieldObject<mirror::Object>(reference_referent_offset_, true);
-}
-
void Heap::AddFinalizerReference(Thread* self, mirror::Object* object) {
ScopedObjectAccess soa(self);
- JValue result;
- ArgArray arg_array("VL", 2);
- arg_array.Append(object);
- soa.DecodeMethod(WellKnownClasses::java_lang_ref_FinalizerReference_add)->Invoke(self,
- arg_array.GetArray(), arg_array.GetNumBytes(), &result, "VL");
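+ // FinalizerReference.add() is static, so the receiver is null; the object is passed as the
+ // single reference argument.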
+ ScopedLocalRef<jobject> arg(self->GetJniEnv(), soa.AddLocalReference<jobject>(object));
+ jvalue args[1];
+ args[0].l = arg.get();
+ InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_FinalizerReference_add, args);
}
void Heap::EnqueueClearedReferences() {
@@ -2518,11 +2466,11 @@ void Heap::EnqueueClearedReferences() {
// When a runtime isn't started there are no reference queues to care about so ignore.
if (LIKELY(Runtime::Current()->IsStarted())) {
ScopedObjectAccess soa(self);
- JValue result;
- ArgArray arg_array("VL", 2);
- arg_array.Append(cleared_references_.GetList());
- soa.DecodeMethod(WellKnownClasses::java_lang_ref_ReferenceQueue_add)->Invoke(soa.Self(),
- arg_array.GetArray(), arg_array.GetNumBytes(), &result, "VL");
+ ScopedLocalRef<jobject> arg(self->GetJniEnv(),
+ soa.AddLocalReference<jobject>(cleared_references_.GetList()));
+ jvalue args[1];
+ args[0].l = arg.get();
+ InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_ReferenceQueue_add, args);
}
cleared_references_.Clear();
}
@@ -2567,7 +2515,7 @@ void Heap::ConcurrentGC(Thread* self) {
}
}
-void Heap::RequestHeapTransition(CollectorType desired_collector_type, uint64_t delta_time) {
+void Heap::RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time) {
Thread* self = Thread::Current();
{
MutexLock mu(self, *heap_trim_request_lock_);
@@ -2580,7 +2528,7 @@ void Heap::RequestHeapTransition(CollectorType desired_collector_type, uint64_t
SignalHeapTrimDaemon(self);
}
-void Heap::RequestHeapTrim(uint64_t delta_time) {
+void Heap::RequestHeapTrim() {
// GC completed and now we must decide whether to request a heap trim (advising pages back to the
// kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim scans
// a space it will hold its lock and can become a cause of jank.
@@ -2607,7 +2555,11 @@ void Heap::RequestHeapTrim(uint64_t delta_time) {
if (!CareAboutPauseTimes()) {
{
MutexLock mu(self, *heap_trim_request_lock_);
- heap_trim_target_time_ = std::max(heap_trim_target_time_, NanoTime() + delta_time);
+ if (last_trim_time_ + kHeapTrimWait >= NanoTime()) {
+ // We have done a heap trim within the last kHeapTrimWait nanoseconds; don't request
+ // another one just yet.
+ return;
+ }
heap_trim_request_pending_ = true;
}
// Notify the daemon thread which will actually do the heap trim.
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 797f44cd8c..1e0a59649c 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -135,9 +135,10 @@ class Heap {
// Used so that we don't overflow the allocation time atomic integer.
static constexpr size_t kTimeAdjust = 1024;
- // How long we wait after a GC to perform a heap trim (nanoseconds).
+ // How often we allow heap trimming to happen (nanoseconds).
static constexpr uint64_t kHeapTrimWait = MsToNs(5000);
- static constexpr uint64_t kHeapTransitionWait = MsToNs(5000);
+ // How long we wait after a transition request to perform a collector transition (nanoseconds).
+ static constexpr uint64_t kCollectorTransitionWait = MsToNs(5000);
// Create a heap with the requested sizes. The possible empty
// image_file_names names specify Spaces to load based on
@@ -311,26 +312,6 @@ class Heap {
return discontinuous_spaces_;
}
- void SetReferenceOffsets(MemberOffset reference_referent_offset,
- MemberOffset reference_queue_offset,
- MemberOffset reference_queueNext_offset,
- MemberOffset reference_pendingNext_offset,
- MemberOffset finalizer_reference_zombie_offset);
- MemberOffset GetReferenceReferentOffset() const {
- return reference_referent_offset_;
- }
- MemberOffset GetReferenceQueueOffset() const {
- return reference_queue_offset_;
- }
- MemberOffset GetReferenceQueueNextOffset() const {
- return reference_queueNext_offset_;
- }
- MemberOffset GetReferencePendingNextOffset() const {
- return reference_pendingNext_offset_;
- }
- MemberOffset GetFinalizerReferenceZombieOffset() const {
- return finalizer_reference_zombie_offset_;
- }
static mirror::Object* PreserveSoftReferenceCallback(mirror::Object* obj, void* arg);
void ProcessReferences(TimingLogger& timings, bool clear_soft,
IsMarkedCallback* is_marked_callback,
@@ -623,20 +604,9 @@ class Heap {
bool IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Pushes a list of cleared references out to the managed heap.
- void SetReferenceReferent(mirror::Object* reference, mirror::Object* referent)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::Object* GetReferenceReferent(mirror::Object* reference)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void ClearReferenceReferent(mirror::Object* reference)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- SetReferenceReferent(reference, nullptr);
- }
void EnqueueClearedReferences();
- // Returns true if the reference object has not yet been enqueued.
- bool IsEnqueuable(mirror::Object* ref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsEnqueued(mirror::Object* ref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void DelayReferenceReferent(mirror::Class* klass, mirror::Object* obj,
+ void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
IsMarkedCallback is_marked_callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -648,9 +618,9 @@ class Heap {
collector::GcType WaitForGcToCompleteLocked(Thread* self)
EXCLUSIVE_LOCKS_REQUIRED(gc_complete_lock_);
- void RequestHeapTransition(CollectorType desired_collector_type, uint64_t delta_time)
+ void RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time)
LOCKS_EXCLUDED(heap_trim_request_lock_);
- void RequestHeapTrim(uint64_t delta_time) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
+ void RequestHeapTrim() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
void RequestConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
bool IsGCRequestPending() const;
@@ -754,7 +724,7 @@ class Heap {
// Lock which guards heap trim requests.
Mutex* heap_trim_request_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
- // When we want to perform the next heap trim (nano seconds).
- uint64_t heap_trim_target_time_ GUARDED_BY(heap_trim_request_lock_);
+ // When we last performed a heap trim (nanoseconds).
+ uint64_t last_trim_time_ GUARDED_BY(heap_trim_request_lock_);
// When we want to perform the next heap transition (nano seconds).
uint64_t heap_transition_target_time_ GUARDED_BY(heap_trim_request_lock_);
// If we have a heap trim request pending.
@@ -917,17 +887,6 @@ class Heap {
// Temp space is the space which the semispace collector copies to.
space::BumpPointerSpace* temp_space_;
- // offset of java.lang.ref.Reference.referent
- MemberOffset reference_referent_offset_;
- // offset of java.lang.ref.Reference.queue
- MemberOffset reference_queue_offset_;
- // offset of java.lang.ref.Reference.queueNext
- MemberOffset reference_queueNext_offset_;
- // offset of java.lang.ref.Reference.pendingNext
- MemberOffset reference_pendingNext_offset_;
- // offset of java.lang.ref.FinalizerReference.zombie
- MemberOffset finalizer_reference_zombie_offset_;
-
// Minimum free guarantees that you always have at least min_free_ free bytes after growing for
// utilization, regardless of target utilization ratio.
size_t min_free_;
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index 203701ff5e..aee7891d2f 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -20,91 +20,84 @@
#include "heap.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
+#include "mirror/reference-inl.h"
namespace art {
namespace gc {
-ReferenceQueue::ReferenceQueue(Heap* heap)
+ReferenceQueue::ReferenceQueue()
: lock_("reference queue lock"),
- heap_(heap),
list_(nullptr) {
}
-void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Object* ref) {
+void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref) {
DCHECK(ref != NULL);
MutexLock mu(self, lock_);
- if (!heap_->IsEnqueued(ref)) {
+ if (!ref->IsEnqueued()) {
EnqueuePendingReference(ref);
}
}
-void ReferenceQueue::EnqueueReference(mirror::Object* ref) {
- CHECK(heap_->IsEnqueuable(ref));
+void ReferenceQueue::EnqueueReference(mirror::Reference* ref) {
+ CHECK(ref->IsEnqueuable());
EnqueuePendingReference(ref);
}
-void ReferenceQueue::EnqueuePendingReference(mirror::Object* ref) {
+void ReferenceQueue::EnqueuePendingReference(mirror::Reference* ref) {
DCHECK(ref != NULL);
- MemberOffset pending_next_offset = heap_->GetReferencePendingNextOffset();
- DCHECK_NE(pending_next_offset.Uint32Value(), 0U);
if (IsEmpty()) {
// 1 element cyclic queue, ie: Reference ref = ..; ref.pendingNext = ref;
- if (Runtime::Current()->IsActiveTransaction()) {
- ref->SetFieldObject<true>(pending_next_offset, ref, false);
- } else {
- ref->SetFieldObject<false>(pending_next_offset, ref, false);
- }
list_ = ref;
} else {
- mirror::Object* head = list_->GetFieldObject<mirror::Object>(pending_next_offset, false);
+ mirror::Reference* head = list_->GetPendingNext();
if (Runtime::Current()->IsActiveTransaction()) {
- ref->SetFieldObject<true>(pending_next_offset, head, false);
- list_->SetFieldObject<true>(pending_next_offset, ref, false);
+ ref->SetPendingNext<true>(head);
} else {
- ref->SetFieldObject<false>(pending_next_offset, head, false);
- list_->SetFieldObject<false>(pending_next_offset, ref, false);
+ ref->SetPendingNext<false>(head);
}
}
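+ // In both cases list_ must now point at the newly enqueued reference; when the queue was
+ // empty, list_ == ref, which makes the one-element queue cyclic.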
+ if (Runtime::Current()->IsActiveTransaction()) {
+ list_->SetPendingNext<true>(ref);
+ } else {
+ list_->SetPendingNext<false>(ref);
+ }
}
-mirror::Object* ReferenceQueue::DequeuePendingReference() {
+mirror::Reference* ReferenceQueue::DequeuePendingReference() {
DCHECK(!IsEmpty());
- MemberOffset pending_next_offset = heap_->GetReferencePendingNextOffset();
- mirror::Object* head = list_->GetFieldObject<mirror::Object>(pending_next_offset, false);
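+ // The element to dequeue is always the pendingNext of list_.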
+ mirror::Reference* head = list_->GetPendingNext();
DCHECK(head != nullptr);
- mirror::Object* ref;
+ mirror::Reference* ref;
// Note: the following code is thread-safe because it is only called from ProcessReferences which
// is single threaded.
if (list_ == head) {
ref = list_;
list_ = nullptr;
} else {
- mirror::Object* next = head->GetFieldObject<mirror::Object>(pending_next_offset, false);
+ mirror::Reference* next = head->GetPendingNext();
if (Runtime::Current()->IsActiveTransaction()) {
- list_->SetFieldObject<true>(pending_next_offset, next, false);
+ list_->SetPendingNext<true>(next);
} else {
- list_->SetFieldObject<false>(pending_next_offset, next, false);
+ list_->SetPendingNext<false>(next);
}
ref = head;
}
if (Runtime::Current()->IsActiveTransaction()) {
- ref->SetFieldObject<true>(pending_next_offset, nullptr, false);
+ ref->SetPendingNext<true>(nullptr);
} else {
- ref->SetFieldObject<false>(pending_next_offset, nullptr, false);
+ ref->SetPendingNext<false>(nullptr);
}
return ref;
}
void ReferenceQueue::Dump(std::ostream& os) const {
- mirror::Object* cur = list_;
+ mirror::Reference* cur = list_;
os << "Reference starting at list_=" << list_ << "\n";
while (cur != nullptr) {
- mirror::Object* pending_next =
- cur->GetFieldObject<mirror::Object>(heap_->GetReferencePendingNextOffset(), false);
+ mirror::Reference* pending_next = cur->GetPendingNext();
os << "PendingNext=" << pending_next;
- if (cur->GetClass()->IsFinalizerReferenceClass()) {
- os << " Zombie=" <<
- cur->GetFieldObject<mirror::Object>(heap_->GetFinalizerReferenceZombieOffset(), false);
+ if (cur->IsFinalizerReferenceInstance()) {
+ os << " Zombie=" << cur->AsFinalizerReference()->GetZombie();
}
os << "\n";
cur = pending_next;
@@ -115,19 +108,23 @@ void ReferenceQueue::ClearWhiteReferences(ReferenceQueue& cleared_references,
IsMarkedCallback* preserve_callback,
void* arg) {
while (!IsEmpty()) {
- mirror::Object* ref = DequeuePendingReference();
- mirror::Object* referent = heap_->GetReferenceReferent(ref);
+ mirror::Reference* ref = DequeuePendingReference();
+ mirror::Object* referent = ref->GetReferent();
if (referent != nullptr) {
mirror::Object* forward_address = preserve_callback(referent, arg);
if (forward_address == nullptr) {
// Referent is white, clear it.
- heap_->ClearReferenceReferent(ref);
- if (heap_->IsEnqueuable(ref)) {
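+ // The template flag selects transaction-aware field writes, used by dex2oat when
+ // initializing classes at compile time so that changes can be rolled back.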
+ if (Runtime::Current()->IsActiveTransaction()) {
+ ref->ClearReferent<true>();
+ } else {
+ ref->ClearReferent<false>();
+ }
+ if (ref->IsEnqueuable()) {
cleared_references.EnqueuePendingReference(ref);
}
} else if (referent != forward_address) {
// Object moved, need to update the referent.
- heap_->SetReferenceReferent(ref, forward_address);
+ ref->SetReferent<false>(forward_address);
}
}
}
@@ -138,42 +135,43 @@ void ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue& cleared_referenc
MarkObjectCallback recursive_mark_callback,
void* arg) {
while (!IsEmpty()) {
- mirror::Object* ref = DequeuePendingReference();
- mirror::Object* referent = heap_->GetReferenceReferent(ref);
+ mirror::FinalizerReference* ref = DequeuePendingReference()->AsFinalizerReference();
+ mirror::Object* referent = ref->GetReferent();
if (referent != nullptr) {
mirror::Object* forward_address = is_marked_callback(referent, arg);
// If the referent isn't marked, mark it recursively and obtain its forwarding address.
if (forward_address == nullptr) {
forward_address = recursive_mark_callback(referent, arg);
// If the referent is non-null, the reference must be enqueuable.
- DCHECK(heap_->IsEnqueuable(ref));
+ DCHECK(ref->IsEnqueuable());
// Move the updated referent to the zombie field.
if (Runtime::Current()->IsActiveTransaction()) {
- ref->SetFieldObject<true>(heap_->GetFinalizerReferenceZombieOffset(), forward_address, false);
+ ref->SetZombie<true>(forward_address);
+ ref->ClearReferent<true>();
} else {
- ref->SetFieldObject<false>(heap_->GetFinalizerReferenceZombieOffset(), forward_address, false);
+ ref->SetZombie<false>(forward_address);
+ ref->ClearReferent<false>();
}
- heap_->ClearReferenceReferent(ref);
cleared_references.EnqueueReference(ref);
} else if (referent != forward_address) {
- heap_->SetReferenceReferent(ref, forward_address);
+ ref->SetReferent<false>(forward_address);
}
}
}
}
void ReferenceQueue::PreserveSomeSoftReferences(IsMarkedCallback preserve_callback, void* arg) {
- ReferenceQueue cleared(heap_);
+ ReferenceQueue cleared;
while (!IsEmpty()) {
- mirror::Object* ref = DequeuePendingReference();
- mirror::Object* referent = heap_->GetReferenceReferent(ref);
+ mirror::Reference* ref = DequeuePendingReference();
+ mirror::Object* referent = ref->GetReferent();
if (referent != nullptr) {
mirror::Object* forward_address = preserve_callback(referent, arg);
if (forward_address == nullptr) {
// Either the reference isn't marked or we don't wish to preserve it.
cleared.EnqueuePendingReference(ref);
} else if (forward_address != referent) {
- heap_->SetReferenceReferent(ref, forward_address);
+ ref->SetReferent<false>(forward_address);
}
}
}
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index 99314ba0ef..8d392babf6 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -31,6 +31,10 @@
#include "thread_pool.h"
namespace art {
+namespace mirror {
+class Reference;
+} // namespace mirror
+
namespace gc {
class Heap;
@@ -40,18 +44,18 @@ class Heap;
// java.lang.ref.Reference objects.
class ReferenceQueue {
public:
- explicit ReferenceQueue(Heap* heap);
+ ReferenceQueue();
// Enqueue a reference if it is not already enqueued. Thread safe to call from multiple threads
// since it uses a lock to avoid a race between checking for the reference's presence and adding
// it.
- void AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Object* ref)
+ void AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
// Enqueue a reference. Unlike EnqueuePendingReference, EnqueueReference checks that the
// reference IsEnqueuable. Not thread safe, used when mutators are paused to minimize lock
// overhead.
- void EnqueueReference(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void EnqueuePendingReference(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::Object* DequeuePendingReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void EnqueueReference(mirror::Reference* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void EnqueuePendingReference(mirror::Reference* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Reference* DequeuePendingReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Enqueues finalizer references with white referents. White referents are blackened, moved to the
// zombie field, and the referent field is cleared.
void EnqueueFinalizerReferences(ReferenceQueue& cleared_references,
@@ -76,7 +80,7 @@ class ReferenceQueue {
void Clear() {
list_ = nullptr;
}
- mirror::Object* GetList() {
+ mirror::Reference* GetList() {
return list_;
}
@@ -84,10 +88,8 @@ class ReferenceQueue {
// Lock, used for parallel GC reference enqueuing. It allows for multiple threads simultaneously
// calling AtomicEnqueueIfNotEnqueued.
Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
- // The heap contains the reference offsets.
- Heap* const heap_;
// The actual reference list. Not a root since it will be nullptr when the GC is not running.
- mirror::Object* list_;
+ mirror::Reference* list_;
};
} // namespace gc
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index ca5b5a9b79..bb52c66905 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -67,6 +67,37 @@ static bool GenerateImage(const std::string& image_file_name, std::string* error
arg_vector.push_back("--runtime-arg");
arg_vector.push_back("-Xmx64m");
+ arg_vector.push_back("--runtime-arg");
+ std::string checkstr = "-implicit-checks";
+ int nchecks = 0;
+ char checksep = ':';
+
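+ // Builds an option such as "-implicit-checks:null,suspend,stack"; ':' introduces the list,
+ // ',' separates later entries, and ":none" is appended when every check is explicit.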
+ if (!Runtime::Current()->ExplicitNullChecks()) {
+ checkstr += checksep;
+ checksep = ',';
+ checkstr += "null";
+ ++nchecks;
+ }
+ if (!Runtime::Current()->ExplicitSuspendChecks()) {
+ checkstr += checksep;
+ checksep = ',';
+ checkstr += "suspend";
+ ++nchecks;
+ }
+
+ if (!Runtime::Current()->ExplicitStackOverflowChecks()) {
+ checkstr += checksep;
+ checksep = ',';
+ checkstr += "stack";
+ ++nchecks;
+ }
+
+ if (nchecks == 0) {
+ checkstr += ":none";
+ }
+
+ arg_vector.push_back(checkstr);
+
for (size_t i = 0; i < boot_class_path.size(); i++) {
arg_vector.push_back(std::string("--dex-file=") + boot_class_path[i]);
}
@@ -170,7 +201,7 @@ ImageSpace* ImageSpace::Init(const char* image_file_name, bool validate_oat_file
UniquePtr<MemMap> map(MemMap::MapFileAtAddress(image_header.GetImageBegin(),
image_header.GetImageSize(),
PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_FIXED,
+ MAP_PRIVATE,
file->Fd(),
0,
false,
diff --git a/runtime/gc/space/space-inl.h b/runtime/gc/space/space-inl.h
index 02a63f6e73..3a715ab438 100644
--- a/runtime/gc/space/space-inl.h
+++ b/runtime/gc/space/space-inl.h
@@ -21,6 +21,7 @@
#include "dlmalloc_space.h"
#include "image_space.h"
+#include "large_object_space.h"
namespace art {
namespace gc {
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 589e0b042a..21eeafa9d9 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -29,7 +29,6 @@
#include "dex_instruction.h"
#include "entrypoints/entrypoint_utils.h"
#include "gc/accounting/card_table-inl.h"
-#include "invoke_arg_array_builder.h"
#include "nth_caller_visitor.h"
#include "mirror/art_field-inl.h"
#include "mirror/art_method.h"
diff --git a/runtime/invoke_arg_array_builder.h b/runtime/invoke_arg_array_builder.h
deleted file mode 100644
index 6ecce4072c..0000000000
--- a/runtime/invoke_arg_array_builder.h
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_INVOKE_ARG_ARRAY_BUILDER_H_
-#define ART_RUNTIME_INVOKE_ARG_ARRAY_BUILDER_H_
-
-#include "mirror/art_method.h"
-#include "mirror/object.h"
-#include "scoped_thread_state_change.h"
-
-namespace art {
-
-static inline size_t NumArgArrayBytes(const char* shorty, uint32_t shorty_len) {
- size_t num_bytes = 0;
- for (size_t i = 1; i < shorty_len; ++i) {
- char ch = shorty[i];
- if (ch == 'D' || ch == 'J') {
- num_bytes += 8;
- } else if (ch == 'L') {
- // Argument is a reference or an array. The shorty descriptor
- // does not distinguish between these types.
- num_bytes += sizeof(mirror::Object*);
- } else {
- num_bytes += 4;
- }
- }
- return num_bytes;
-}
-
-class ArgArray {
- public:
- explicit ArgArray(const char* shorty, uint32_t shorty_len)
- : shorty_(shorty), shorty_len_(shorty_len), num_bytes_(0) {
- size_t num_slots = shorty_len + 1; // +1 in case of receiver.
- if (LIKELY((num_slots * 2) < kSmallArgArraySize)) {
- // We can trivially use the small arg array.
- arg_array_ = small_arg_array_;
- } else {
- // Analyze shorty to see if we need the large arg array.
- for (size_t i = 1; i < shorty_len; ++i) {
- char c = shorty[i];
- if (c == 'J' || c == 'D') {
- num_slots++;
- }
- }
- if (num_slots <= kSmallArgArraySize) {
- arg_array_ = small_arg_array_;
- } else {
- large_arg_array_.reset(new uint32_t[num_slots]);
- arg_array_ = large_arg_array_.get();
- }
- }
- }
-
- uint32_t* GetArray() {
- return arg_array_;
- }
-
- uint32_t GetNumBytes() {
- return num_bytes_;
- }
-
- void Append(uint32_t value) {
- arg_array_[num_bytes_ / 4] = value;
- num_bytes_ += 4;
- }
-
- void Append(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Append(StackReference<mirror::Object>::FromMirrorPtr(obj).AsVRegValue());
- }
-
- void AppendWide(uint64_t value) {
- // For ARM and MIPS portable, align wide values to 8 bytes (ArgArray starts at offset of 4).
-#if defined(ART_USE_PORTABLE_COMPILER) && (defined(__arm__) || defined(__mips__))
- if (num_bytes_ % 8 == 0) {
- num_bytes_ += 4;
- }
-#endif
- arg_array_[num_bytes_ / 4] = value;
- arg_array_[(num_bytes_ / 4) + 1] = value >> 32;
- num_bytes_ += 8;
- }
-
- void BuildArgArray(const ScopedObjectAccess& soa, mirror::Object* receiver, va_list ap)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // Set receiver if non-null (method is not static)
- if (receiver != nullptr) {
- Append(receiver);
- }
- for (size_t i = 1; i < shorty_len_; ++i) {
- switch (shorty_[i]) {
- case 'Z':
- case 'B':
- case 'C':
- case 'S':
- case 'I':
- Append(va_arg(ap, jint));
- break;
- case 'F': {
- JValue value;
- value.SetF(va_arg(ap, jdouble));
- Append(value.GetI());
- break;
- }
- case 'L':
- Append(soa.Decode<mirror::Object*>(va_arg(ap, jobject)));
- break;
- case 'D': {
- JValue value;
- value.SetD(va_arg(ap, jdouble));
- AppendWide(value.GetJ());
- break;
- }
- case 'J': {
- AppendWide(va_arg(ap, jlong));
- break;
- }
- }
- }
- }
-
- void BuildArgArray(const ScopedObjectAccess& soa, mirror::Object* receiver, jvalue* args)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // Set receiver if non-null (method is not static)
- if (receiver != nullptr) {
- Append(receiver);
- }
- for (size_t i = 1, args_offset = 0; i < shorty_len_; ++i, ++args_offset) {
- switch (shorty_[i]) {
- case 'Z':
- Append(args[args_offset].z);
- break;
- case 'B':
- Append(args[args_offset].b);
- break;
- case 'C':
- Append(args[args_offset].c);
- break;
- case 'S':
- Append(args[args_offset].s);
- break;
- case 'I':
- case 'F':
- Append(args[args_offset].i);
- break;
- case 'L':
- Append(soa.Decode<mirror::Object*>(args[args_offset].l));
- break;
- case 'D':
- case 'J':
- AppendWide(args[args_offset].j);
- break;
- }
- }
- }
-
-
- void BuildArgArrayFromFrame(ShadowFrame* shadow_frame, uint32_t arg_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // Set receiver if non-null (method is not static)
- size_t cur_arg = arg_offset;
- if (!shadow_frame->GetMethod()->IsStatic()) {
- Append(shadow_frame->GetVReg(cur_arg));
- cur_arg++;
- }
- for (size_t i = 1; i < shorty_len_; ++i) {
- switch (shorty_[i]) {
- case 'Z':
- case 'B':
- case 'C':
- case 'S':
- case 'I':
- case 'F':
- case 'L':
- Append(shadow_frame->GetVReg(cur_arg));
- cur_arg++;
- break;
- case 'D':
- case 'J':
- AppendWide(shadow_frame->GetVRegLong(cur_arg));
- cur_arg++;
- cur_arg++;
- break;
- }
- }
- }
-
- private:
- enum { kSmallArgArraySize = 16 };
- const char* const shorty_;
- const uint32_t shorty_len_;
- uint32_t num_bytes_;
- uint32_t* arg_array_;
- uint32_t small_arg_array_[kSmallArgArraySize];
- UniquePtr<uint32_t[]> large_arg_array_;
-};
-
-} // namespace art
-
-#endif // ART_RUNTIME_INVOKE_ARG_ARRAY_BUILDER_H_
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 6efff1a740..43db7eccb0 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -30,7 +30,6 @@
#include "dex_file-inl.h"
#include "gc/accounting/card_table-inl.h"
#include "interpreter/interpreter.h"
-#include "invoke_arg_array_builder.h"
#include "jni.h"
#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
@@ -41,6 +40,7 @@
#include "mirror/throwable.h"
#include "object_utils.h"
#include "parsed_options.h"
+#include "reflection.h"
#include "runtime.h"
#include "safe_map.h"
#include "scoped_thread_state_change.h"
@@ -77,104 +77,6 @@ static bool IsBadJniVersion(int version) {
return version != JNI_VERSION_1_2 && version != JNI_VERSION_1_4 && version != JNI_VERSION_1_6;
}
-static void CheckMethodArguments(mirror::ArtMethod* m, uint32_t* args)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- const DexFile::TypeList* params = MethodHelper(m).GetParameterTypeList();
- if (params == nullptr) {
- return; // No arguments so nothing to check.
- }
- uint32_t offset = 0;
- uint32_t num_params = params->Size();
- size_t error_count = 0;
- if (!m->IsStatic()) {
- offset = 1;
- }
- for (uint32_t i = 0; i < num_params; i++) {
- uint16_t type_idx = params->GetTypeItem(i).type_idx_;
- mirror::Class* param_type = MethodHelper(m).GetClassFromTypeIdx(type_idx);
- if (param_type == nullptr) {
- Thread* self = Thread::Current();
- CHECK(self->IsExceptionPending());
- LOG(ERROR) << "Internal error: unresolvable type for argument type in JNI invoke: "
- << MethodHelper(m).GetTypeDescriptorFromTypeIdx(type_idx) << "\n"
- << self->GetException(nullptr)->Dump();
- self->ClearException();
- ++error_count;
- } else if (!param_type->IsPrimitive()) {
- // TODO: check primitives are in range.
- mirror::Object* argument = reinterpret_cast<mirror::Object*>(args[i + offset]);
- if (argument != nullptr && !argument->InstanceOf(param_type)) {
- LOG(ERROR) << "JNI ERROR (app bug): attempt to pass an instance of "
- << PrettyTypeOf(argument) << " as argument " << (i + 1)
- << " to " << PrettyMethod(m);
- ++error_count;
- }
- } else if (param_type->IsPrimitiveLong() || param_type->IsPrimitiveDouble()) {
- offset++;
- }
- }
- if (error_count > 0) {
- // TODO: pass the JNI function name (such as "CallVoidMethodV") through so we can call JniAbort
- // with an argument.
- JniAbortF(nullptr, "bad arguments passed to %s (see above for details)",
- PrettyMethod(m).c_str());
- }
-}
-
-void InvokeWithArgArray(const ScopedObjectAccess& soa, mirror::ArtMethod* method,
- ArgArray* arg_array, JValue* result, const char* shorty)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- uint32_t* args = arg_array->GetArray();
- if (UNLIKELY(soa.Env()->check_jni)) {
- CheckMethodArguments(method, args);
- }
- method->Invoke(soa.Self(), args, arg_array->GetNumBytes(), result, shorty);
-}
-
-static JValue InvokeWithVarArgs(const ScopedObjectAccess& soa, jobject obj,
- jmethodID mid, va_list args)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* method = soa.DecodeMethod(mid);
- mirror::Object* receiver = method->IsStatic() ? nullptr : soa.Decode<mirror::Object*>(obj);
- MethodHelper mh(method);
- JValue result;
- ArgArray arg_array(mh.GetShorty(), mh.GetShortyLength());
- arg_array.BuildArgArray(soa, receiver, args);
- InvokeWithArgArray(soa, method, &arg_array, &result, mh.GetShorty());
- return result;
-}
-
-static mirror::ArtMethod* FindVirtualMethod(mirror::Object* receiver, mirror::ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(method);
-}
-
-static JValue InvokeVirtualOrInterfaceWithJValues(const ScopedObjectAccess& soa,
- jobject obj, jmethodID mid, jvalue* args)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::Object* receiver = soa.Decode<mirror::Object*>(obj);
- mirror::ArtMethod* method = FindVirtualMethod(receiver, soa.DecodeMethod(mid));
- MethodHelper mh(method);
- JValue result;
- ArgArray arg_array(mh.GetShorty(), mh.GetShortyLength());
- arg_array.BuildArgArray(soa, receiver, args);
- InvokeWithArgArray(soa, method, &arg_array, &result, mh.GetShorty());
- return result;
-}
-
-static JValue InvokeVirtualOrInterfaceWithVarArgs(const ScopedObjectAccess& soa,
- jobject obj, jmethodID mid, va_list args)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::Object* receiver = soa.Decode<mirror::Object*>(obj);
- mirror::ArtMethod* method = FindVirtualMethod(receiver, soa.DecodeMethod(mid));
- MethodHelper mh(method);
- JValue result;
- ArgArray arg_array(mh.GetShorty(), mh.GetShortyLength());
- arg_array.BuildArgArray(soa, receiver, args);
- InvokeWithArgArray(soa, method, &arg_array, &result, mh.GetShorty());
- return result;
-}
-
// Section 12.3.2 of the JNI spec describes JNI class descriptors. They're
// separated with slashes but aren't wrapped with "L;" like regular descriptors
// (i.e. "a/b/C" rather than "La/b/C;"). Arrays of reference types are an
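As a caller's-eye illustration of the descriptor forms described above (a hypothetical sketch, not part of this change; the JNIEnv* is assumed to come from the usual attach/OnLoad paths):

#include <jni.h>

// Plain classes drop the "L;" wrapper; array classes keep full field
// descriptors, including for primitive arrays.
static void DescriptorExamples(JNIEnv* env) {
  jclass string_class = env->FindClass("java/lang/String");     // "a/b/C" form
  jclass string_array = env->FindClass("[Ljava/lang/String;");  // reference array keeps "L...;"
  jclass int_array    = env->FindClass("[I");                   // primitive array
  (void)string_class; (void)string_array; (void)int_array;
}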
@@ -611,18 +513,6 @@ class Libraries {
SafeMap<std::string, SharedLibrary*> libraries_;
};
-JValue InvokeWithJValues(const ScopedObjectAccess& soa, jobject obj, jmethodID mid,
- jvalue* args) {
- mirror::ArtMethod* method = soa.DecodeMethod(mid);
- mirror::Object* receiver = method->IsStatic() ? nullptr : soa.Decode<mirror::Object*>(obj);
- MethodHelper mh(method);
- JValue result;
- ArgArray arg_array(mh.GetShorty(), mh.GetShortyLength());
- arg_array.BuildArgArray(soa, receiver, args);
- InvokeWithArgArray(soa, method, &arg_array, &result, mh.GetShorty());
- return result;
-}
-
#define CHECK_NON_NULL_ARGUMENT(fn, value) \
if (UNLIKELY(value == nullptr)) { \
JniAbortF(#fn, #value " == null"); \
@@ -1014,7 +904,8 @@ class JNI {
CHECK_NON_NULL_ARGUMENT(CallObjectMethodA, obj);
CHECK_NON_NULL_ARGUMENT(CallObjectMethodA, mid);
ScopedObjectAccess soa(env);
- JValue result(InvokeVirtualOrInterfaceWithJValues(soa, obj, mid, args));
+ JValue result(InvokeVirtualOrInterfaceWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid,
+ args));
return soa.AddLocalReference<jobject>(result.GetL());
}
@@ -1040,7 +931,8 @@ class JNI {
CHECK_NON_NULL_ARGUMENT(CallBooleanMethodA, obj);
CHECK_NON_NULL_ARGUMENT(CallBooleanMethodA, mid);
ScopedObjectAccess soa(env);
- return InvokeVirtualOrInterfaceWithJValues(soa, obj, mid, args).GetZ();
+ return InvokeVirtualOrInterfaceWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid,
+ args).GetZ();
}
static jbyte CallByteMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) {
@@ -1065,7 +957,8 @@ class JNI {
CHECK_NON_NULL_ARGUMENT(CallByteMethodA, obj);
CHECK_NON_NULL_ARGUMENT(CallByteMethodA, mid);
ScopedObjectAccess soa(env);
- return InvokeVirtualOrInterfaceWithJValues(soa, obj, mid, args).GetB();
+ return InvokeVirtualOrInterfaceWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid,
+ args).GetB();
}
static jchar CallCharMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) {
@@ -1090,7 +983,8 @@ class JNI {
CHECK_NON_NULL_ARGUMENT(CallCharMethodA, obj);
CHECK_NON_NULL_ARGUMENT(CallCharMethodA, mid);
ScopedObjectAccess soa(env);
- return InvokeVirtualOrInterfaceWithJValues(soa, obj, mid, args).GetC();
+ return InvokeVirtualOrInterfaceWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid,
+ args).GetC();
}
static jdouble CallDoubleMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) {
@@ -1115,7 +1009,8 @@ class JNI {
CHECK_NON_NULL_ARGUMENT(CallDoubleMethodA, obj);
CHECK_NON_NULL_ARGUMENT(CallDoubleMethodA, mid);
ScopedObjectAccess soa(env);
- return InvokeVirtualOrInterfaceWithJValues(soa, obj, mid, args).GetD();
+ return InvokeVirtualOrInterfaceWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid,
+ args).GetD();
}
static jfloat CallFloatMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) {
@@ -1140,7 +1035,8 @@ class JNI {
CHECK_NON_NULL_ARGUMENT(CallFloatMethodA, obj);
CHECK_NON_NULL_ARGUMENT(CallFloatMethodA, mid);
ScopedObjectAccess soa(env);
- return InvokeVirtualOrInterfaceWithJValues(soa, obj, mid, args).GetF();
+ return InvokeVirtualOrInterfaceWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid,
+ args).GetF();
}
static jint CallIntMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) {
@@ -1165,7 +1061,8 @@ class JNI {
CHECK_NON_NULL_ARGUMENT(CallIntMethodA, obj);
CHECK_NON_NULL_ARGUMENT(CallIntMethodA, mid);
ScopedObjectAccess soa(env);
- return InvokeVirtualOrInterfaceWithJValues(soa, obj, mid, args).GetI();
+ return InvokeVirtualOrInterfaceWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid,
+ args).GetI();
}
static jlong CallLongMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) {
@@ -1190,7 +1087,8 @@ class JNI {
CHECK_NON_NULL_ARGUMENT(CallLongMethodA, obj);
CHECK_NON_NULL_ARGUMENT(CallLongMethodA, mid);
ScopedObjectAccess soa(env);
- return InvokeVirtualOrInterfaceWithJValues(soa, obj, mid, args).GetJ();
+ return InvokeVirtualOrInterfaceWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid,
+ args).GetJ();
}
static jshort CallShortMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) {
@@ -1215,7 +1113,8 @@ class JNI {
CHECK_NON_NULL_ARGUMENT(CallShortMethodA, obj);
CHECK_NON_NULL_ARGUMENT(CallShortMethodA, mid);
ScopedObjectAccess soa(env);
- return InvokeVirtualOrInterfaceWithJValues(soa, obj, mid, args).GetS();
+ return InvokeVirtualOrInterfaceWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid,
+ args).GetS();
}
static void CallVoidMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) {
@@ -1239,7 +1138,7 @@ class JNI {
CHECK_NON_NULL_ARGUMENT(CallVoidMethodA, obj);
CHECK_NON_NULL_ARGUMENT(CallVoidMethodA, mid);
ScopedObjectAccess soa(env);
- InvokeVirtualOrInterfaceWithJValues(soa, obj, mid, args);
+ InvokeVirtualOrInterfaceWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args);
}
static jobject CallNonvirtualObjectMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) {
@@ -1268,7 +1167,7 @@ class JNI {
CHECK_NON_NULL_ARGUMENT(CallNonvirtualObjectMethodA, obj);
CHECK_NON_NULL_ARGUMENT(CallNonvirtualObjectMethodA, mid);
ScopedObjectAccess soa(env);
- JValue result(InvokeWithJValues(soa, obj, mid, args));
+ JValue result(InvokeWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args));
return soa.AddLocalReference<jobject>(result.GetL());
}
@@ -1297,7 +1196,7 @@ class JNI {
CHECK_NON_NULL_ARGUMENT(CallNonvirtualBooleanMethodA, obj);
CHECK_NON_NULL_ARGUMENT(CallNonvirtualBooleanMethodA, mid);
ScopedObjectAccess soa(env);
- return InvokeWithJValues(soa, obj, mid, args).GetZ();
+ return InvokeWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args).GetZ();
}
static jbyte CallNonvirtualByteMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) {
@@ -1324,7 +1223,7 @@ class JNI {
CHECK_NON_NULL_ARGUMENT(CallNonvirtualByteMethodA, obj);
CHECK_NON_NULL_ARGUMENT(CallNonvirtualByteMethodA, mid);
ScopedObjectAccess soa(env);
- return InvokeWithJValues(soa, obj, mid, args).GetB();
+ return InvokeWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args).GetB();
}
static jchar CallNonvirtualCharMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) {
@@ -1351,7 +1250,7 @@ class JNI {
CHECK_NON_NULL_ARGUMENT(CallNonvirtualCharMethodA, obj);
CHECK_NON_NULL_ARGUMENT(CallNonvirtualCharMethodA, mid);
ScopedObjectAccess soa(env);
- return InvokeWithJValues(soa, obj, mid, args).GetC();
+ return InvokeWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args).GetC();
}
static jshort CallNonvirtualShortMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) {
@@ -1378,7 +1277,7 @@ class JNI {
CHECK_NON_NULL_ARGUMENT(CallNonvirtualShortMethodA, obj);
CHECK_NON_NULL_ARGUMENT(CallNonvirtualShortMethodA, mid);
ScopedObjectAccess soa(env);
- return InvokeWithJValues(soa, obj, mid, args).GetS();
+ return InvokeWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args).GetS();
}
static jint CallNonvirtualIntMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) {
@@ -1405,7 +1304,7 @@ class JNI {
CHECK_NON_NULL_ARGUMENT(CallNonvirtualIntMethodA, obj);
CHECK_NON_NULL_ARGUMENT(CallNonvirtualIntMethodA, mid);
ScopedObjectAccess soa(env);
- return InvokeWithJValues(soa, obj, mid, args).GetI();
+ return InvokeWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args).GetI();
}
static jlong CallNonvirtualLongMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) {
@@ -1432,7 +1331,7 @@ class JNI {
CHECK_NON_NULL_ARGUMENT(CallNonvirtualLongMethodA, obj);
CHECK_NON_NULL_ARGUMENT(CallNonvirtualLongMethodA, mid);
ScopedObjectAccess soa(env);
- return InvokeWithJValues(soa, obj, mid, args).GetJ();
+ return InvokeWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args).GetJ();
}
static jfloat CallNonvirtualFloatMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) {
@@ -1459,7 +1358,7 @@ class JNI {
CHECK_NON_NULL_ARGUMENT(CallNonvirtualFloatMethodA, obj);
CHECK_NON_NULL_ARGUMENT(CallNonvirtualFloatMethodA, mid);
ScopedObjectAccess soa(env);
- return InvokeWithJValues(soa, obj, mid, args).GetF();
+ return InvokeWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args).GetF();
}
static jdouble CallNonvirtualDoubleMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) {
@@ -1486,7 +1385,7 @@ class JNI {
CHECK_NON_NULL_ARGUMENT(CallNonvirtualDoubleMethodA, obj);
CHECK_NON_NULL_ARGUMENT(CallNonvirtualDoubleMethodA, mid);
ScopedObjectAccess soa(env);
- return InvokeWithJValues(soa, obj, mid, args).GetD();
+ return InvokeWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args).GetD();
}
static void CallNonvirtualVoidMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) {
@@ -1512,7 +1411,7 @@ class JNI {
CHECK_NON_NULL_ARGUMENT(CallNonvirtualVoidMethodA, obj);
CHECK_NON_NULL_ARGUMENT(CallNonvirtualVoidMethodA, mid);
ScopedObjectAccess soa(env);
- InvokeWithJValues(soa, obj, mid, args);
+ InvokeWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args);
}
static jfieldID GetFieldID(JNIEnv* env, jclass java_class, const char* name, const char* sig) {
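The invoke helpers deleted above leave this file for reflection.h (note the new reflection.h include at the top of this change). Their new declarations are not visible in these hunks, so the following is inferred from the updated call sites rather than quoted from the new header; each Call* entry point now decodes the receiver itself and passes a mirror::Object* instead of an opaque jobject:

// Inferred signatures, assuming the relocated helpers keep their old names.
JValue InvokeWithJValues(const ScopedObjectAccess& soa, mirror::Object* receiver,
                         jmethodID mid, jvalue* args)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

JValue InvokeVirtualOrInterfaceWithJValues(const ScopedObjectAccess& soa,
                                           mirror::Object* receiver,
                                           jmethodID mid, jvalue* args)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);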
diff --git a/runtime/jni_internal.h b/runtime/jni_internal.h
index 7b49d33625..42796dbe79 100644
--- a/runtime/jni_internal.h
+++ b/runtime/jni_internal.h
@@ -42,7 +42,6 @@ namespace mirror {
class ArtMethod;
class ClassLoader;
} // namespace mirror
-class ArgArray;
union JValue;
class Libraries;
class ParsedOptions;
@@ -55,12 +54,6 @@ void JniAbortF(const char* jni_function_name, const char* fmt, ...)
void RegisterNativeMethods(JNIEnv* env, const char* jni_class_name, const JNINativeMethod* methods,
jint method_count);
-JValue InvokeWithJValues(const ScopedObjectAccess&, jobject obj, jmethodID mid, jvalue* args)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-void InvokeWithArgArray(const ScopedObjectAccess& soa, mirror::ArtMethod* method,
- ArgArray *arg_array, JValue* result, const char* shorty)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
int ThrowNewException(JNIEnv* env, jclass exception_class, const char* msg, jobject cause);
class JavaVMExt : public JavaVM {
@@ -155,6 +148,10 @@ struct JNIEnvExt : public JNIEnv {
void PushFrame(int capacity);
void PopFrame();
+ template<typename T>
+ T AddLocalReference(mirror::Object* obj, bool jni_work_arounds)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
static Offset SegmentStateOffset();
static Offset LocalRefCookieOffset() {
@@ -218,8 +215,29 @@ class ScopedJniEnvLocalRefState {
DISALLOW_COPY_AND_ASSIGN(ScopedJniEnvLocalRefState);
};
+template<typename T>
+inline T JNIEnvExt::AddLocalReference(mirror::Object* obj, bool jni_work_arounds) {
+ IndirectRef ref = locals.Add(local_ref_cookie, obj);
+
+ // TODO: fix this to understand PushLocalFrame, so we can turn it on.
+ if (false) {
+ if (check_jni) {
+ size_t entry_count = locals.Capacity();
+ if (entry_count > 16) {
+ locals.Dump(LOG(WARNING) << "Warning: more than 16 JNI local references: "
+ << entry_count << " (most recent was a " << PrettyTypeOf(obj) << ")\n");
+ // TODO: LOG(FATAL) in a later release?
+ }
+ }
+ }
+
+ if (jni_work_arounds) {
+ return reinterpret_cast<T>(obj);
+ }
+ return reinterpret_cast<T>(ref);
+}
+
} // namespace art
std::ostream& operator<<(std::ostream& os, const jobjectRefType& rhs);
-
#endif // ART_RUNTIME_JNI_INTERNAL_H_
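A minimal sketch of how the new JNIEnvExt::AddLocalReference overload might be driven; only the template itself is part of this change, and where the workaround flag lives is an assumption:

// With app JNI bug workarounds enabled, legacy code receives the raw
// Object* back and treats it as the reference; otherwise a proper indirect
// reference is handed out.
template<typename T>
static T ToLocalRef(JNIEnvExt* env, mirror::Object* obj, bool work_arounds) {
  return env->AddLocalReference<T>(obj, work_arounds);
}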
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index 28408d25cf..071b658fb9 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -16,18 +16,9 @@
#include "jni_internal.h"
-#include <limits.h>
-#include <cfloat>
-#include <cmath>
-
#include "common_compiler_test.h"
-#include "invoke_arg_array_builder.h"
#include "mirror/art_method-inl.h"
-#include "mirror/class-inl.h"
-#include "mirror/object_array-inl.h"
-#include "mirror/object-inl.h"
#include "ScopedLocalRef.h"
-#include "sirt_ref.h"
namespace art {
@@ -74,7 +65,7 @@ class JniInternalTest : public CommonCompilerTest {
}
}
- virtual void TearDown() {
+ virtual void TearDown() OVERRIDE {
CleanUpJniEnv();
CommonCompilerTest::TearDown();
}
@@ -86,676 +77,6 @@ class JniInternalTest : public CommonCompilerTest {
return soa.AddLocalReference<jclass>(c);
}
- void JniInternalTestMakeExecutable(mirror::ArtMethod** method,
- mirror::Object** receiver,
- bool is_static, const char* method_name,
- const char* method_signature)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- const char* class_name = is_static ? "StaticLeafMethods" : "NonStaticLeafMethods";
- jobject jclass_loader(LoadDex(class_name));
- Thread* self = Thread::Current();
- SirtRef<mirror::ClassLoader> null_class_loader(self, nullptr);
- SirtRef<mirror::ClassLoader>
- class_loader(self,
- ScopedObjectAccessUnchecked(self).Decode<mirror::ClassLoader*>(jclass_loader));
- if (is_static) {
- MakeExecutable(ScopedObjectAccessUnchecked(self).Decode<mirror::ClassLoader*>(jclass_loader),
- class_name);
- } else {
- MakeExecutable(nullptr, "java.lang.Class");
- MakeExecutable(nullptr, "java.lang.Object");
- MakeExecutable(ScopedObjectAccessUnchecked(self).Decode<mirror::ClassLoader*>(jclass_loader),
- class_name);
- }
-
- mirror::Class* c = class_linker_->FindClass(self, DotToDescriptor(class_name).c_str(),
- class_loader);
- CHECK(c != NULL);
-
- *method = is_static ? c->FindDirectMethod(method_name, method_signature)
- : c->FindVirtualMethod(method_name, method_signature);
-  CHECK(*method != nullptr);
-
- *receiver = (is_static ? nullptr : c->AllocObject(self));
-
- // Start runtime.
- bool started = runtime_->Start();
- CHECK(started);
- self->TransitionFromSuspendedToRunnable();
- }
-
- void InvokeNopMethod(bool is_static) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* method;
- mirror::Object* receiver;
- JniInternalTestMakeExecutable(&method, &receiver, is_static, "nop", "()V");
-
- ArgArray arg_array("V", 1);
- JValue result;
-
- if (!is_static) {
- arg_array.Append(receiver);
- }
-
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result, "V");
- }
-
- void InvokeIdentityByteMethod(bool is_static)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* method;
- mirror::Object* receiver;
- JniInternalTestMakeExecutable(&method, &receiver, is_static, "identity", "(B)B");
-
- ArgArray arg_array("BB", 2);
- uint32_t* args = arg_array.GetArray();
- JValue result;
-
- if (!is_static) {
- arg_array.Append(receiver);
- args++;
- }
-
- arg_array.Append(0U);
- result.SetB(-1);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result, "BB");
- EXPECT_EQ(0, result.GetB());
-
- args[0] = -1;
- result.SetB(0);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result, "BB");
- EXPECT_EQ(-1, result.GetB());
-
- args[0] = SCHAR_MAX;
- result.SetB(0);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result, "BB");
- EXPECT_EQ(SCHAR_MAX, result.GetB());
-
- args[0] = (SCHAR_MIN << 24) >> 24;
- result.SetB(0);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result, "BB");
- EXPECT_EQ(SCHAR_MIN, result.GetB());
- }
-
- void InvokeIdentityIntMethod(bool is_static)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* method;
- mirror::Object* receiver;
- JniInternalTestMakeExecutable(&method, &receiver, is_static, "identity", "(I)I");
-
- ArgArray arg_array("II", 2);
- uint32_t* args = arg_array.GetArray();
- JValue result;
-
- if (!is_static) {
- arg_array.Append(receiver);
- args++;
- }
-
- arg_array.Append(0U);
- result.SetI(-1);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result, "II");
- EXPECT_EQ(0, result.GetI());
-
- args[0] = -1;
- result.SetI(0);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result, "II");
- EXPECT_EQ(-1, result.GetI());
-
- args[0] = INT_MAX;
- result.SetI(0);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result, "II");
- EXPECT_EQ(INT_MAX, result.GetI());
-
- args[0] = INT_MIN;
- result.SetI(0);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result, "II");
- EXPECT_EQ(INT_MIN, result.GetI());
- }
-
- void InvokeIdentityDoubleMethod(bool is_static)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* method;
- mirror::Object* receiver;
- JniInternalTestMakeExecutable(&method, &receiver, is_static, "identity", "(D)D");
-
- ArgArray arg_array("DD", 2);
- uint32_t* args = arg_array.GetArray();
- JValue value;
- JValue result;
-
- if (!is_static) {
- arg_array.Append(receiver);
- args++;
- }
-
- value.SetD(0.0);
- arg_array.AppendWide(value.GetJ());
- result.SetD(-1.0);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result, "DD");
- EXPECT_EQ(0.0, result.GetD());
-
- value.SetD(-1.0);
- args[0] = value.GetJ();
- args[1] = value.GetJ() >> 32;
- result.SetD(0.0);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result, "DD");
- EXPECT_EQ(-1.0, result.GetD());
-
- value.SetD(DBL_MAX);
- args[0] = value.GetJ();
- args[1] = value.GetJ() >> 32;
- result.SetD(0.0);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result, "DD");
- EXPECT_EQ(DBL_MAX, result.GetD());
-
- value.SetD(DBL_MIN);
- args[0] = value.GetJ();
- args[1] = value.GetJ() >> 32;
- result.SetD(0.0);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result, "DD");
- EXPECT_EQ(DBL_MIN, result.GetD());
- }
-
- void InvokeSumIntIntMethod(bool is_static)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* method;
- mirror::Object* receiver;
- JniInternalTestMakeExecutable(&method, &receiver, is_static, "sum", "(II)I");
-
- ArgArray arg_array("III", 3);
- uint32_t* args = arg_array.GetArray();
- JValue result;
-
- if (!is_static) {
- arg_array.Append(receiver);
- args++;
- }
-
- arg_array.Append(0U);
- arg_array.Append(0U);
- result.SetI(-1);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result, "III");
- EXPECT_EQ(0, result.GetI());
-
- args[0] = 1;
- args[1] = 2;
- result.SetI(0);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result, "III");
- EXPECT_EQ(3, result.GetI());
-
- args[0] = -2;
- args[1] = 5;
- result.SetI(0);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result, "III");
- EXPECT_EQ(3, result.GetI());
-
- args[0] = INT_MAX;
- args[1] = INT_MIN;
- result.SetI(1234);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result, "III");
- EXPECT_EQ(-1, result.GetI());
-
- args[0] = INT_MAX;
- args[1] = INT_MAX;
- result.SetI(INT_MIN);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result, "III");
- EXPECT_EQ(-2, result.GetI());
- }
-
- void InvokeSumIntIntIntMethod(bool is_static)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* method;
- mirror::Object* receiver;
- JniInternalTestMakeExecutable(&method, &receiver, is_static, "sum", "(III)I");
-
- ArgArray arg_array("IIII", 4);
- uint32_t* args = arg_array.GetArray();
- JValue result;
-
- if (!is_static) {
- arg_array.Append(receiver);
- args++;
- }
-
- arg_array.Append(0U);
- arg_array.Append(0U);
- arg_array.Append(0U);
- result.SetI(-1);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result,
- "IIII");
- EXPECT_EQ(0, result.GetI());
-
- args[0] = 1;
- args[1] = 2;
- args[2] = 3;
- result.SetI(0);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result,
- "IIII");
- EXPECT_EQ(6, result.GetI());
-
- args[0] = -1;
- args[1] = 2;
- args[2] = -3;
- result.SetI(0);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result,
- "IIII");
- EXPECT_EQ(-2, result.GetI());
-
- args[0] = INT_MAX;
- args[1] = INT_MIN;
- args[2] = INT_MAX;
- result.SetI(1234);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result,
- "IIII");
- EXPECT_EQ(2147483646, result.GetI());
-
- args[0] = INT_MAX;
- args[1] = INT_MAX;
- args[2] = INT_MAX;
- result.SetI(INT_MIN);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result,
- "IIII");
- EXPECT_EQ(2147483645, result.GetI());
- }
-
- void InvokeSumIntIntIntIntMethod(bool is_static)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* method;
- mirror::Object* receiver;
- JniInternalTestMakeExecutable(&method, &receiver, is_static, "sum", "(IIII)I");
-
- ArgArray arg_array("IIIII", 5);
- uint32_t* args = arg_array.GetArray();
- JValue result;
-
- if (!is_static) {
- arg_array.Append(receiver);
- args++;
- }
-
- arg_array.Append(0U);
- arg_array.Append(0U);
- arg_array.Append(0U);
- arg_array.Append(0U);
- result.SetI(-1);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result,
- "IIIII");
- EXPECT_EQ(0, result.GetI());
-
- args[0] = 1;
- args[1] = 2;
- args[2] = 3;
- args[3] = 4;
- result.SetI(0);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result,
- "IIIII");
- EXPECT_EQ(10, result.GetI());
-
- args[0] = -1;
- args[1] = 2;
- args[2] = -3;
- args[3] = 4;
- result.SetI(0);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result,
- "IIIII");
- EXPECT_EQ(2, result.GetI());
-
- args[0] = INT_MAX;
- args[1] = INT_MIN;
- args[2] = INT_MAX;
- args[3] = INT_MIN;
- result.SetI(1234);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result,
- "IIIII");
- EXPECT_EQ(-2, result.GetI());
-
- args[0] = INT_MAX;
- args[1] = INT_MAX;
- args[2] = INT_MAX;
- args[3] = INT_MAX;
- result.SetI(INT_MIN);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result,
- "IIIII");
- EXPECT_EQ(-4, result.GetI());
- }
-
- void InvokeSumIntIntIntIntIntMethod(bool is_static)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* method;
- mirror::Object* receiver;
- JniInternalTestMakeExecutable(&method, &receiver, is_static, "sum", "(IIIII)I");
-
- ArgArray arg_array("IIIIII", 6);
- uint32_t* args = arg_array.GetArray();
- JValue result;
-
- if (!is_static) {
- arg_array.Append(receiver);
- args++;
- }
-
- arg_array.Append(0U);
- arg_array.Append(0U);
- arg_array.Append(0U);
- arg_array.Append(0U);
- arg_array.Append(0U);
-    result.SetI(-1);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result,
- "IIIIII");
- EXPECT_EQ(0, result.GetI());
-
- args[0] = 1;
- args[1] = 2;
- args[2] = 3;
- args[3] = 4;
- args[4] = 5;
- result.SetI(0);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result,
- "IIIIII");
- EXPECT_EQ(15, result.GetI());
-
- args[0] = -1;
- args[1] = 2;
- args[2] = -3;
- args[3] = 4;
- args[4] = -5;
- result.SetI(0);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result,
- "IIIIII");
- EXPECT_EQ(-3, result.GetI());
-
- args[0] = INT_MAX;
- args[1] = INT_MIN;
- args[2] = INT_MAX;
- args[3] = INT_MIN;
- args[4] = INT_MAX;
- result.SetI(1234);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result,
- "IIIIII");
- EXPECT_EQ(2147483645, result.GetI());
-
- args[0] = INT_MAX;
- args[1] = INT_MAX;
- args[2] = INT_MAX;
- args[3] = INT_MAX;
- args[4] = INT_MAX;
- result.SetI(INT_MIN);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result,
- "IIIIII");
- EXPECT_EQ(2147483643, result.GetI());
- }
-
- void InvokeSumDoubleDoubleMethod(bool is_static)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* method;
- mirror::Object* receiver;
- JniInternalTestMakeExecutable(&method, &receiver, is_static, "sum", "(DD)D");
-
- ArgArray arg_array("DDD", 3);
- uint32_t* args = arg_array.GetArray();
- JValue value;
- JValue value2;
- JValue result;
-
- if (!is_static) {
- arg_array.Append(receiver);
- args++;
- }
-
- value.SetD(0.0);
- value2.SetD(0.0);
- arg_array.AppendWide(value.GetJ());
- arg_array.AppendWide(value2.GetJ());
- result.SetD(-1.0);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result,
- "DDD");
- EXPECT_EQ(0.0, result.GetD());
-
- value.SetD(1.0);
- value2.SetD(2.0);
- args[0] = value.GetJ();
- args[1] = value.GetJ() >> 32;
- args[2] = value2.GetJ();
- args[3] = value2.GetJ() >> 32;
- result.SetD(0.0);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result,
- "DDD");
- EXPECT_EQ(3.0, result.GetD());
-
- value.SetD(1.0);
- value2.SetD(-2.0);
- args[0] = value.GetJ();
- args[1] = value.GetJ() >> 32;
- args[2] = value2.GetJ();
- args[3] = value2.GetJ() >> 32;
- result.SetD(0.0);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result,
- "DDD");
- EXPECT_EQ(-1.0, result.GetD());
-
- value.SetD(DBL_MAX);
- value2.SetD(DBL_MIN);
- args[0] = value.GetJ();
- args[1] = value.GetJ() >> 32;
- args[2] = value2.GetJ();
- args[3] = value2.GetJ() >> 32;
- result.SetD(0.0);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result,
- "DDD");
- EXPECT_EQ(1.7976931348623157e308, result.GetD());
-
- value.SetD(DBL_MAX);
- value2.SetD(DBL_MAX);
- args[0] = value.GetJ();
- args[1] = value.GetJ() >> 32;
- args[2] = value2.GetJ();
- args[3] = value2.GetJ() >> 32;
- result.SetD(0.0);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result,
- "DDD");
- EXPECT_EQ(INFINITY, result.GetD());
- }
-
- void InvokeSumDoubleDoubleDoubleMethod(bool is_static)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* method;
- mirror::Object* receiver;
- JniInternalTestMakeExecutable(&method, &receiver, is_static, "sum", "(DDD)D");
-
- ArgArray arg_array("DDDD", 4);
- uint32_t* args = arg_array.GetArray();
- JValue value;
- JValue value2;
- JValue value3;
- JValue result;
-
- if (!is_static) {
- arg_array.Append(receiver);
- args++;
- }
-
- value.SetD(0.0);
- value2.SetD(0.0);
- value3.SetD(0.0);
- arg_array.AppendWide(value.GetJ());
- arg_array.AppendWide(value2.GetJ());
- arg_array.AppendWide(value3.GetJ());
- result.SetD(-1.0);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result,
- "DDDD");
- EXPECT_EQ(0.0, result.GetD());
-
- value.SetD(1.0);
- value2.SetD(2.0);
- value3.SetD(3.0);
- args[0] = value.GetJ();
- args[1] = value.GetJ() >> 32;
- args[2] = value2.GetJ();
- args[3] = value2.GetJ() >> 32;
- args[4] = value3.GetJ();
- args[5] = value3.GetJ() >> 32;
- result.SetD(0.0);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result,
- "DDDD");
- EXPECT_EQ(6.0, result.GetD());
-
- value.SetD(1.0);
- value2.SetD(-2.0);
- value3.SetD(3.0);
- args[0] = value.GetJ();
- args[1] = value.GetJ() >> 32;
- args[2] = value2.GetJ();
- args[3] = value2.GetJ() >> 32;
- args[4] = value3.GetJ();
- args[5] = value3.GetJ() >> 32;
- result.SetD(0.0);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result,
- "DDDD");
- EXPECT_EQ(2.0, result.GetD());
- }
-
- void InvokeSumDoubleDoubleDoubleDoubleMethod(bool is_static)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* method;
- mirror::Object* receiver;
- JniInternalTestMakeExecutable(&method, &receiver, is_static, "sum", "(DDDD)D");
-
- ArgArray arg_array("DDDDD", 5);
- uint32_t* args = arg_array.GetArray();
- JValue value;
- JValue value2;
- JValue value3;
- JValue value4;
- JValue result;
-
- if (!is_static) {
- arg_array.Append(receiver);
- args++;
- }
-
- value.SetD(0.0);
- value2.SetD(0.0);
- value3.SetD(0.0);
- value4.SetD(0.0);
- arg_array.AppendWide(value.GetJ());
- arg_array.AppendWide(value2.GetJ());
- arg_array.AppendWide(value3.GetJ());
- arg_array.AppendWide(value4.GetJ());
- result.SetD(-1.0);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result,
- "DDDDD");
- EXPECT_EQ(0.0, result.GetD());
-
- value.SetD(1.0);
- value2.SetD(2.0);
- value3.SetD(3.0);
- value4.SetD(4.0);
- args[0] = value.GetJ();
- args[1] = value.GetJ() >> 32;
- args[2] = value2.GetJ();
- args[3] = value2.GetJ() >> 32;
- args[4] = value3.GetJ();
- args[5] = value3.GetJ() >> 32;
- args[6] = value4.GetJ();
- args[7] = value4.GetJ() >> 32;
- result.SetD(0.0);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result,
- "DDDDD");
- EXPECT_EQ(10.0, result.GetD());
-
- value.SetD(1.0);
- value2.SetD(-2.0);
- value3.SetD(3.0);
- value4.SetD(-4.0);
- args[0] = value.GetJ();
- args[1] = value.GetJ() >> 32;
- args[2] = value2.GetJ();
- args[3] = value2.GetJ() >> 32;
- args[4] = value3.GetJ();
- args[5] = value3.GetJ() >> 32;
- args[6] = value4.GetJ();
- args[7] = value4.GetJ() >> 32;
- result.SetD(0.0);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result,
- "DDDDD");
- EXPECT_EQ(-2.0, result.GetD());
- }
-
- void InvokeSumDoubleDoubleDoubleDoubleDoubleMethod(bool is_static)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* method;
- mirror::Object* receiver;
- JniInternalTestMakeExecutable(&method, &receiver, is_static, "sum", "(DDDDD)D");
-
- ArgArray arg_array("DDDDDD", 6);
- uint32_t* args = arg_array.GetArray();
- JValue value;
- JValue value2;
- JValue value3;
- JValue value4;
- JValue value5;
- JValue result;
-
- if (!is_static) {
- arg_array.Append(receiver);
- args++;
- }
-
- value.SetD(0.0);
- value2.SetD(0.0);
- value3.SetD(0.0);
- value4.SetD(0.0);
- value5.SetD(0.0);
- arg_array.AppendWide(value.GetJ());
- arg_array.AppendWide(value2.GetJ());
- arg_array.AppendWide(value3.GetJ());
- arg_array.AppendWide(value4.GetJ());
- arg_array.AppendWide(value5.GetJ());
- result.SetD(-1.0);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result,
- "DDDDDD");
- EXPECT_EQ(0.0, result.GetD());
-
- value.SetD(1.0);
- value2.SetD(2.0);
- value3.SetD(3.0);
- value4.SetD(4.0);
- value5.SetD(5.0);
- args[0] = value.GetJ();
- args[1] = value.GetJ() >> 32;
- args[2] = value2.GetJ();
- args[3] = value2.GetJ() >> 32;
- args[4] = value3.GetJ();
- args[5] = value3.GetJ() >> 32;
- args[6] = value4.GetJ();
- args[7] = value4.GetJ() >> 32;
- args[8] = value5.GetJ();
- args[9] = value5.GetJ() >> 32;
- result.SetD(0.0);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result,
- "DDDDDD");
- EXPECT_EQ(15.0, result.GetD());
-
- value.SetD(1.0);
- value2.SetD(-2.0);
- value3.SetD(3.0);
- value4.SetD(-4.0);
- value5.SetD(5.0);
- args[0] = value.GetJ();
- args[1] = value.GetJ() >> 32;
- args[2] = value2.GetJ();
- args[3] = value2.GetJ() >> 32;
- args[4] = value3.GetJ();
- args[5] = value3.GetJ() >> 32;
- args[6] = value4.GetJ();
- args[7] = value4.GetJ() >> 32;
- args[8] = value5.GetJ();
- args[9] = value5.GetJ() >> 32;
- result.SetD(0.0);
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result,
- "DDDDDD");
- EXPECT_EQ(3.0, result.GetD());
- }
-
JavaVMExt* vm_;
JNIEnv* env_;
jclass aioobe_;
@@ -1772,176 +1093,6 @@ TEST_F(JniInternalTest, DeleteWeakGlobalRef) {
env_->DeleteWeakGlobalRef(o2);
}
-TEST_F(JniInternalTest, StaticMainMethod) {
- TEST_DISABLED_FOR_PORTABLE();
- ScopedObjectAccess soa(Thread::Current());
- jobject jclass_loader = LoadDex("Main");
- SirtRef<mirror::ClassLoader>
- class_loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(jclass_loader));
- CompileDirectMethod(class_loader, "Main", "main", "([Ljava/lang/String;)V");
-
- mirror::Class* klass = class_linker_->FindClass(soa.Self(), "LMain;", class_loader);
- ASSERT_TRUE(klass != NULL);
-
- mirror::ArtMethod* method = klass->FindDirectMethod("main", "([Ljava/lang/String;)V");
- ASSERT_TRUE(method != NULL);
-
- ArgArray arg_array("VL", 2);
- arg_array.Append(0U);
- JValue result;
-
- // Start runtime.
- bool started = runtime_->Start();
- CHECK(started);
- Thread::Current()->TransitionFromSuspendedToRunnable();
-
- method->Invoke(Thread::Current(), arg_array.GetArray(), arg_array.GetNumBytes(), &result, "VL");
-}
-
-TEST_F(JniInternalTest, StaticNopMethod) {
- TEST_DISABLED_FOR_PORTABLE();
- ScopedObjectAccess soa(Thread::Current());
- InvokeNopMethod(true);
-}
-
-TEST_F(JniInternalTest, NonStaticNopMethod) {
- TEST_DISABLED_FOR_PORTABLE();
- ScopedObjectAccess soa(Thread::Current());
- InvokeNopMethod(false);
-}
-
-TEST_F(JniInternalTest, StaticIdentityByteMethod) {
- TEST_DISABLED_FOR_PORTABLE();
- ScopedObjectAccess soa(Thread::Current());
- InvokeIdentityByteMethod(true);
-}
-
-TEST_F(JniInternalTest, NonStaticIdentityByteMethod) {
- TEST_DISABLED_FOR_PORTABLE();
- ScopedObjectAccess soa(Thread::Current());
- InvokeIdentityByteMethod(false);
-}
-
-TEST_F(JniInternalTest, StaticIdentityIntMethod) {
- TEST_DISABLED_FOR_PORTABLE();
- ScopedObjectAccess soa(Thread::Current());
- InvokeIdentityIntMethod(true);
-}
-
-TEST_F(JniInternalTest, NonStaticIdentityIntMethod) {
- TEST_DISABLED_FOR_PORTABLE();
- ScopedObjectAccess soa(Thread::Current());
- InvokeIdentityIntMethod(false);
-}
-
-TEST_F(JniInternalTest, StaticIdentityDoubleMethod) {
- TEST_DISABLED_FOR_PORTABLE();
- ScopedObjectAccess soa(Thread::Current());
- InvokeIdentityDoubleMethod(true);
-}
-
-TEST_F(JniInternalTest, NonStaticIdentityDoubleMethod) {
- TEST_DISABLED_FOR_PORTABLE();
- ScopedObjectAccess soa(Thread::Current());
- InvokeIdentityDoubleMethod(false);
-}
-
-TEST_F(JniInternalTest, StaticSumIntIntMethod) {
- TEST_DISABLED_FOR_PORTABLE();
- ScopedObjectAccess soa(Thread::Current());
- InvokeSumIntIntMethod(true);
-}
-
-TEST_F(JniInternalTest, NonStaticSumIntIntMethod) {
- TEST_DISABLED_FOR_PORTABLE();
- ScopedObjectAccess soa(Thread::Current());
- InvokeSumIntIntMethod(false);
-}
-
-TEST_F(JniInternalTest, StaticSumIntIntIntMethod) {
- TEST_DISABLED_FOR_PORTABLE();
- ScopedObjectAccess soa(Thread::Current());
- InvokeSumIntIntIntMethod(true);
-}
-
-TEST_F(JniInternalTest, NonStaticSumIntIntIntMethod) {
- TEST_DISABLED_FOR_PORTABLE();
- ScopedObjectAccess soa(Thread::Current());
- InvokeSumIntIntIntMethod(false);
-}
-
-TEST_F(JniInternalTest, StaticSumIntIntIntIntMethod) {
- TEST_DISABLED_FOR_PORTABLE();
- ScopedObjectAccess soa(Thread::Current());
- InvokeSumIntIntIntIntMethod(true);
-}
-
-TEST_F(JniInternalTest, NonStaticSumIntIntIntIntMethod) {
- TEST_DISABLED_FOR_PORTABLE();
- ScopedObjectAccess soa(Thread::Current());
- InvokeSumIntIntIntIntMethod(false);
-}
-
-TEST_F(JniInternalTest, StaticSumIntIntIntIntIntMethod) {
- TEST_DISABLED_FOR_PORTABLE();
- ScopedObjectAccess soa(Thread::Current());
- InvokeSumIntIntIntIntIntMethod(true);
-}
-
-TEST_F(JniInternalTest, NonStaticSumIntIntIntIntIntMethod) {
- TEST_DISABLED_FOR_PORTABLE();
- ScopedObjectAccess soa(Thread::Current());
- InvokeSumIntIntIntIntIntMethod(false);
-}
-
-TEST_F(JniInternalTest, StaticSumDoubleDoubleMethod) {
- TEST_DISABLED_FOR_PORTABLE();
- ScopedObjectAccess soa(Thread::Current());
- InvokeSumDoubleDoubleMethod(true);
-}
-
-TEST_F(JniInternalTest, NonStaticSumDoubleDoubleMethod) {
- TEST_DISABLED_FOR_PORTABLE();
- ScopedObjectAccess soa(Thread::Current());
- InvokeSumDoubleDoubleMethod(false);
-}
-
-TEST_F(JniInternalTest, StaticSumDoubleDoubleDoubleMethod) {
- TEST_DISABLED_FOR_PORTABLE();
- ScopedObjectAccess soa(Thread::Current());
- InvokeSumDoubleDoubleDoubleMethod(true);
-}
-
-TEST_F(JniInternalTest, NonStaticSumDoubleDoubleDoubleMethod) {
- TEST_DISABLED_FOR_PORTABLE();
- ScopedObjectAccess soa(Thread::Current());
- InvokeSumDoubleDoubleDoubleMethod(false);
-}
-
-TEST_F(JniInternalTest, StaticSumDoubleDoubleDoubleDoubleMethod) {
- TEST_DISABLED_FOR_PORTABLE();
- ScopedObjectAccess soa(Thread::Current());
- InvokeSumDoubleDoubleDoubleDoubleMethod(true);
-}
-
-TEST_F(JniInternalTest, NonStaticSumDoubleDoubleDoubleDoubleMethod) {
- TEST_DISABLED_FOR_PORTABLE();
- ScopedObjectAccess soa(Thread::Current());
- InvokeSumDoubleDoubleDoubleDoubleMethod(false);
-}
-
-TEST_F(JniInternalTest, StaticSumDoubleDoubleDoubleDoubleDoubleMethod) {
- TEST_DISABLED_FOR_PORTABLE();
- ScopedObjectAccess soa(Thread::Current());
- InvokeSumDoubleDoubleDoubleDoubleDoubleMethod(true);
-}
-
-TEST_F(JniInternalTest, NonStaticSumDoubleDoubleDoubleDoubleDoubleMethod) {
- TEST_DISABLED_FOR_PORTABLE();
- ScopedObjectAccess soa(Thread::Current());
- InvokeSumDoubleDoubleDoubleDoubleDoubleMethod(false);
-}
-
TEST_F(JniInternalTest, Throw) {
EXPECT_EQ(JNI_ERR, env_->Throw(NULL));
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index fdfb47779c..582ab6eeac 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -32,8 +32,6 @@
namespace art {
-#if !defined(NDEBUG)
-
static std::ostream& operator<<(
std::ostream& os,
std::pair<BacktraceMap::const_iterator, BacktraceMap::const_iterator> iters) {
@@ -48,43 +46,61 @@ static std::ostream& operator<<(
return os;
}
-static void CheckMapRequest(byte* addr, size_t byte_count) {
- if (addr == NULL) {
- return;
+static bool CheckMapRequest(byte* expected_ptr, void* actual_ptr, size_t byte_count,
+ std::ostringstream* error_msg) {
+  // MAP_FAILED is handled by the caller first so it can emit a more specific error message.
+ CHECK(actual_ptr != MAP_FAILED);
+
+ if (expected_ptr == nullptr) {
+ return true;
+ }
+
+ if (expected_ptr == actual_ptr) {
+ return true;
+ }
+
+  // We asked for an address but didn't get what we wanted; all paths below here should fail.
+ int result = munmap(actual_ptr, byte_count);
+ if (result == -1) {
+ PLOG(WARNING) << StringPrintf("munmap(%p, %zd) failed", actual_ptr, byte_count);
}
- uintptr_t base = reinterpret_cast<uintptr_t>(addr);
- uintptr_t limit = base + byte_count;
+ uintptr_t actual = reinterpret_cast<uintptr_t>(actual_ptr);
+ uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
+ uintptr_t limit = expected + byte_count;
UniquePtr<BacktraceMap> map(BacktraceMap::Create(getpid()));
if (!map->Build()) {
- PLOG(WARNING) << "Failed to build process map";
- return;
+ *error_msg << StringPrintf("Failed to build process map to determine why mmap returned "
+ "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR, actual, expected);
+
+ return false;
}
for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
- CHECK(!(base >= it->start && base < it->end) // start of new within old
- && !(limit > it->start && limit < it->end) // end of new within old
- && !(base <= it->start && limit > it->end)) // start/end of new includes all of old
- << StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " overlaps with "
- "existing map 0x%08" PRIxPTR "-0x%08" PRIxPTR " (%s)\n",
- base, limit,
- static_cast<uintptr_t>(it->start), static_cast<uintptr_t>(it->end),
- it->name.c_str())
- << std::make_pair(it, map->end());
+ if ((expected >= it->start && expected < it->end) // start of new within old
+ || (limit > it->start && limit < it->end) // end of new within old
+ || (expected <= it->start && limit > it->end)) { // start/end of new includes all of old
+ *error_msg
+ << StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " overlaps with "
+ "existing map 0x%08" PRIxPTR "-0x%08" PRIxPTR " (%s)\n",
+ expected, limit,
+ static_cast<uintptr_t>(it->start), static_cast<uintptr_t>(it->end),
+ it->name.c_str())
+ << std::make_pair(it, map->end());
+ return false;
+ }
}
+ *error_msg << StringPrintf("Failed to mmap at expected address, mapped at "
+ "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR, actual, expected);
+ return false;
}
-#else
-static void CheckMapRequest(byte*, size_t) { }
-#endif
-
-MemMap* MemMap::MapAnonymous(const char* name, byte* addr, size_t byte_count, int prot,
+MemMap* MemMap::MapAnonymous(const char* name, byte* expected, size_t byte_count, int prot,
bool low_4gb, std::string* error_msg) {
if (byte_count == 0) {
- return new MemMap(name, NULL, 0, NULL, 0, prot);
+ return new MemMap(name, nullptr, 0, nullptr, 0, prot);
}
size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);
- CheckMapRequest(addr, page_aligned_byte_count);
#ifdef USE_ASHMEM
// android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
@@ -92,11 +108,11 @@ MemMap* MemMap::MapAnonymous(const char* name, byte* addr, size_t byte_count, in
std::string debug_friendly_name("dalvik-");
debug_friendly_name += name;
ScopedFd fd(ashmem_create_region(debug_friendly_name.c_str(), page_aligned_byte_count));
- int flags = MAP_PRIVATE;
if (fd.get() == -1) {
*error_msg = StringPrintf("ashmem_create_region failed for '%s': %s", name, strerror(errno));
return nullptr;
}
+ int flags = MAP_PRIVATE;
#else
ScopedFd fd(-1);
int flags = MAP_PRIVATE | MAP_ANONYMOUS;
@@ -106,64 +122,80 @@ MemMap* MemMap::MapAnonymous(const char* name, byte* addr, size_t byte_count, in
flags |= MAP_32BIT;
}
#endif
- byte* actual = reinterpret_cast<byte*>(mmap(addr, page_aligned_byte_count, prot, flags, fd.get(), 0));
+
+ void* actual = mmap(expected, page_aligned_byte_count, prot, flags, fd.get(), 0);
+ std::string strerr(strerror(errno));
if (actual == MAP_FAILED) {
std::string maps;
ReadFileToString("/proc/self/maps", &maps);
- *error_msg = StringPrintf("anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0) failed\n%s",
- addr, page_aligned_byte_count, prot, flags, fd.get(),
- maps.c_str());
+ *error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s\n%s",
+ expected, page_aligned_byte_count, prot, flags, fd.get(),
+ strerr.c_str(), maps.c_str());
+ return nullptr;
+ }
+ std::ostringstream check_map_request_error_msg;
+ if (!CheckMapRequest(expected, actual, page_aligned_byte_count, &check_map_request_error_msg)) {
+ *error_msg = check_map_request_error_msg.str();
return nullptr;
}
- return new MemMap(name, actual, byte_count, actual, page_aligned_byte_count, prot);
+ return new MemMap(name, reinterpret_cast<byte*>(actual), byte_count, actual,
+ page_aligned_byte_count, prot);
}
-MemMap* MemMap::MapFileAtAddress(byte* addr, size_t byte_count, int prot, int flags, int fd,
+MemMap* MemMap::MapFileAtAddress(byte* expected, size_t byte_count, int prot, int flags, int fd,
off_t start, bool reuse, const char* filename,
std::string* error_msg) {
CHECK_NE(0, prot);
CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));
+ if (reuse) {
+ // reuse means it is okay that it overlaps an existing page mapping.
+ // Only use this if you actually made the page reservation yourself.
+ CHECK(expected != nullptr);
+ flags |= MAP_FIXED;
+ } else {
+ CHECK_EQ(0, flags & MAP_FIXED);
+ }
+
if (byte_count == 0) {
- return new MemMap(filename, NULL, 0, NULL, 0, prot);
+ return new MemMap(filename, nullptr, 0, nullptr, 0, prot);
}
// Adjust 'offset' to be page-aligned as required by mmap.
int page_offset = start % kPageSize;
off_t page_aligned_offset = start - page_offset;
// Adjust 'byte_count' to be page-aligned as we will map this anyway.
size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, kPageSize);
- // The 'addr' is modified (if specified, ie non-null) to be page aligned to the file but not
- // necessarily to virtual memory. mmap will page align 'addr' for us.
- byte* page_aligned_addr = (addr == NULL) ? NULL : (addr - page_offset);
- if (!reuse) {
- // reuse means it is okay that it overlaps an existing page mapping.
- // Only use this if you actually made the page reservation yourself.
- CheckMapRequest(page_aligned_addr, page_aligned_byte_count);
- } else {
- CHECK(addr != NULL);
- }
- byte* actual = reinterpret_cast<byte*>(mmap(page_aligned_addr,
+  // 'expected' (if specified, i.e. non-null) is adjusted to be page-aligned relative to the
+  // file, but not necessarily to virtual memory; mmap will page-align the address for us.
+ byte* page_aligned_expected = (expected == nullptr) ? nullptr : (expected - page_offset);
+
+ byte* actual = reinterpret_cast<byte*>(mmap(page_aligned_expected,
page_aligned_byte_count,
prot,
flags,
fd,
page_aligned_offset));
+ std::string strerr(strerror(errno));
if (actual == MAP_FAILED) {
- std::string strerr(strerror(errno));
std::string maps;
ReadFileToString("/proc/self/maps", &maps);
*error_msg = StringPrintf("mmap(%p, %zd, 0x%x, 0x%x, %d, %" PRId64
") of file '%s' failed: %s\n%s",
- page_aligned_addr, page_aligned_byte_count, prot, flags, fd,
+ page_aligned_expected, page_aligned_byte_count, prot, flags, fd,
static_cast<int64_t>(page_aligned_offset), filename, strerr.c_str(),
maps.c_str());
- return NULL;
+ return nullptr;
+ }
+ std::ostringstream check_map_request_error_msg;
+ if (!CheckMapRequest(expected, actual, page_aligned_byte_count, &check_map_request_error_msg)) {
+ *error_msg = check_map_request_error_msg.str();
+ return nullptr;
}
return new MemMap(filename, actual + page_offset, byte_count, actual, page_aligned_byte_count,
prot);
}
MemMap::~MemMap() {
- if (base_begin_ == NULL && base_size_ == 0) {
+ if (base_begin_ == nullptr && base_size_ == 0) {
return;
}
int result = munmap(base_begin_, base_size_);
@@ -177,12 +209,12 @@ MemMap::MemMap(const std::string& name, byte* begin, size_t size, void* base_beg
: name_(name), begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size),
prot_(prot) {
if (size_ == 0) {
- CHECK(begin_ == NULL);
- CHECK(base_begin_ == NULL);
+ CHECK(begin_ == nullptr);
+ CHECK(base_begin_ == nullptr);
CHECK_EQ(base_size_, 0U);
} else {
- CHECK(begin_ != NULL);
- CHECK(base_begin_ != NULL);
+ CHECK(begin_ != nullptr);
+ CHECK(base_begin_ != nullptr);
CHECK_NE(base_size_, 0U);
}
};
@@ -201,7 +233,7 @@ MemMap* MemMap::RemapAtEnd(byte* new_end, const char* tail_name, int tail_prot,
byte* new_base_end = new_end;
DCHECK_LE(new_base_end, old_base_end);
if (new_base_end == old_base_end) {
- return new MemMap(tail_name, NULL, 0, NULL, 0, tail_prot);
+ return new MemMap(tail_name, nullptr, 0, nullptr, 0, tail_prot);
}
size_ = new_end - reinterpret_cast<byte*>(begin_);
base_size_ = new_base_end - reinterpret_cast<byte*>(base_begin_);
@@ -257,7 +289,7 @@ MemMap* MemMap::RemapAtEnd(byte* new_end, const char* tail_name, int tail_prot,
}
bool MemMap::Protect(int prot) {
- if (base_begin_ == NULL && base_size_ == 0) {
+ if (base_begin_ == nullptr && base_size_ == 0) {
prot_ = prot;
return true;
}
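The three overlap cases enumerated in CheckMapRequest are instances of the usual half-open interval intersection test; a standalone sketch of that predicate for comparison (the textbook form below also catches boundary cases such as the two limits coinciding):

#include <cstdint>

// [a_start, a_end) and [b_start, b_end) intersect exactly when each interval
// begins before the other one ends.
static bool RangesOverlap(uintptr_t a_start, uintptr_t a_end,
                          uintptr_t b_start, uintptr_t b_end) {
  return a_start < b_end && b_start < a_end;
}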
diff --git a/runtime/mem_map_test.cc b/runtime/mem_map_test.cc
index 6cb59b42e4..eea330799c 100644
--- a/runtime/mem_map_test.cc
+++ b/runtime/mem_map_test.cc
@@ -120,6 +120,39 @@ TEST_F(MemMapTest, MapAnonymousEmpty32bit) {
}
#endif
+TEST_F(MemMapTest, MapAnonymousExactAddr) {
+ std::string error_msg;
+ // Map at an address that should work, which should succeed.
+ UniquePtr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
+ reinterpret_cast<byte*>(ART_BASE_ADDRESS),
+ kPageSize,
+ PROT_READ | PROT_WRITE,
+ false,
+ &error_msg));
+ ASSERT_TRUE(map0.get() != nullptr) << error_msg;
+ ASSERT_TRUE(error_msg.empty());
+ ASSERT_TRUE(map0->BaseBegin() == reinterpret_cast<void*>(ART_BASE_ADDRESS));
+ // Map at an unspecified address, which should succeed.
+ UniquePtr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
+ nullptr,
+ kPageSize,
+ PROT_READ | PROT_WRITE,
+ false,
+ &error_msg));
+ ASSERT_TRUE(map1.get() != nullptr) << error_msg;
+ ASSERT_TRUE(error_msg.empty());
+ ASSERT_TRUE(map1->BaseBegin() != nullptr);
+ // Attempt to map at the same address, which should fail.
+ UniquePtr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
+ reinterpret_cast<byte*>(map1->BaseBegin()),
+ kPageSize,
+ PROT_READ | PROT_WRITE,
+ false,
+ &error_msg));
+ ASSERT_TRUE(map2.get() == nullptr) << error_msg;
+ ASSERT_TRUE(!error_msg.empty());
+}
+
TEST_F(MemMapTest, RemapAtEnd) {
RemapAtEndTest(false);
}
diff --git a/runtime/method_reference.h b/runtime/method_reference.h
index 8e46d7e607..f4fe9b20bd 100644
--- a/runtime/method_reference.h
+++ b/runtime/method_reference.h
@@ -23,9 +23,6 @@ namespace art {
class DexFile;
-// A class is uniquely located by its DexFile and the class_defs_ table index into that DexFile
-typedef std::pair<const DexFile*, uint32_t> ClassReference;
-
// A method is uniquely located by its DexFile and the method_ids_ table index into that DexFile
struct MethodReference {
MethodReference(const DexFile* file, uint32_t index) : dex_file(file), dex_method_index(index) {
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index 6b897cbb4b..e8a0891f8a 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -157,7 +157,7 @@ uintptr_t ArtMethod::NativePcOffset(const uintptr_t pc) {
return pc - reinterpret_cast<uintptr_t>(code);
}
-uint32_t ArtMethod::ToDexPc(const uintptr_t pc) {
+uint32_t ArtMethod::ToDexPc(const uintptr_t pc, bool abort_on_failure) {
if (IsPortableCompiled()) {
// Portable doesn't use the machine pc, we just use dex pc instead.
return static_cast<uint32_t>(pc);
@@ -183,9 +183,11 @@ uint32_t ArtMethod::ToDexPc(const uintptr_t pc) {
return cur.DexPc();
}
}
- LOG(FATAL) << "Failed to find Dex offset for PC offset " << reinterpret_cast<void*>(sought_offset)
- << "(PC " << reinterpret_cast<void*>(pc) << ", code=" << code
- << ") in " << PrettyMethod(this);
+ if (abort_on_failure) {
+ LOG(FATAL) << "Failed to find Dex offset for PC offset " << reinterpret_cast<void*>(sought_offset)
+ << "(PC " << reinterpret_cast<void*>(pc) << ", code=" << code
+ << ") in " << PrettyMethod(this);
+ }
return DexFile::kDexNoIndex;
}
@@ -320,15 +322,6 @@ void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue*
self->PopManagedStackFragment(fragment);
}
-#ifndef NDEBUG
-size_t ArtMethod::GetSirtOffsetInBytes() {
- CHECK(IsNative());
- // TODO: support Sirt access from generic JNI trampoline.
- CHECK_NE(GetEntryPointFromQuickCompiledCode(), GetQuickGenericJniTrampoline());
- return kPointerSize;
-}
-#endif
-
bool ArtMethod::IsRegistered() {
void* native_method =
GetFieldPtr<void*>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_jni_), false);
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index 8c22e67160..c6549330af 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -325,10 +325,13 @@ class MANAGED ArtMethod : public Object {
void SetOatNativeGcMapOffset(uint32_t gc_map_offset);
uint32_t GetOatNativeGcMapOffset();
+ template <bool kCheckFrameSize = true>
uint32_t GetFrameSizeInBytes() {
uint32_t result = GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_frame_size_in_bytes_),
false);
- DCHECK_LE(static_cast<size_t>(kStackAlignment), result);
+ if (kCheckFrameSize) {
+ DCHECK_LE(static_cast<size_t>(kStackAlignment), result);
+ }
return result;
}
@@ -342,13 +345,9 @@ class MANAGED ArtMethod : public Object {
return GetFrameSizeInBytes() - kPointerSize;
}
-#ifndef NDEBUG
- size_t GetSirtOffsetInBytes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-#else
size_t GetSirtOffsetInBytes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return kPointerSize;
}
-#endif
bool IsRegistered();
@@ -406,7 +405,8 @@ class MANAGED ArtMethod : public Object {
uintptr_t NativePcOffset(const uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Converts a native PC to a dex PC.
- uint32_t ToDexPc(const uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uint32_t ToDexPc(const uintptr_t pc, bool abort_on_failure = true)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Converts a dex PC to a native PC.
uintptr_t ToNativePc(const uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
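A sketch of the speculative lookup the new abort_on_failure parameter enables; the helper name is illustrative and not part of this change:

// Returns the dex pc for 'pc' if it maps into 'method', or
// DexFile::kDexNoIndex, without tearing down the runtime on a miss.
static uint32_t TryToDexPc(mirror::ArtMethod* method, uintptr_t pc)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  return method->ToDexPc(pc, false /* abort_on_failure */);
}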
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 484c21a02f..cad10172b7 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -27,6 +27,7 @@
#include "lock_word-inl.h"
#include "monitor.h"
#include "runtime.h"
+#include "reference.h"
#include "throwable.h"
namespace art {
@@ -197,6 +198,12 @@ inline bool Object::IsReferenceInstance() {
}
template<VerifyObjectFlags kVerifyFlags>
+inline Reference* Object::AsReference() {
+ DCHECK(IsReferenceInstance<kVerifyFlags>());
+ return down_cast<Reference*>(this);
+}
+
+template<VerifyObjectFlags kVerifyFlags>
inline Array* Object::AsArray() {
DCHECK(IsArrayInstance<kVerifyFlags>());
return down_cast<Array*>(this);
@@ -314,6 +321,12 @@ inline bool Object::IsFinalizerReferenceInstance() {
}
template<VerifyObjectFlags kVerifyFlags>
+inline FinalizerReference* Object::AsFinalizerReference() {
+ DCHECK(IsFinalizerReferenceInstance<kVerifyFlags>());
+ return down_cast<FinalizerReference*>(this);
+}
+
+template<VerifyObjectFlags kVerifyFlags>
inline bool Object::IsPhantomReferenceInstance() {
return GetClass<kVerifyFlags>()->IsPhantomReferenceClass();
}
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 4e2c624516..476259f57c 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -41,6 +41,7 @@ class ArtField;
class ArtMethod;
class Array;
class Class;
+class FinalizerReference;
template<class T> class ObjectArray;
template<class T> class PrimitiveArray;
typedef PrimitiveArray<uint8_t> BooleanArray;
@@ -51,6 +52,7 @@ typedef PrimitiveArray<float> FloatArray;
typedef PrimitiveArray<int32_t> IntArray;
typedef PrimitiveArray<int64_t> LongArray;
typedef PrimitiveArray<int16_t> ShortArray;
+class Reference;
class String;
class Throwable;
@@ -170,12 +172,16 @@ class MANAGED LOCKABLE Object {
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ Reference* AsReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsWeakReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsSoftReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsFinalizerReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ FinalizerReference* AsFinalizerReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsPhantomReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Accessor for Java type fields.
diff --git a/runtime/mirror/reference-inl.h b/runtime/mirror/reference-inl.h
new file mode 100644
index 0000000000..0f76f77e0a
--- /dev/null
+++ b/runtime/mirror/reference-inl.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_MIRROR_REFERENCE_INL_H_
+#define ART_RUNTIME_MIRROR_REFERENCE_INL_H_
+
+#include "reference.h"
+
+namespace art {
+namespace mirror {
+
+inline bool Reference::IsEnqueuable() {
+  // As an optimization we skip volatile reads here, since this is only called with all the
+  // mutators suspended.
+ const Object* queue = GetFieldObject<mirror::Object>(QueueOffset(), false);
+ const Object* queue_next = GetFieldObject<mirror::Object>(QueueNextOffset(), false);
+ return queue != nullptr && queue_next == nullptr;
+}
+
+} // namespace mirror
+} // namespace art
+
+#endif // ART_RUNTIME_MIRROR_REFERENCE_INL_H_
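IsEnqueuable() encodes the reference's queue state in the two fields it reads; a hypothetical consumer-side sketch (ShouldCollectorEnqueue is illustrative, not part of this change):

// State table for the two fields consulted by IsEnqueuable():
//   queue_ set, queue_next_ clear -> registered with a queue, not yet enqueued.
//   queue_ clear                  -> constructed without a queue, never enqueuable.
//   queue_next_ set               -> already enqueued, not enqueuable again.
static bool ShouldCollectorEnqueue(mirror::Reference* ref)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  return ref->IsEnqueuable();
}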
diff --git a/runtime/mirror/reference.h b/runtime/mirror/reference.h
new file mode 100644
index 0000000000..c2a83ff855
--- /dev/null
+++ b/runtime/mirror/reference.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_MIRROR_REFERENCE_H_
+#define ART_RUNTIME_MIRROR_REFERENCE_H_
+
+#include "object.h"
+
+namespace art {
+
+struct ReferenceOffsets;
+struct FinalizerReferenceOffsets;
+
+namespace mirror {
+
+// C++ mirror of java.lang.ref.Reference
+class MANAGED Reference : public Object {
+ public:
+ static MemberOffset PendingNextOffset() {
+ return OFFSET_OF_OBJECT_MEMBER(Reference, pending_next_);
+ }
+ static MemberOffset QueueOffset() {
+ return OFFSET_OF_OBJECT_MEMBER(Reference, queue_);
+ }
+ static MemberOffset QueueNextOffset() {
+ return OFFSET_OF_OBJECT_MEMBER(Reference, queue_next_);
+ }
+ static MemberOffset ReferentOffset() {
+ return OFFSET_OF_OBJECT_MEMBER(Reference, referent_);
+ }
+
+ Object* GetReferent() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetFieldObject<Object>(ReferentOffset(), true);
+ }
+ template<bool kTransactionActive>
+ void SetReferent(Object* referent) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SetFieldObject<kTransactionActive>(ReferentOffset(), referent, true);
+ }
+ template<bool kTransactionActive>
+ void ClearReferent() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SetFieldObject<kTransactionActive>(ReferentOffset(), nullptr, true);
+ }
+
+ // Volatile reads/writes are not necessary here: the Java-side pending next field is only
+ // accessed by Java threads, and only for cleared references. Once a cleared reference has a
+ // null referent, the GC never reads its pending next again.
+ Reference* GetPendingNext() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetFieldObject<Reference>(PendingNextOffset(), false);
+ }
+ template<bool kTransactionActive>
+ void SetPendingNext(Reference* pending_next) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SetFieldObject<kTransactionActive>(PendingNextOffset(), pending_next, false);
+ }
+
+ bool IsEnqueued() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // References are stored in cyclic lists, so once a reference has been enqueued its pending
+ // next is always non-null.
+ return GetPendingNext() != nullptr;
+ }
+
+ bool IsEnqueuable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ private:
+ // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
+ HeapReference<Reference> pending_next_; // Note this is Java volatile:
+ HeapReference<Object> queue_; // Note this is Java volatile:
+ HeapReference<Reference> queue_next_; // Note this is Java volatile:
+ HeapReference<Object> referent_; // Note this is Java volatile:
+
+ friend struct art::ReferenceOffsets; // for verifying offset information
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Reference);
+};
+
+// C++ mirror of java.lang.ref.FinalizerReference
+class MANAGED FinalizerReference : public Reference {
+ public:
+ static MemberOffset ZombieOffset() {
+ return OFFSET_OF_OBJECT_MEMBER(FinalizerReference, zombie_);
+ }
+
+ template<bool kTransactionActive>
+ void SetZombie(Object* zombie) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return SetFieldObject<kTransactionActive>(ZombieOffset(), zombie, true);
+ }
+ Object* GetZombie() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetFieldObject<Object>(ZombieOffset(), true);
+ }
+
+ private:
+ HeapReference<FinalizerReference> next_;
+ HeapReference<FinalizerReference> prev_;
+ HeapReference<Object> zombie_;
+
+ friend struct art::FinalizerReferenceOffsets; // for verifying offset information
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FinalizerReference);
+};
+
+} // namespace mirror
+} // namespace art
+
+#endif // ART_RUNTIME_MIRROR_REFERENCE_H_
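IsEnqueued() relies on cleared references being linked through pending_next_ into a circular list, so a reference on the pending list never has a null pending next. A minimal sketch of how such a list can be maintained (hypothetical helper; the real logic lives in the GC's reference-queue code, and the transaction template argument is fixed to false for brevity):

    // Appends `ref` to a circular singly linked list threaded through pending_next_.
    void EnqueuePending(mirror::Reference** tail, mirror::Reference* ref)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      if (*tail == nullptr) {
        ref->SetPendingNext<false>(ref);  // Singleton cycle: the reference points at itself.
      } else {
        ref->SetPendingNext<false>((*tail)->GetPendingNext());  // New tail points at the head.
        (*tail)->SetPendingNext<false>(ref);
      }
      *tail = ref;
      // In either branch ref->GetPendingNext() != nullptr, so IsEnqueued() now returns true.
    }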
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 64829178dc..0b58af4c7c 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -29,7 +29,7 @@
#include "jni_internal.h"
#include "mirror/class.h"
#include "ScopedUtfChars.h"
-#include "scoped_thread_state_change.h"
+#include "scoped_fast_native_object_access.h"
#include "toStringArray.h"
#include "trace.h"
@@ -153,12 +153,12 @@ static void VMDebug_resetInstructionCount(JNIEnv* env, jclass) {
}
static void VMDebug_printLoadedClasses(JNIEnv* env, jclass, jint flags) {
- ScopedObjectAccess soa(env);
+ ScopedFastNativeObjectAccess soa(env);
return Runtime::Current()->GetClassLinker()->DumpAllClasses(flags);
}
static jint VMDebug_getLoadedClassCount(JNIEnv* env, jclass) {
- ScopedObjectAccess soa(env);
+ ScopedFastNativeObjectAccess soa(env);
return Runtime::Current()->GetClassLinker()->NumLoadedClasses();
}
@@ -318,14 +318,14 @@ static JNINativeMethod gMethods[] = {
NATIVE_METHOD(VMDebug, getAllocCount, "(I)I"),
NATIVE_METHOD(VMDebug, getHeapSpaceStats, "([J)V"),
NATIVE_METHOD(VMDebug, getInstructionCount, "([I)V"),
- NATIVE_METHOD(VMDebug, getLoadedClassCount, "()I"),
+ NATIVE_METHOD(VMDebug, getLoadedClassCount, "!()I"),
NATIVE_METHOD(VMDebug, getVmFeatureList, "()[Ljava/lang/String;"),
NATIVE_METHOD(VMDebug, infopoint, "(I)V"),
- NATIVE_METHOD(VMDebug, isDebuggerConnected, "()Z"),
- NATIVE_METHOD(VMDebug, isDebuggingEnabled, "()Z"),
+ NATIVE_METHOD(VMDebug, isDebuggerConnected, "!()Z"),
+ NATIVE_METHOD(VMDebug, isDebuggingEnabled, "!()Z"),
NATIVE_METHOD(VMDebug, getMethodTracingMode, "()I"),
- NATIVE_METHOD(VMDebug, lastDebuggerActivity, "()J"),
- NATIVE_METHOD(VMDebug, printLoadedClasses, "(I)V"),
+ NATIVE_METHOD(VMDebug, lastDebuggerActivity, "!()J"),
+ NATIVE_METHOD(VMDebug, printLoadedClasses, "!(I)V"),
NATIVE_METHOD(VMDebug, resetAllocCount, "(I)V"),
NATIVE_METHOD(VMDebug, resetInstructionCount, "()V"),
NATIVE_METHOD(VMDebug, startAllocCounting, "()V"),
@@ -338,7 +338,7 @@ static JNINativeMethod gMethods[] = {
NATIVE_METHOD(VMDebug, stopEmulatorTracing, "()V"),
NATIVE_METHOD(VMDebug, stopInstructionCounting, "()V"),
NATIVE_METHOD(VMDebug, stopMethodTracing, "()V"),
- NATIVE_METHOD(VMDebug, threadCpuTimeNanos, "()J"),
+ NATIVE_METHOD(VMDebug, threadCpuTimeNanos, "!()J"),
};
void register_dalvik_system_VMDebug(JNIEnv* env) {
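The new '!' prefix on the JNI signature strings above is ART's in-house marker for fast native methods: the registration code strips the '!' and flags the method as fast native, so calls into it skip the usual runnable-to-native thread-state transition. A simplified sketch of the convention (illustrative only; the real handling lives in ART's JNI registration code):

    // A '!'-prefixed JNI signature marks the method fast native.
    bool IsFastNative(const char* signature) {
      return signature[0] == '!';
    }
    const char* RealSignature(const char* signature) {
      return IsFastNative(signature) ? signature + 1 : signature;  // Register "()Z", not "!()Z".
    }

Fast native methods stay runnable for their whole duration, which is why they must be short, must not block, and must use ScopedFastNativeObjectAccess instead of ScopedObjectAccess, as the changes in this file do.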
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 0e2d921b7b..5c5eaa1ad4 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -532,7 +532,7 @@ static JNINativeMethod gMethods[] = {
NATIVE_METHOD(VMRuntime, concurrentGC, "()V"),
NATIVE_METHOD(VMRuntime, disableJitCompilation, "()V"),
NATIVE_METHOD(VMRuntime, getTargetHeapUtilization, "()F"),
- NATIVE_METHOD(VMRuntime, isDebuggerActive, "()Z"),
+ NATIVE_METHOD(VMRuntime, isDebuggerActive, "!()Z"),
NATIVE_METHOD(VMRuntime, nativeSetTargetHeapUtilization, "(F)V"),
NATIVE_METHOD(VMRuntime, newNonMovableArray, "!(Ljava/lang/Class;I)Ljava/lang/Object;"),
NATIVE_METHOD(VMRuntime, newUnpaddedArray, "!(Ljava/lang/Class;I)Ljava/lang/Object;"),
diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc
index 7e02e29d3f..9975bf7c7d 100644
--- a/runtime/native/dalvik_system_VMStack.cc
+++ b/runtime/native/dalvik_system_VMStack.cc
@@ -26,42 +26,43 @@
namespace art {
-static jobject GetThreadStack(JNIEnv* env, jobject peer) {
- {
- ScopedObjectAccess soa(env);
- if (soa.Decode<mirror::Object*>(peer) == soa.Self()->GetPeer()) {
- return soa.Self()->CreateInternalStackTrace(soa);
- }
- }
- // Suspend thread to build stack trace.
- bool timed_out;
- Thread* thread = ThreadList::SuspendThreadByPeer(peer, true, false, &timed_out);
- if (thread != NULL) {
- jobject trace;
- {
- ScopedObjectAccess soa(env);
- trace = thread->CreateInternalStackTrace(soa);
- }
- // Restart suspended thread.
- Runtime::Current()->GetThreadList()->Resume(thread, false);
- return trace;
+static jobject GetThreadStack(const ScopedFastNativeObjectAccess& soa, jobject peer)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ jobject trace = nullptr;
+ if (soa.Decode<mirror::Object*>(peer) == soa.Self()->GetPeer()) {
+ trace = soa.Self()->CreateInternalStackTrace(soa);
} else {
- if (timed_out) {
- LOG(ERROR) << "Trying to get thread's stack failed as the thread failed to suspend within a "
- "generous timeout.";
+ // Suspend thread to build stack trace.
+ soa.Self()->TransitionFromRunnableToSuspended(kNative);
+ bool timed_out;
+ Thread* thread = ThreadList::SuspendThreadByPeer(peer, true, false, &timed_out);
+ if (thread != nullptr) {
+ // Must be runnable to create returned array.
+ CHECK_EQ(soa.Self()->TransitionFromSuspendedToRunnable(), kNative);
+ trace = thread->CreateInternalStackTrace(soa);
+ soa.Self()->TransitionFromRunnableToSuspended(kNative);
+ // Restart suspended thread.
+ Runtime::Current()->GetThreadList()->Resume(thread, false);
+ } else {
+ if (timed_out) {
+ LOG(ERROR) << "Trying to get thread's stack failed as the thread failed to suspend within a "
+ "generous timeout.";
+ }
}
- return NULL;
+ CHECK_EQ(soa.Self()->TransitionFromSuspendedToRunnable(), kNative);
}
+ return trace;
}
static jint VMStack_fillStackTraceElements(JNIEnv* env, jclass, jobject javaThread,
jobjectArray javaSteArray) {
- jobject trace = GetThreadStack(env, javaThread);
- if (trace == NULL) {
+ ScopedFastNativeObjectAccess soa(env);
+ jobject trace = GetThreadStack(soa, javaThread);
+ if (trace == nullptr) {
return 0;
}
int32_t depth;
- Thread::InternalStackTraceToStackTraceElementArray(env, trace, javaSteArray, &depth);
+ Thread::InternalStackTraceToStackTraceElementArray(soa, trace, javaSteArray, &depth);
return depth;
}
@@ -111,19 +112,20 @@ static jclass VMStack_getStackClass2(JNIEnv* env, jclass) {
}
static jobjectArray VMStack_getThreadStackTrace(JNIEnv* env, jclass, jobject javaThread) {
- jobject trace = GetThreadStack(env, javaThread);
- if (trace == NULL) {
- return NULL;
+ ScopedFastNativeObjectAccess soa(env);
+ jobject trace = GetThreadStack(soa, javaThread);
+ if (trace == nullptr) {
+ return nullptr;
}
- return Thread::InternalStackTraceToStackTraceElementArray(env, trace);
+ return Thread::InternalStackTraceToStackTraceElementArray(soa, trace);
}
static JNINativeMethod gMethods[] = {
- NATIVE_METHOD(VMStack, fillStackTraceElements, "(Ljava/lang/Thread;[Ljava/lang/StackTraceElement;)I"),
+ NATIVE_METHOD(VMStack, fillStackTraceElements, "!(Ljava/lang/Thread;[Ljava/lang/StackTraceElement;)I"),
NATIVE_METHOD(VMStack, getCallingClassLoader, "!()Ljava/lang/ClassLoader;"),
NATIVE_METHOD(VMStack, getClosestUserClassLoader, "!(Ljava/lang/ClassLoader;Ljava/lang/ClassLoader;)Ljava/lang/ClassLoader;"),
NATIVE_METHOD(VMStack, getStackClass2, "!()Ljava/lang/Class;"),
- NATIVE_METHOD(VMStack, getThreadStackTrace, "(Ljava/lang/Thread;)[Ljava/lang/StackTraceElement;"),
+ NATIVE_METHOD(VMStack, getThreadStackTrace, "!(Ljava/lang/Thread;)[Ljava/lang/StackTraceElement;"),
};
void register_dalvik_system_VMStack(JNIEnv* env) {
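The restructured GetThreadStack() shows the pattern a fast native method must follow before suspending another thread: since the caller is still runnable (holding the mutator lock shared), it first transitions itself out of runnable, suspends the peer, and only transitions back when it needs to touch managed objects. In outline (condensed from the code above):

    Thread* self = soa.Self();
    self->TransitionFromRunnableToSuspended(kNative);     // Release the shared mutator lock.
    Thread* thread = ThreadList::SuspendThreadByPeer(peer, true, false, &timed_out);
    if (thread != nullptr) {
      CHECK_EQ(self->TransitionFromSuspendedToRunnable(), kNative);  // Runnable again, so the
      trace = thread->CreateInternalStackTrace(soa);                 // trace array can be built.
      self->TransitionFromRunnableToSuspended(kNative);
      Runtime::Current()->GetThreadList()->Resume(thread, false);
    }
    CHECK_EQ(self->TransitionFromSuspendedToRunnable(), kNative);    // Leave as we entered.

Suspending a peer while still holding the mutator lock shared could otherwise deadlock against a suspend-all request.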
diff --git a/runtime/native/dalvik_system_Zygote.cc b/runtime/native/dalvik_system_Zygote.cc
index 4d009db29e..030720748a 100644
--- a/runtime/native/dalvik_system_Zygote.cc
+++ b/runtime/native/dalvik_system_Zygote.cc
@@ -22,6 +22,7 @@
#include <paths.h>
#include <signal.h>
#include <stdlib.h>
+#include <sys/resource.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/wait.h>
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 8bf36e7e37..6daf9a951a 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -45,7 +45,7 @@ static mirror::Class* DecodeClass(const ScopedFastNativeObjectAccess& soa, jobje
// "name" is in "binary name" format, e.g. "dalvik.system.Debug$1".
static jclass Class_classForName(JNIEnv* env, jclass, jstring javaName, jboolean initialize,
jobject javaLoader) {
- ScopedObjectAccess soa(env);
+ ScopedFastNativeObjectAccess soa(env);
ScopedUtfChars name(env, javaName);
if (name.c_str() == nullptr) {
return nullptr;
@@ -96,7 +96,7 @@ static jobjectArray Class_getProxyInterfaces(JNIEnv* env, jobject javaThis) {
}
static JNINativeMethod gMethods[] = {
- NATIVE_METHOD(Class, classForName, "(Ljava/lang/String;ZLjava/lang/ClassLoader;)Ljava/lang/Class;"),
+ NATIVE_METHOD(Class, classForName, "!(Ljava/lang/String;ZLjava/lang/ClassLoader;)Ljava/lang/Class;"),
NATIVE_METHOD(Class, getNameNative, "!()Ljava/lang/String;"),
NATIVE_METHOD(Class, getProxyInterfaces, "!()[Ljava/lang/Class;"),
};
diff --git a/runtime/native/java_lang_Runtime.cc b/runtime/native/java_lang_Runtime.cc
index f6149fff44..636be5d2ab 100644
--- a/runtime/native/java_lang_Runtime.cc
+++ b/runtime/native/java_lang_Runtime.cc
@@ -92,12 +92,12 @@ static jlong Runtime_freeMemory(JNIEnv*, jclass) {
}
static JNINativeMethod gMethods[] = {
- NATIVE_METHOD(Runtime, freeMemory, "()J"),
+ NATIVE_METHOD(Runtime, freeMemory, "!()J"),
NATIVE_METHOD(Runtime, gc, "()V"),
- NATIVE_METHOD(Runtime, maxMemory, "()J"),
+ NATIVE_METHOD(Runtime, maxMemory, "!()J"),
NATIVE_METHOD(Runtime, nativeExit, "(I)V"),
NATIVE_METHOD(Runtime, nativeLoad, "(Ljava/lang/String;Ljava/lang/ClassLoader;Ljava/lang/String;)Ljava/lang/String;"),
- NATIVE_METHOD(Runtime, totalMemory, "()J"),
+ NATIVE_METHOD(Runtime, totalMemory, "!()J"),
};
void register_java_lang_Runtime(JNIEnv* env) {
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index 2665a08ab0..de1b593c01 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -38,7 +38,7 @@ static jboolean Thread_interrupted(JNIEnv* env, jclass) {
}
static jboolean Thread_isInterrupted(JNIEnv* env, jobject java_thread) {
- ScopedObjectAccess soa(env);
+ ScopedFastNativeObjectAccess soa(env);
MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
Thread* thread = Thread::FromManagedThread(soa, java_thread);
return (thread != NULL) ? thread->IsInterrupted() : JNI_FALSE;
@@ -170,8 +170,8 @@ static void Thread_yield(JNIEnv*, jobject) {
static JNINativeMethod gMethods[] = {
NATIVE_METHOD(Thread, currentThread, "!()Ljava/lang/Thread;"),
- NATIVE_METHOD(Thread, interrupted, "()Z"),
- NATIVE_METHOD(Thread, isInterrupted, "()Z"),
+ NATIVE_METHOD(Thread, interrupted, "!()Z"),
+ NATIVE_METHOD(Thread, isInterrupted, "!()Z"),
NATIVE_METHOD(Thread, nativeCreate, "(Ljava/lang/Thread;JZ)V"),
NATIVE_METHOD(Thread, nativeGetStatus, "(Z)I"),
NATIVE_METHOD(Thread, nativeHoldsLock, "(Ljava/lang/Object;)Z"),
diff --git a/runtime/native/java_lang_Throwable.cc b/runtime/native/java_lang_Throwable.cc
index 332a1305e0..d1a1105d0d 100644
--- a/runtime/native/java_lang_Throwable.cc
+++ b/runtime/native/java_lang_Throwable.cc
@@ -15,26 +15,27 @@
*/
#include "jni_internal.h"
-#include "scoped_thread_state_change.h"
+#include "scoped_fast_native_object_access.h"
#include "thread.h"
namespace art {
static jobject Throwable_nativeFillInStackTrace(JNIEnv* env, jclass) {
- ScopedObjectAccess soa(env);
+ ScopedFastNativeObjectAccess soa(env);
return soa.Self()->CreateInternalStackTrace(soa);
}
static jobjectArray Throwable_nativeGetStackTrace(JNIEnv* env, jclass, jobject javaStackState) {
- if (javaStackState == NULL) {
- return NULL;
+ if (javaStackState == nullptr) {
+ return nullptr;
}
- return Thread::InternalStackTraceToStackTraceElementArray(env, javaStackState);
+ ScopedFastNativeObjectAccess soa(env);
+ return Thread::InternalStackTraceToStackTraceElementArray(soa, javaStackState);
}
static JNINativeMethod gMethods[] = {
- NATIVE_METHOD(Throwable, nativeFillInStackTrace, "()Ljava/lang/Object;"),
- NATIVE_METHOD(Throwable, nativeGetStackTrace, "(Ljava/lang/Object;)[Ljava/lang/StackTraceElement;"),
+ NATIVE_METHOD(Throwable, nativeFillInStackTrace, "!()Ljava/lang/Object;"),
+ NATIVE_METHOD(Throwable, nativeGetStackTrace, "!(Ljava/lang/Object;)[Ljava/lang/StackTraceElement;"),
};
void register_java_lang_Throwable(JNIEnv* env) {
diff --git a/runtime/native/java_lang_VMClassLoader.cc b/runtime/native/java_lang_VMClassLoader.cc
index 314cdb1a7e..cb8e6237ad 100644
--- a/runtime/native/java_lang_VMClassLoader.cc
+++ b/runtime/native/java_lang_VMClassLoader.cc
@@ -18,14 +18,14 @@
#include "jni_internal.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
-#include "scoped_thread_state_change.h"
+#include "scoped_fast_native_object_access.h"
#include "ScopedUtfChars.h"
#include "zip_archive.h"
namespace art {
static jclass VMClassLoader_findLoadedClass(JNIEnv* env, jclass, jobject javaLoader, jstring javaName) {
- ScopedObjectAccess soa(env);
+ ScopedFastNativeObjectAccess soa(env);
mirror::ClassLoader* loader = soa.Decode<mirror::ClassLoader*>(javaLoader);
ScopedUtfChars name(env, javaName);
if (name.c_str() == NULL) {
@@ -89,9 +89,9 @@ static jstring VMClassLoader_getBootClassPathResource(JNIEnv* env, jclass, jstri
}
static JNINativeMethod gMethods[] = {
- NATIVE_METHOD(VMClassLoader, findLoadedClass, "(Ljava/lang/ClassLoader;Ljava/lang/String;)Ljava/lang/Class;"),
+ NATIVE_METHOD(VMClassLoader, findLoadedClass, "!(Ljava/lang/ClassLoader;Ljava/lang/String;)Ljava/lang/Class;"),
NATIVE_METHOD(VMClassLoader, getBootClassPathResource, "(Ljava/lang/String;I)Ljava/lang/String;"),
- NATIVE_METHOD(VMClassLoader, getBootClassPathSize, "()I"),
+ NATIVE_METHOD(VMClassLoader, getBootClassPathSize, "!()I"),
};
void register_java_lang_VMClassLoader(JNIEnv* env) {
diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc
index c06bf4c9b7..a22d7caa06 100644
--- a/runtime/native/java_lang_reflect_Constructor.cc
+++ b/runtime/native/java_lang_reflect_Constructor.cc
@@ -22,7 +22,7 @@
#include "mirror/object-inl.h"
#include "object_utils.h"
#include "reflection.h"
-#include "scoped_thread_state_change.h"
+#include "scoped_fast_native_object_access.h"
#include "well_known_classes.h"
namespace art {
@@ -35,8 +35,7 @@ namespace art {
* with an interface, array, or primitive class.
*/
static jobject Constructor_newInstance(JNIEnv* env, jobject javaMethod, jobjectArray javaArgs) {
- // TODO: ScopedFastNativeObjectAccess
- ScopedObjectAccess soa(env);
+ ScopedFastNativeObjectAccess soa(env);
jobject art_method = soa.Env()->GetObjectField(
javaMethod, WellKnownClasses::java_lang_reflect_AbstractMethod_artMethod);
diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc
index 694f5e42ca..7e21d6c985 100644
--- a/runtime/native/java_lang_reflect_Field.cc
+++ b/runtime/native/java_lang_reflect_Field.cc
@@ -90,7 +90,7 @@ static bool CheckReceiver(const ScopedFastNativeObjectAccess& soa, jobject j_rcv
class_or_rcvr = soa.Decode<mirror::Object*>(j_rcvr);
mirror::Class* declaringClass = f->GetDeclaringClass();
- if (!VerifyObjectInClass(class_or_rcvr, declaringClass)) {
+ if (!VerifyObjectIsClass(class_or_rcvr, declaringClass)) {
return false;
}
return true;
diff --git a/runtime/native/java_lang_reflect_Method.cc b/runtime/native/java_lang_reflect_Method.cc
index d29de3debe..0b8bb7bbe5 100644
--- a/runtime/native/java_lang_reflect_Method.cc
+++ b/runtime/native/java_lang_reflect_Method.cc
@@ -24,19 +24,19 @@
#include "mirror/proxy.h"
#include "object_utils.h"
#include "reflection.h"
-#include "scoped_thread_state_change.h"
+#include "scoped_fast_native_object_access.h"
#include "well_known_classes.h"
namespace art {
static jobject Method_invoke(JNIEnv* env,
jobject javaMethod, jobject javaReceiver, jobject javaArgs) {
- ScopedObjectAccess soa(env);
+ ScopedFastNativeObjectAccess soa(env);
return InvokeMethod(soa, javaMethod, javaReceiver, javaArgs);
}
static jobject Method_getExceptionTypesNative(JNIEnv* env, jobject javaMethod) {
- ScopedObjectAccess soa(env);
+ ScopedFastNativeObjectAccess soa(env);
jobject art_method = soa.Env()->GetObjectField(
javaMethod, WellKnownClasses::java_lang_reflect_AbstractMethod_artMethod);
@@ -59,8 +59,8 @@ static jobject Method_getExceptionTypesNative(JNIEnv* env, jobject javaMethod) {
}
static JNINativeMethod gMethods[] = {
- NATIVE_METHOD(Method, invoke, "(Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object;"),
- NATIVE_METHOD(Method, getExceptionTypesNative, "()[Ljava/lang/Class;"),
+ NATIVE_METHOD(Method, invoke, "!(Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object;"),
+ NATIVE_METHOD(Method, getExceptionTypesNative, "!()[Ljava/lang/Class;"),
};
void register_java_lang_reflect_Method(JNIEnv* env) {
diff --git a/runtime/native/java_lang_reflect_Proxy.cc b/runtime/native/java_lang_reflect_Proxy.cc
index 1266c41417..07d670d51a 100644
--- a/runtime/native/java_lang_reflect_Proxy.cc
+++ b/runtime/native/java_lang_reflect_Proxy.cc
@@ -19,14 +19,14 @@
#include "mirror/class_loader.h"
#include "mirror/object_array.h"
#include "mirror/string.h"
-#include "scoped_thread_state_change.h"
+#include "scoped_fast_native_object_access.h"
#include "verify_object-inl.h"
namespace art {
static jclass Proxy_generateProxy(JNIEnv* env, jclass, jstring name, jobjectArray interfaces,
jobject loader, jobjectArray methods, jobjectArray throws) {
- ScopedObjectAccess soa(env);
+ ScopedFastNativeObjectAccess soa(env);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
mirror::Class* result = class_linker->CreateProxyClass(soa, name, interfaces, loader, methods,
throws);
@@ -34,7 +34,7 @@ static jclass Proxy_generateProxy(JNIEnv* env, jclass, jstring name, jobjectArra
}
static JNINativeMethod gMethods[] = {
- NATIVE_METHOD(Proxy, generateProxy, "(Ljava/lang/String;[Ljava/lang/Class;Ljava/lang/ClassLoader;[Ljava/lang/reflect/ArtMethod;[[Ljava/lang/Class;)Ljava/lang/Class;"),
+ NATIVE_METHOD(Proxy, generateProxy, "!(Ljava/lang/String;[Ljava/lang/Class;Ljava/lang/ClassLoader;[Ljava/lang/reflect/ArtMethod;[[Ljava/lang/Class;)Ljava/lang/Class;"),
};
void register_java_lang_reflect_Proxy(JNIEnv* env) {
diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
index 4f81a0b95a..1b9ebe42a3 100644
--- a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
+++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
@@ -18,7 +18,7 @@
#include "base/mutex.h"
#include "debugger.h"
#include "jni_internal.h"
-#include "scoped_thread_state_change.h"
+#include "scoped_fast_native_object_access.h"
#include "ScopedLocalRef.h"
#include "ScopedPrimitiveArray.h"
#include "stack.h"
@@ -31,7 +31,7 @@ static void DdmVmInternal_enableRecentAllocations(JNIEnv*, jclass, jboolean enab
}
static jbyteArray DdmVmInternal_getRecentAllocations(JNIEnv* env, jclass) {
- ScopedObjectAccess soa(env);
+ ScopedFastNativeObjectAccess soa(env);
return Dbg::GetRecentAllocations();
}
@@ -46,24 +46,24 @@ static jboolean DdmVmInternal_getRecentAllocationStatus(JNIEnv*, jclass) {
static jobjectArray DdmVmInternal_getStackTraceById(JNIEnv* env, jclass, jint thin_lock_id) {
// Suspend thread to build stack trace.
ThreadList* thread_list = Runtime::Current()->GetThreadList();
+ jobjectArray trace = nullptr;
bool timed_out;
Thread* thread = thread_list->SuspendThreadByThreadId(thin_lock_id, false, &timed_out);
if (thread != NULL) {
- jobject trace;
{
ScopedObjectAccess soa(env);
- trace = thread->CreateInternalStackTrace(soa);
+ jobject internal_trace = thread->CreateInternalStackTrace(soa);
+ trace = Thread::InternalStackTraceToStackTraceElementArray(soa, internal_trace);
}
// Restart suspended thread.
thread_list->Resume(thread, false);
- return Thread::InternalStackTraceToStackTraceElementArray(env, trace);
} else {
if (timed_out) {
LOG(ERROR) << "Trying to get thread's stack by id failed as the thread failed to suspend "
"within a generous timeout.";
}
- return NULL;
}
+ return trace;
}
static void ThreadCountCallback(Thread*, void* context) {
@@ -136,7 +136,7 @@ static jbyteArray DdmVmInternal_getThreadStats(JNIEnv* env, jclass) {
}
static jint DdmVmInternal_heapInfoNotify(JNIEnv* env, jclass, jint when) {
- ScopedObjectAccess soa(env);
+ ScopedFastNativeObjectAccess soa(env);
return Dbg::DdmHandleHpifChunk(static_cast<Dbg::HpifWhen>(when));
}
@@ -150,11 +150,11 @@ static void DdmVmInternal_threadNotify(JNIEnv*, jclass, jboolean enable) {
static JNINativeMethod gMethods[] = {
NATIVE_METHOD(DdmVmInternal, enableRecentAllocations, "(Z)V"),
- NATIVE_METHOD(DdmVmInternal, getRecentAllocations, "()[B"),
- NATIVE_METHOD(DdmVmInternal, getRecentAllocationStatus, "()Z"),
+ NATIVE_METHOD(DdmVmInternal, getRecentAllocations, "!()[B"),
+ NATIVE_METHOD(DdmVmInternal, getRecentAllocationStatus, "!()Z"),
NATIVE_METHOD(DdmVmInternal, getStackTraceById, "(I)[Ljava/lang/StackTraceElement;"),
NATIVE_METHOD(DdmVmInternal, getThreadStats, "()[B"),
- NATIVE_METHOD(DdmVmInternal, heapInfoNotify, "(I)Z"),
+ NATIVE_METHOD(DdmVmInternal, heapInfoNotify, "!(I)Z"),
NATIVE_METHOD(DdmVmInternal, heapSegmentNotify, "(IIZ)Z"),
NATIVE_METHOD(DdmVmInternal, threadNotify, "(Z)V"),
};
diff --git a/runtime/native/scoped_fast_native_object_access.h b/runtime/native/scoped_fast_native_object_access.h
index 645d78cce8..744ac05dc6 100644
--- a/runtime/native/scoped_fast_native_object_access.h
+++ b/runtime/native/scoped_fast_native_object_access.h
@@ -17,22 +17,19 @@
#ifndef ART_RUNTIME_NATIVE_SCOPED_FAST_NATIVE_OBJECT_ACCESS_H_
#define ART_RUNTIME_NATIVE_SCOPED_FAST_NATIVE_OBJECT_ACCESS_H_
-#include "base/casts.h"
-#include "jni_internal.h"
-#include "thread-inl.h"
#include "mirror/art_method.h"
-#include "verify_object.h"
+#include "scoped_thread_state_change.h"
namespace art {
// Variant of ScopedObjectAccess that does no runnable transitions. Should only be used by "fast"
// JNI methods.
-class ScopedFastNativeObjectAccess {
+class ScopedFastNativeObjectAccess : public ScopedObjectAccess {
public:
explicit ScopedFastNativeObjectAccess(JNIEnv* env)
LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
SHARED_LOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE
- : env_(down_cast<JNIEnvExt*>(env)), self_(ThreadForEnv(env)) {
+ : ScopedObjectAccess(env) {
Locks::mutator_lock_->AssertSharedHeld(Self());
DCHECK((*Self()->GetManagedStack()->GetTopQuickFrame())->IsFastNative());
// Don't work with raw objects in non-runnable states.
@@ -42,57 +39,8 @@ class ScopedFastNativeObjectAccess {
~ScopedFastNativeObjectAccess() UNLOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE {
}
- Thread* Self() const {
- return self_;
- }
-
- JNIEnvExt* Env() const {
- return env_;
- }
-
- template<typename T>
- T Decode(jobject obj) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Locks::mutator_lock_->AssertSharedHeld(Self());
- // Don't work with raw objects in non-runnable states.
- DCHECK_EQ(Self()->GetState(), kRunnable);
- return down_cast<T>(Self()->DecodeJObject(obj));
- }
-
- mirror::ArtField* DecodeField(jfieldID fid) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Locks::mutator_lock_->AssertSharedHeld(Self());
- // Don't work with raw objects in non-runnable states.
- DCHECK_EQ(Self()->GetState(), kRunnable);
- return reinterpret_cast<mirror::ArtField*>(fid);
- }
-
- /*
- * Variant of ScopedObjectAccessUnched::AddLocalReference that without JNI work arounds
- * or check JNI that should be being used by fast native methods.
- */
- template<typename T>
- T AddLocalReference(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Locks::mutator_lock_->AssertSharedHeld(Self());
- // Don't work with raw objects in non-runnable states.
- DCHECK_EQ(Self()->GetState(), kRunnable);
- if (obj == NULL) {
- return NULL;
- }
-
- DCHECK_NE((reinterpret_cast<uintptr_t>(obj) & 0xffff0000), 0xebad0000);
-
- IndirectReferenceTable& locals = Env()->locals;
-
- uint32_t cookie = Env()->local_ref_cookie;
- IndirectRef ref = locals.Add(cookie, obj);
-
- return reinterpret_cast<T>(ref);
- }
-
private:
- JNIEnvExt* const env_;
- Thread* const self_;
+ DISALLOW_COPY_AND_ASSIGN(ScopedFastNativeObjectAccess);
};
} // namespace art
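With this change ScopedFastNativeObjectAccess no longer duplicates Decode(), DecodeField() and AddLocalReference(); it inherits them from ScopedObjectAccess and only adds the fast-native assertions in its constructor. Typical use in a fast native method looks like this (Foo_bar is a hypothetical example, not from the patch):

    static jobject Foo_bar(JNIEnv* env, jobject java_this) {
      ScopedFastNativeObjectAccess soa(env);  // DCHECKs the caller is a fast native method.
      mirror::Object* obj = soa.Decode<mirror::Object*>(java_this);  // Inherited helper.
      // ... operate on raw mirror objects; the thread stays runnable throughout ...
      return soa.AddLocalReference<jobject>(obj);
    }

The inherited AddLocalReference() presumably goes through the general path that the deleted hand-rolled copy skipped, trading a little speed for a single shared implementation.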
diff --git a/runtime/oat.cc b/runtime/oat.cc
index d4eea85bb3..d04514f349 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -22,7 +22,7 @@
namespace art {
const uint8_t OatHeader::kOatMagic[] = { 'o', 'a', 't', '\n' };
-const uint8_t OatHeader::kOatVersion[] = { '0', '1', '7', '\0' };
+const uint8_t OatHeader::kOatVersion[] = { '0', '1', '8', '\0' };
OatHeader::OatHeader() {
memset(this, 0, sizeof(*this));
@@ -132,7 +132,8 @@ const void* OatHeader::GetInterpreterToInterpreterBridge() const {
uint32_t OatHeader::GetInterpreterToInterpreterBridgeOffset() const {
DCHECK(IsValid());
- CHECK_GE(interpreter_to_interpreter_bridge_offset_, executable_offset_);
+ CHECK(interpreter_to_interpreter_bridge_offset_ == 0 ||
+ interpreter_to_interpreter_bridge_offset_ >= executable_offset_);
return interpreter_to_interpreter_bridge_offset_;
}
diff --git a/runtime/object_utils.h b/runtime/object_utils.h
index dd2bd4fafe..63801d3a3f 100644
--- a/runtime/object_utils.h
+++ b/runtime/object_utils.h
@@ -133,7 +133,7 @@ class ClassHelper {
} else if (klass_->IsArrayClass()) {
return 2;
} else if (klass_->IsProxyClass()) {
- return klass_->GetIfTable()->GetLength();
+ return klass_->GetIfTable()->Count();
} else {
const DexFile::TypeList* interfaces = GetInterfaceTypeList();
if (interfaces == nullptr) {
@@ -180,7 +180,7 @@ class ClassHelper {
std::string descriptor(GetDescriptor());
const DexFile& dex_file = GetDexFile();
const DexFile::ClassDef* dex_class_def = GetClassDef();
- CHECK(dex_class_def != nullptr);
+ CHECK(dex_class_def != nullptr) << "No class def for class " << PrettyClass(klass_);
return dex_file.GetSourceFile(*dex_class_def);
}
@@ -341,7 +341,7 @@ class MethodHelper {
shorty_ = nullptr;
}
- const mirror::ArtMethod* GetMethod() const {
+ mirror::ArtMethod* GetMethod() const {
return method_;
}
@@ -394,6 +394,20 @@ class MethodHelper {
return shorty_len_;
}
+ // Counts the number of references in the parameter list of the corresponding method.
+ // Note: This does _not_ include the implicit "this" argument of non-static methods.
+ uint32_t GetNumberOfReferenceArgsWithoutReceiver() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const char* shorty = GetShorty();
+ uint32_t refs = 0;
+ for (uint32_t i = 1; i < shorty_len_ ; ++i) {
+ if (shorty[i] == 'L') {
+ refs++;
+ }
+ }
+
+ return refs;
+ }
+
const Signature GetSignature() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
const DexFile& dex_file = GetDexFile();
uint32_t dex_method_idx = method_->GetDexMethodIndex();
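GetNumberOfReferenceArgsWithoutReceiver() scans the shorty, in which index 0 is the return type and every reference parameter, including arrays, appears as 'L'. A worked example (the method is chosen purely for illustration):

    // String concat(String s, int i, Object[] a)  has shorty "LLIL":
    //   shorty[0] = 'L'  return type, skipped (the loop starts at i = 1)
    //   shorty[1] = 'L'  String s     -> counted
    //   shorty[2] = 'I'  int i        -> not a reference
    //   shorty[3] = 'L'  Object[] a   -> counted (array types shorten to 'L')
    // Result: 2. The implicit `this` never appears in a shorty, hence the method's name.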
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 37db4624be..5717689516 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -15,6 +15,9 @@
*/
#include "parsed_options.h"
+#ifdef HAVE_ANDROID_OS
+#include "cutils/properties.h"
+#endif
#include "debugger.h"
#include "monitor.h"
@@ -191,6 +194,36 @@ bool ParsedOptions::Parse(const Runtime::Options& options, bool ignore_unrecogni
profile_backoff_coefficient_ = 2.0;
profile_clock_source_ = kDefaultProfilerClockSource;
+ // Default to explicit checks. Switch them off with -implicit-checks:check1,check2,...
+ // or with setprop dalvik.vm.implicit_checks check1,check2,...
+#ifdef HAVE_ANDROID_OS
+ {
+ char buf[PROP_VALUE_MAX];
+ property_get("dalvik.vm.implicit_checks", buf, "none");
+ std::string checks(buf);
+ std::vector<std::string> checkvec;
+ Split(checks, ',', checkvec);
+ for (auto& str : checkvec) {
+ std::string val = Trim(str);
+ if (val == "none") {
+ explicit_checks_ = kExplicitNullCheck | kExplicitSuspendCheck |
+ kExplicitStackOverflowCheck;
+ } else if (val == "null") {
+ explicit_checks_ &= ~kExplicitNullCheck;
+ } else if (val == "suspend") {
+ explicit_checks_ &= ~kExplicitSuspendCheck;
+ } else if (val == "stack") {
+ explicit_checks_ &= ~kExplicitStackOverflowCheck;
+ } else if (val == "all") {
+ explicit_checks_ = 0;
+ }
+ }
+ }
+#else
+ explicit_checks_ = kExplicitNullCheck | kExplicitSuspendCheck |
+ kExplicitStackOverflowCheck;
+#endif
+
for (size_t i = 0; i < options.size(); ++i) {
if (true && options[0].first == "-Xzygote") {
LOG(INFO) << "option[" << i << "]=" << options[i].first;
@@ -470,6 +503,54 @@ bool ParsedOptions::Parse(const Runtime::Options& options, bool ignore_unrecogni
if (!ParseDouble(option, ':', 1.0, 10.0, &profile_backoff_coefficient_)) {
return false;
}
+ } else if (StartsWith(option, "-implicit-checks:")) {
+ std::string checks;
+ if (!ParseStringAfterChar(option, ':', &checks)) {
+ return false;
+ }
+ std::vector<std::string> checkvec;
+ Split(checks, ',', checkvec);
+ for (auto& str : checkvec) {
+ std::string val = Trim(str);
+ if (val == "none") {
+ explicit_checks_ = kExplicitNullCheck | kExplicitSuspendCheck |
+ kExplicitStackOverflowCheck;
+ } else if (val == "null") {
+ explicit_checks_ &= ~kExplicitNullCheck;
+ } else if (val == "suspend") {
+ explicit_checks_ &= ~kExplicitSuspendCheck;
+ } else if (val == "stack") {
+ explicit_checks_ &= ~kExplicitStackOverflowCheck;
+ } else if (val == "all") {
+ explicit_checks_ = 0;
+ } else {
+ return false;
+ }
+ }
+ } else if (StartsWith(option, "-explicit-checks:")) {
+ std::string checks;
+ if (!ParseStringAfterChar(option, ':', &checks)) {
+ return false;
+ }
+ std::vector<std::string> checkvec;
+ Split(checks, ',', checkvec);
+ for (auto& str : checkvec) {
+ std::string val = Trim(str);
+ if (val == "none") {
+ explicit_checks_ = 0;
+ } else if (val == "null") {
+ explicit_checks_ |= kExplicitNullCheck;
+ } else if (val == "suspend") {
+ explicit_checks_ |= kExplicitSuspendCheck;
+ } else if (val == "stack") {
+ explicit_checks_ |= kExplicitStackOverflowCheck;
+ } else if (val == "all") {
+ explicit_checks_ = kExplicitNullCheck | kExplicitSuspendCheck |
+ kExplicitStackOverflowCheck;
+ } else {
+ return false;
+ }
+ }
} else if (option == "-Xcompiler-option") {
i++;
if (i == options.size()) {
@@ -488,6 +569,7 @@ bool ParsedOptions::Parse(const Runtime::Options& options, bool ignore_unrecogni
StartsWith(option, "-da:") ||
StartsWith(option, "-enableassertions:") ||
StartsWith(option, "-disableassertions:") ||
+ (option == "--runtime-arg") ||
(option == "-esa") ||
(option == "-dsa") ||
(option == "-enablesystemassertions") ||
diff --git a/runtime/parsed_options.h b/runtime/parsed_options.h
index f07bba1240..d6516a80ad 100644
--- a/runtime/parsed_options.h
+++ b/runtime/parsed_options.h
@@ -81,6 +81,11 @@ class ParsedOptions {
double profile_backoff_coefficient_;
ProfilerClockSource profile_clock_source_;
+ static constexpr uint32_t kExplicitNullCheck = 1;
+ static constexpr uint32_t kExplicitSuspendCheck = 2;
+ static constexpr uint32_t kExplicitStackOverflowCheck = 4;
+ uint32_t explicit_checks_;
+
private:
ParsedOptions() {}
diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc
new file mode 100644
index 0000000000..6453cb4d48
--- /dev/null
+++ b/runtime/proxy_test.cc
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "common_compiler_test.h"
+#include "mirror/art_field-inl.h"
+
+#include <jni.h>
+#include <vector>
+
+namespace art {
+
+class ProxyTest : public CommonCompilerTest {
+ public:
+ // Generates a proxy class with the given name and interfaces. This is a simplification of
+ // what libcore does, to fit our test needs. We do not check for duplicate interfaces or
+ // methods, and we do not declare exceptions.
+ mirror::Class* GenerateProxyClass(ScopedObjectAccess& soa, jobject jclass_loader,
+ const char* className,
+ const std::vector<mirror::Class*>& interfaces)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Class* javaLangObject = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;");
+ CHECK(javaLangObject != nullptr);
+
+ jclass javaLangClass = soa.AddLocalReference<jclass>(mirror::Class::GetJavaLangClass());
+
+ // Builds the interfaces array.
+ jobjectArray proxyClassInterfaces = soa.Env()->NewObjectArray(interfaces.size(), javaLangClass,
+ nullptr);
+ soa.Self()->AssertNoPendingException();
+ for (size_t i = 0; i < interfaces.size(); ++i) {
+ soa.Env()->SetObjectArrayElement(proxyClassInterfaces, i,
+ soa.AddLocalReference<jclass>(interfaces[i]));
+ }
+
+ // Builds the method array.
+ jsize methods_count = 3; // Object.equals, Object.hashCode and Object.toString.
+ for (mirror::Class* interface : interfaces) {
+ mirror::ObjectArray<mirror::ArtMethod>* virtual_methods = interface->GetVirtualMethods();
+ methods_count += (virtual_methods == nullptr) ? 0 : virtual_methods->GetLength();
+ }
+ jclass javaLangReflectArtMethod =
+ soa.AddLocalReference<jclass>(mirror::ArtMethod::GetJavaLangReflectArtMethod());
+ jobjectArray proxyClassMethods = soa.Env()->NewObjectArray(methods_count,
+ javaLangReflectArtMethod, nullptr);
+ soa.Self()->AssertNoPendingException();
+
+ // Fill the method array
+ mirror::ArtMethod* equalsMethod = javaLangObject->FindDeclaredVirtualMethod("equals",
+ "(Ljava/lang/Object;)Z");
+ mirror::ArtMethod* hashCodeMethod = javaLangObject->FindDeclaredVirtualMethod("hashCode",
+ "()I");
+ mirror::ArtMethod* toStringMethod = javaLangObject->FindDeclaredVirtualMethod("toString",
+ "()Ljava/lang/String;");
+ CHECK(equalsMethod != nullptr);
+ CHECK(hashCodeMethod != nullptr);
+ CHECK(toStringMethod != nullptr);
+
+ jsize array_index = 0;
+ // Adds Object methods.
+ soa.Env()->SetObjectArrayElement(proxyClassMethods, array_index++,
+ soa.AddLocalReference<jobject>(equalsMethod));
+ soa.Env()->SetObjectArrayElement(proxyClassMethods, array_index++,
+ soa.AddLocalReference<jobject>(hashCodeMethod));
+ soa.Env()->SetObjectArrayElement(proxyClassMethods, array_index++,
+ soa.AddLocalReference<jobject>(toStringMethod));
+
+ // Now adds all interfaces virtual methods.
+ for (mirror::Class* interface : interfaces) {
+ mirror::ObjectArray<mirror::ArtMethod>* virtual_methods = interface->GetVirtualMethods();
+ if (virtual_methods != nullptr) {
+ for (int32_t mth_index = 0; mth_index < virtual_methods->GetLength(); ++mth_index) {
+ mirror::ArtMethod* method = virtual_methods->Get(mth_index);
+ soa.Env()->SetObjectArrayElement(proxyClassMethods, array_index++,
+ soa.AddLocalReference<jobject>(method));
+ }
+ }
+ }
+ CHECK_EQ(array_index, methods_count);
+
+ // Builds an empty exception array.
+ jobjectArray proxyClassThrows = soa.Env()->NewObjectArray(0, javaLangClass, nullptr);
+ soa.Self()->AssertNoPendingException();
+
+ mirror::Class* proxyClass = class_linker_->CreateProxyClass(soa,
+ soa.Env()->NewStringUTF(className),
+ proxyClassInterfaces, jclass_loader,
+ proxyClassMethods, proxyClassThrows);
+ soa.Self()->AssertNoPendingException();
+ return proxyClass;
+ }
+};
+
+// Creates a proxy class and checks that ClassHelper works correctly.
+TEST_F(ProxyTest, ProxyClassHelper) {
+ ScopedObjectAccess soa(Thread::Current());
+ jobject jclass_loader = LoadDex("Interfaces");
+ SirtRef<mirror::ClassLoader> class_loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(jclass_loader));
+
+ mirror::Class* I = class_linker_->FindClass(soa.Self(), "LInterfaces$I;", class_loader);
+ mirror::Class* J = class_linker_->FindClass(soa.Self(), "LInterfaces$J;", class_loader);
+ ASSERT_TRUE(I != nullptr);
+ ASSERT_TRUE(J != nullptr);
+ std::vector<mirror::Class*> interfaces;
+ interfaces.push_back(I);
+ interfaces.push_back(J);
+
+ mirror::Class* proxyClass = GenerateProxyClass(soa, jclass_loader, "$Proxy1234", interfaces);
+ ASSERT_TRUE(proxyClass != nullptr);
+ ASSERT_TRUE(proxyClass->IsProxyClass());
+
+ mirror::Class* javaIoSerializable = class_linker_->FindSystemClass(soa.Self(), "Ljava/io/Serializable;");
+ ASSERT_TRUE(javaIoSerializable != nullptr);
+
+ // Check ClassHelper for proxy.
+ ClassHelper kh(proxyClass);
+ EXPECT_EQ(kh.NumDirectInterfaces(), 3U); // java.io.Serializable, Interfaces$I and Interfaces$J.
+ EXPECT_EQ(javaIoSerializable, kh.GetDirectInterface(0));
+ EXPECT_EQ(I, kh.GetDirectInterface(1));
+ EXPECT_EQ(J, kh.GetDirectInterface(2));
+ std::string proxyClassDescriptor(kh.GetDescriptor());
+ EXPECT_EQ("L$Proxy1234;", proxyClassDescriptor);
+// EXPECT_EQ(nullptr, kh.GetSourceFile());
+}
+
+
+} // namespace art
diff --git a/runtime/quick/inline_method_analyser.cc b/runtime/quick/inline_method_analyser.cc
new file mode 100644
index 0000000000..4388d31698
--- /dev/null
+++ b/runtime/quick/inline_method_analyser.cc
@@ -0,0 +1,303 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "inline_method_analyser.h"
+#include "dex_instruction.h"
+#include "dex_instruction-inl.h"
+#include "mirror/art_field.h"
+#include "mirror/art_field-inl.h"
+#include "mirror/art_method.h"
+#include "mirror/art_method-inl.h"
+#include "mirror/class.h"
+#include "mirror/class-inl.h"
+#include "mirror/dex_cache.h"
+#include "mirror/dex_cache-inl.h"
+#include "verifier/method_verifier.h"
+#include "verifier/method_verifier-inl.h"
+
+/*
+ * NOTE: This code is part of the quick compiler. It lives in the runtime
+ * only to allow the debugger to check whether a method has been inlined.
+ */
+
+namespace art {
+
+COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET),
+ check_iget_type);
+COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_WIDE),
+ check_iget_wide_type);
+COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_OBJECT),
+ check_iget_object_type);
+COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_BOOLEAN),
+ check_iget_boolean_type);
+COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_BYTE),
+ check_iget_byte_type);
+COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_CHAR),
+ check_iget_char_type);
+COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_SHORT),
+ check_iget_short_type);
+
+COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT),
+ check_iput_type);
+COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_WIDE),
+ check_iput_wide_type);
+COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_OBJECT),
+ check_iput_object_type);
+COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_BOOLEAN),
+ check_iput_boolean_type);
+COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_BYTE),
+ check_iput_byte_type);
+COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_CHAR),
+ check_iput_char_type);
+COMPILE_ASSERT(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_SHORT),
+ check_iput_short_type);
+
+COMPILE_ASSERT(InlineMethodAnalyser::IGetVariant(Instruction::IGET) ==
+ InlineMethodAnalyser::IPutVariant(Instruction::IPUT), check_iget_iput_variant);
+COMPILE_ASSERT(InlineMethodAnalyser::IGetVariant(Instruction::IGET_WIDE) ==
+ InlineMethodAnalyser::IPutVariant(Instruction::IPUT_WIDE), check_iget_iput_wide_variant);
+COMPILE_ASSERT(InlineMethodAnalyser::IGetVariant(Instruction::IGET_OBJECT) ==
+ InlineMethodAnalyser::IPutVariant(Instruction::IPUT_OBJECT), check_iget_iput_object_variant);
+COMPILE_ASSERT(InlineMethodAnalyser::IGetVariant(Instruction::IGET_BOOLEAN) ==
+ InlineMethodAnalyser::IPutVariant(Instruction::IPUT_BOOLEAN), check_iget_iput_boolean_variant);
+COMPILE_ASSERT(InlineMethodAnalyser::IGetVariant(Instruction::IGET_BYTE) ==
+ InlineMethodAnalyser::IPutVariant(Instruction::IPUT_BYTE), check_iget_iput_byte_variant);
+COMPILE_ASSERT(InlineMethodAnalyser::IGetVariant(Instruction::IGET_CHAR) ==
+ InlineMethodAnalyser::IPutVariant(Instruction::IPUT_CHAR), check_iget_iput_char_variant);
+COMPILE_ASSERT(InlineMethodAnalyser::IGetVariant(Instruction::IGET_SHORT) ==
+ InlineMethodAnalyser::IPutVariant(Instruction::IPUT_SHORT), check_iget_iput_short_variant);
+
+bool InlineMethodAnalyser::AnalyseMethodCode(verifier::MethodVerifier* verifier,
+ InlineMethod* method) {
+ // We currently support only plain return or 2-instruction methods.
+
+ const DexFile::CodeItem* code_item = verifier->CodeItem();
+ DCHECK_NE(code_item->insns_size_in_code_units_, 0u);
+ const Instruction* instruction = Instruction::At(code_item->insns_);
+ Instruction::Code opcode = instruction->Opcode();
+
+ switch (opcode) {
+ case Instruction::RETURN_VOID:
+ method->opcode = kInlineOpNop;
+ method->flags = kInlineSpecial;
+ method->d.data = 0u;
+ return true;
+ case Instruction::RETURN:
+ case Instruction::RETURN_OBJECT:
+ case Instruction::RETURN_WIDE:
+ return AnalyseReturnMethod(code_item, method);
+ case Instruction::CONST:
+ case Instruction::CONST_4:
+ case Instruction::CONST_16:
+ case Instruction::CONST_HIGH16:
+ // TODO: Support wide constants (RETURN_WIDE).
+ return AnalyseConstMethod(code_item, method);
+ case Instruction::IGET:
+ case Instruction::IGET_OBJECT:
+ case Instruction::IGET_BOOLEAN:
+ case Instruction::IGET_BYTE:
+ case Instruction::IGET_CHAR:
+ case Instruction::IGET_SHORT:
+ case Instruction::IGET_WIDE:
+ return AnalyseIGetMethod(verifier, method);
+ case Instruction::IPUT:
+ case Instruction::IPUT_OBJECT:
+ case Instruction::IPUT_BOOLEAN:
+ case Instruction::IPUT_BYTE:
+ case Instruction::IPUT_CHAR:
+ case Instruction::IPUT_SHORT:
+ case Instruction::IPUT_WIDE:
+ return AnalyseIPutMethod(verifier, method);
+ default:
+ return false;
+ }
+}
+
+bool InlineMethodAnalyser::AnalyseReturnMethod(const DexFile::CodeItem* code_item,
+ InlineMethod* result) {
+ const Instruction* return_instruction = Instruction::At(code_item->insns_);
+ Instruction::Code return_opcode = return_instruction->Opcode();
+ uint32_t reg = return_instruction->VRegA_11x();
+ uint32_t arg_start = code_item->registers_size_ - code_item->ins_size_;
+ DCHECK_GE(reg, arg_start);
+ DCHECK_LT((return_opcode == Instruction::RETURN_WIDE) ? reg + 1 : reg,
+ code_item->registers_size_);
+
+ result->opcode = kInlineOpReturnArg;
+ result->flags = kInlineSpecial;
+ InlineReturnArgData* data = &result->d.return_data;
+ data->arg = reg - arg_start;
+ data->is_wide = (return_opcode == Instruction::RETURN_WIDE) ? 1u : 0u;
+ data->is_object = (return_opcode == Instruction::RETURN_OBJECT) ? 1u : 0u;
+ data->reserved = 0u;
+ data->reserved2 = 0u;
+ return true;
+}
+
+bool InlineMethodAnalyser::AnalyseConstMethod(const DexFile::CodeItem* code_item,
+ InlineMethod* result) {
+ const Instruction* instruction = Instruction::At(code_item->insns_);
+ const Instruction* return_instruction = instruction->Next();
+ Instruction::Code return_opcode = return_instruction->Opcode();
+ if (return_opcode != Instruction::RETURN &&
+ return_opcode != Instruction::RETURN_OBJECT) {
+ return false;
+ }
+
+ uint32_t return_reg = return_instruction->VRegA_11x();
+ DCHECK_LT(return_reg, code_item->registers_size_);
+
+ uint32_t vA, vB, dummy;
+ uint64_t dummy_wide;
+ instruction->Decode(vA, vB, dummy_wide, dummy, nullptr);
+ if (instruction->Opcode() == Instruction::CONST_HIGH16) {
+ vB <<= 16;
+ }
+ DCHECK_LT(vA, code_item->registers_size_);
+ if (vA != return_reg) {
+ return false; // Not returning the value set by const?
+ }
+ if (return_opcode == Instruction::RETURN_OBJECT && vB != 0) {
+ return false; // Returning non-null reference constant?
+ }
+ result->opcode = kInlineOpNonWideConst;
+ result->flags = kInlineSpecial;
+ result->d.data = static_cast<uint64_t>(vB);
+ return true;
+}
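The vB <<= 16 above reconstructs the constant for CONST_HIGH16, which encodes only the top half of a 32-bit value. A worked example (hand-assembled for illustration):

    // Java:  float one() { return 1.0f; }
    //   const/high16 v0, 0x3f80   -> vB = 0x3f80, value = 0x3f80 << 16 = 0x3f800000 (1.0f)
    //   return v0                 -> RETURN of v0, matching vA, so the method is recognized
    //                                as kInlineOpNonWideConst with d.data = 0x3f800000.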
+
+bool InlineMethodAnalyser::AnalyseIGetMethod(verifier::MethodVerifier* verifier,
+ InlineMethod* result) {
+ const DexFile::CodeItem* code_item = verifier->CodeItem();
+ const Instruction* instruction = Instruction::At(code_item->insns_);
+ Instruction::Code opcode = instruction->Opcode();
+ DCHECK(IsInstructionIGet(opcode));
+
+ const Instruction* return_instruction = instruction->Next();
+ Instruction::Code return_opcode = return_instruction->Opcode();
+ if (!(return_opcode == Instruction::RETURN_WIDE && opcode == Instruction::IGET_WIDE) &&
+ !(return_opcode == Instruction::RETURN_OBJECT && opcode == Instruction::IGET_OBJECT) &&
+ !(return_opcode == Instruction::RETURN && opcode != Instruction::IGET_WIDE &&
+ opcode != Instruction::IGET_OBJECT)) {
+ return false;
+ }
+
+ uint32_t return_reg = return_instruction->VRegA_11x();
+ DCHECK_LT(return_opcode == Instruction::RETURN_WIDE ? return_reg + 1 : return_reg,
+ code_item->registers_size_);
+
+ uint32_t dst_reg = instruction->VRegA_22c();
+ uint32_t object_reg = instruction->VRegB_22c();
+ uint32_t field_idx = instruction->VRegC_22c();
+ uint32_t arg_start = code_item->registers_size_ - code_item->ins_size_;
+ DCHECK_GE(object_reg, arg_start);
+ DCHECK_LT(object_reg, code_item->registers_size_);
+ DCHECK_LT(opcode == Instruction::IGET_WIDE ? dst_reg + 1 : dst_reg, code_item->registers_size_);
+ if (dst_reg != return_reg) {
+ return false; // Not returning the value retrieved by IGET?
+ }
+
+ if ((verifier->GetAccessFlags() & kAccStatic) != 0 || object_reg != arg_start) {
+ // TODO: Support inlining IGET on a register other than "this".
+ return false;
+ }
+
+ if (!ComputeSpecialAccessorInfo(field_idx, false, verifier, &result->d.ifield_data)) {
+ return false;
+ }
+
+ result->opcode = kInlineOpIGet;
+ result->flags = kInlineSpecial;
+ InlineIGetIPutData* data = &result->d.ifield_data;
+ data->op_variant = IGetVariant(opcode);
+ data->object_arg = object_reg - arg_start; // Data format allows IGET on any register; only "this" is inlined for now.
+ data->src_arg = 0;
+ data->method_is_static = (verifier->GetAccessFlags() & kAccStatic) != 0;
+ data->reserved = 0;
+ return true;
+}
+
+bool InlineMethodAnalyser::AnalyseIPutMethod(verifier::MethodVerifier* verifier,
+ InlineMethod* result) {
+ const DexFile::CodeItem* code_item = verifier->CodeItem();
+ const Instruction* instruction = Instruction::At(code_item->insns_);
+ Instruction::Code opcode = instruction->Opcode();
+ DCHECK(IsInstructionIPut(opcode));
+
+ const Instruction* return_instruction = instruction->Next();
+ Instruction::Code return_opcode = return_instruction->Opcode();
+ if (return_opcode != Instruction::RETURN_VOID) {
+ // TODO: Support returning an argument.
+ // This is needed by builder classes and generated accessor setters.
+ // builder.setX(value): iput value, this, fieldX; return-object this;
+ // object.access$nnn(value): iput value, this, fieldX; return value;
+ // Use InlineIGetIPutData::reserved to hold the information.
+ return false;
+ }
+
+ uint32_t src_reg = instruction->VRegA_22c();
+ uint32_t object_reg = instruction->VRegB_22c();
+ uint32_t field_idx = instruction->VRegC_22c();
+ uint32_t arg_start = code_item->registers_size_ - code_item->ins_size_;
+ DCHECK_GE(object_reg, arg_start);
+ DCHECK_LT(object_reg, code_item->registers_size_);
+ DCHECK_GE(src_reg, arg_start);
+ DCHECK_LT(opcode == Instruction::IPUT_WIDE ? src_reg + 1 : src_reg, code_item->registers_size_);
+
+ if ((verifier->GetAccessFlags() & kAccStatic) != 0 || object_reg != arg_start) {
+ // TODO: Support inlining IPUT on a register other than "this".
+ return false;
+ }
+
+ if (!ComputeSpecialAccessorInfo(field_idx, true, verifier, &result->d.ifield_data)) {
+ return false;
+ }
+
+ result->opcode = kInlineOpIPut;
+ result->flags = kInlineSpecial;
+ InlineIGetIPutData* data = &result->d.ifield_data;
+ data->op_variant = IPutVariant(opcode);
+ data->object_arg = object_reg - arg_start; // Data format allows IPUT on any register; only "this" is inlined for now.
+ data->src_arg = src_reg - arg_start;
+ data->method_is_static = (verifier->GetAccessFlags() & kAccStatic) != 0;
+ data->reserved = 0;
+ return true;
+}
+
+bool InlineMethodAnalyser::ComputeSpecialAccessorInfo(uint32_t field_idx, bool is_put,
+ verifier::MethodVerifier* verifier,
+ InlineIGetIPutData* result) {
+ mirror::DexCache* dex_cache = verifier->GetDexCache();
+ uint32_t method_idx = verifier->GetMethodReference().dex_method_index;
+ mirror::ArtMethod* method = dex_cache->GetResolvedMethod(method_idx);
+ mirror::ArtField* field = dex_cache->GetResolvedField(field_idx);
+ if (method == nullptr || field == nullptr || field->IsStatic()) {
+ return false;
+ }
+ mirror::Class* method_class = method->GetDeclaringClass();
+ mirror::Class* field_class = field->GetDeclaringClass();
+ if (!method_class->CanAccessResolvedField(field_class, field, dex_cache, field_idx) ||
+ (is_put && field->IsFinal() && method_class != field_class)) {
+ return false;
+ }
+ DCHECK_GE(field->GetOffset().Int32Value(), 0);
+ result->field_idx = field_idx;
+ result->field_offset = field->GetOffset().Int32Value();
+ result->is_volatile = field->IsVolatile();
+ return true;
+}
+
+} // namespace art
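Concretely, the only method bodies the analyser accepts are empty methods, constant returns, argument returns, and single-field accessors. A hand-written illustration of the accessor case (not taken from the patch):

    // Java:   int getX() { return x_; }
    // Dex (registers_size = 2, ins_size = 1, so arg_start = 1 and `this` lives in v1):
    //   iget v0, v1, LFoo;->x_:I   // IGET: dst_reg = 0, object_reg = 1 = arg_start
    //   return v0                  // RETURN: return_reg = 0 = dst_reg
    // AnalyseIGetMethod() then records opcode = kInlineOpIGet with
    // ifield_data.op_variant = IGetVariant(Instruction::IGET) = 0, object_arg = 0,
    // and the field's offset and volatility filled in by ComputeSpecialAccessorInfo().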
diff --git a/runtime/quick/inline_method_analyser.h b/runtime/quick/inline_method_analyser.h
new file mode 100644
index 0000000000..8e1a4083cd
--- /dev/null
+++ b/runtime/quick/inline_method_analyser.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_QUICK_INLINE_METHOD_ANALYSER_H_
+#define ART_RUNTIME_QUICK_INLINE_METHOD_ANALYSER_H_
+
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "dex_file.h"
+#include "dex_instruction.h"
+
+/*
+ * NOTE: This code is part of the quick compiler. It lives in the runtime
+ * only to allow the debugger to check whether a method has been inlined.
+ */
+
+namespace art {
+
+namespace verifier {
+class MethodVerifier;
+} // namespace verifier
+
+enum InlineMethodOpcode : uint16_t {
+ kIntrinsicDoubleCvt,
+ kIntrinsicFloatCvt,
+ kIntrinsicReverseBytes,
+ kIntrinsicAbsInt,
+ kIntrinsicAbsLong,
+ kIntrinsicAbsFloat,
+ kIntrinsicAbsDouble,
+ kIntrinsicMinMaxInt,
+ kIntrinsicSqrt,
+ kIntrinsicCharAt,
+ kIntrinsicCompareTo,
+ kIntrinsicIsEmptyOrLength,
+ kIntrinsicIndexOf,
+ kIntrinsicCurrentThread,
+ kIntrinsicPeek,
+ kIntrinsicPoke,
+ kIntrinsicCas,
+ kIntrinsicUnsafeGet,
+ kIntrinsicUnsafePut,
+
+ kInlineOpNop,
+ kInlineOpReturnArg,
+ kInlineOpNonWideConst,
+ kInlineOpIGet,
+ kInlineOpIPut,
+};
+std::ostream& operator<<(std::ostream& os, const InlineMethodOpcode& rhs);
+
+enum InlineMethodFlags : uint16_t {
+ kNoInlineMethodFlags = 0x0000,
+ kInlineIntrinsic = 0x0001,
+ kInlineSpecial = 0x0002,
+};
+
+// IntrinsicFlags are stored in InlineMethod::d.data.
+enum IntrinsicFlags {
+ kIntrinsicFlagNone = 0,
+
+ // kIntrinsicMinMaxInt
+ kIntrinsicFlagMax = kIntrinsicFlagNone,
+ kIntrinsicFlagMin = 1,
+
+ // kIntrinsicIsEmptyOrLength
+ kIntrinsicFlagLength = kIntrinsicFlagNone,
+ kIntrinsicFlagIsEmpty = kIntrinsicFlagMin,
+
+ // kIntrinsicIndexOf
+ kIntrinsicFlagBase0 = kIntrinsicFlagMin,
+
+ // kIntrinsicUnsafeGet, kIntrinsicUnsafePut, kIntrinsicCas
+ kIntrinsicFlagIsLong = kIntrinsicFlagMin,
+ // kIntrinsicUnsafeGet, kIntrinsicUnsafePut
+ kIntrinsicFlagIsVolatile = 2,
+ // kIntrinsicUnsafePut, kIntrinsicCas
+ kIntrinsicFlagIsObject = 4,
+ // kIntrinsicUnsafePut
+ kIntrinsicFlagIsOrdered = 8,
+};
+
+struct InlineIGetIPutData {
+ // The op_variant below is opcode-Instruction::IGET for IGETs and
+ // opcode-Instruction::IPUT for IPUTs. This is because the runtime
+ // doesn't know the OpSize enumeration.
+ uint16_t op_variant : 3;
+ uint16_t object_arg : 4;
+ uint16_t src_arg : 4; // iput only
+ uint16_t method_is_static : 1;
+ uint16_t reserved : 4;
+ uint16_t field_idx;
+ uint32_t is_volatile : 1;
+ uint32_t field_offset : 31;
+};
+COMPILE_ASSERT(sizeof(InlineIGetIPutData) == sizeof(uint64_t), InvalidSizeOfInlineIGetIPutData);
+
+struct InlineReturnArgData {
+ uint16_t arg;
+ uint16_t is_wide : 1;
+ uint16_t is_object : 1;
+ uint16_t reserved : 14;
+ uint32_t reserved2;
+};
+COMPILE_ASSERT(sizeof(InlineReturnArgData) == sizeof(uint64_t), InvalidSizeOfInlineReturnArgData);
+
+struct InlineMethod {
+ InlineMethodOpcode opcode;
+ InlineMethodFlags flags;
+ union {
+ uint64_t data;
+ InlineIGetIPutData ifield_data;
+ InlineReturnArgData return_data;
+ } d;
+};
+
+class InlineMethodAnalyser {
+ public:
+ /**
+ * Analyse method code to determine if the method is a candidate for inlining.
+ * If it is, record the inlining data.
+ *
+ * @param verifier the method verifier holding data about the method to analyse.
+   * @param method out-parameter that receives the inline method data.
+ * @return true if the method is a candidate for inlining, false otherwise.
+ */
+ static bool AnalyseMethodCode(verifier::MethodVerifier* verifier, InlineMethod* method)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static constexpr bool IsInstructionIGet(Instruction::Code opcode) {
+ return Instruction::IGET <= opcode && opcode <= Instruction::IGET_SHORT;
+ }
+
+ static constexpr bool IsInstructionIPut(Instruction::Code opcode) {
+ return Instruction::IPUT <= opcode && opcode <= Instruction::IPUT_SHORT;
+ }
+
+ static constexpr uint16_t IGetVariant(Instruction::Code opcode) {
+ return opcode - Instruction::IGET;
+ }
+
+ static constexpr uint16_t IPutVariant(Instruction::Code opcode) {
+ return opcode - Instruction::IPUT;
+ }
+
+ private:
+ static bool AnalyseReturnMethod(const DexFile::CodeItem* code_item, InlineMethod* result);
+ static bool AnalyseConstMethod(const DexFile::CodeItem* code_item, InlineMethod* result);
+ static bool AnalyseIGetMethod(verifier::MethodVerifier* verifier, InlineMethod* result)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static bool AnalyseIPutMethod(verifier::MethodVerifier* verifier, InlineMethod* result)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Can we fast-path instance field access in a verified accessor?
+  // If yes, computes the field's offset and volatility, and whether the method is static.
+ static bool ComputeSpecialAccessorInfo(uint32_t field_idx, bool is_put,
+ verifier::MethodVerifier* verifier,
+ InlineIGetIPutData* result)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_QUICK_INLINE_METHOD_ANALYSER_H_
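For illustration, a minimal sketch (not part of the patch) of how a compiler-side consumer might decode the packed data once AnalyseMethodCode succeeds; the surrounding control flow is hypothetical, while the types and constants come from the header above:

    // Hypothetical consumer of the analyser's output for a verified getter.
    InlineMethod method;
    if (InlineMethodAnalyser::AnalyseMethodCode(verifier, &method) &&
        method.opcode == kInlineOpIGet) {
      const InlineIGetIPutData& data = method.d.ifield_data;
      // op_variant stores opcode - Instruction::IGET, so the original dex
      // opcode is recovered by adding Instruction::IGET back.
      Instruction::Code opcode =
          static_cast<Instruction::Code>(Instruction::IGET + data.op_variant);
      // field_offset and is_volatile describe the resolved field, letting the
      // inliner replace the call with a direct load from vReg(data.object_arg).
    }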
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 0bfa70f279..dde9a942ff 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -19,7 +19,6 @@
#include "class_linker.h"
#include "common_throws.h"
#include "dex_file-inl.h"
-#include "invoke_arg_array_builder.h"
#include "jni_internal.h"
#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
@@ -29,12 +28,440 @@
#include "mirror/object_array-inl.h"
#include "object_utils.h"
#include "scoped_thread_state_change.h"
+#include "stack.h"
#include "well_known_classes.h"
namespace art {
-jobject InvokeMethod(const ScopedObjectAccess& soa, jobject javaMethod, jobject javaReceiver,
- jobject javaArgs) {
+class ArgArray {
+ public:
+ explicit ArgArray(const char* shorty, uint32_t shorty_len)
+ : shorty_(shorty), shorty_len_(shorty_len), num_bytes_(0) {
+ size_t num_slots = shorty_len + 1; // +1 in case of receiver.
+ if (LIKELY((num_slots * 2) < kSmallArgArraySize)) {
+ // We can trivially use the small arg array.
+ arg_array_ = small_arg_array_;
+ } else {
+ // Analyze shorty to see if we need the large arg array.
+ for (size_t i = 1; i < shorty_len; ++i) {
+ char c = shorty[i];
+ if (c == 'J' || c == 'D') {
+ num_slots++;
+ }
+ }
+ if (num_slots <= kSmallArgArraySize) {
+ arg_array_ = small_arg_array_;
+ } else {
+ large_arg_array_.reset(new uint32_t[num_slots]);
+ arg_array_ = large_arg_array_.get();
+ }
+ }
+ }
+
+ uint32_t* GetArray() {
+ return arg_array_;
+ }
+
+ uint32_t GetNumBytes() {
+ return num_bytes_;
+ }
+
+ void Append(uint32_t value) {
+ arg_array_[num_bytes_ / 4] = value;
+ num_bytes_ += 4;
+ }
+
+ void Append(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Append(StackReference<mirror::Object>::FromMirrorPtr(obj).AsVRegValue());
+ }
+
+ void AppendWide(uint64_t value) {
+ // For ARM and MIPS portable, align wide values to 8 bytes (ArgArray starts at offset of 4).
+#if defined(ART_USE_PORTABLE_COMPILER) && (defined(__arm__) || defined(__mips__))
+ if (num_bytes_ % 8 == 0) {
+ num_bytes_ += 4;
+ }
+#endif
+ arg_array_[num_bytes_ / 4] = value;
+ arg_array_[(num_bytes_ / 4) + 1] = value >> 32;
+ num_bytes_ += 8;
+ }
+
+ void AppendFloat(float value) {
+ jvalue jv;
+ jv.f = value;
+ Append(jv.i);
+ }
+
+ void AppendDouble(double value) {
+ jvalue jv;
+ jv.d = value;
+ AppendWide(jv.j);
+ }
+
+ void BuildArgArrayFromVarArgs(const ScopedObjectAccess& soa, mirror::Object* receiver, va_list ap)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // Set receiver if non-null (method is not static)
+ if (receiver != nullptr) {
+ Append(receiver);
+ }
+ for (size_t i = 1; i < shorty_len_; ++i) {
+ switch (shorty_[i]) {
+ case 'Z':
+ case 'B':
+ case 'C':
+ case 'S':
+ case 'I':
+ Append(va_arg(ap, jint));
+ break;
+ case 'F':
+ AppendFloat(va_arg(ap, jdouble));
+ break;
+ case 'L':
+ Append(soa.Decode<mirror::Object*>(va_arg(ap, jobject)));
+ break;
+ case 'D':
+ AppendDouble(va_arg(ap, jdouble));
+ break;
+ case 'J':
+ AppendWide(va_arg(ap, jlong));
+ break;
+#ifndef NDEBUG
+ default:
+ LOG(FATAL) << "Unexpected shorty character: " << shorty_[i];
+#endif
+ }
+ }
+ }
+
+ void BuildArgArrayFromJValues(const ScopedObjectAccessUnchecked& soa, mirror::Object* receiver,
+ jvalue* args)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // Set receiver if non-null (method is not static)
+ if (receiver != nullptr) {
+ Append(receiver);
+ }
+ for (size_t i = 1, args_offset = 0; i < shorty_len_; ++i, ++args_offset) {
+ switch (shorty_[i]) {
+ case 'Z':
+ Append(args[args_offset].z);
+ break;
+ case 'B':
+ Append(args[args_offset].b);
+ break;
+ case 'C':
+ Append(args[args_offset].c);
+ break;
+ case 'S':
+ Append(args[args_offset].s);
+ break;
+ case 'I':
+ case 'F':
+ Append(args[args_offset].i);
+ break;
+ case 'L':
+ Append(soa.Decode<mirror::Object*>(args[args_offset].l));
+ break;
+ case 'D':
+ case 'J':
+ AppendWide(args[args_offset].j);
+ break;
+#ifndef NDEBUG
+ default:
+ LOG(FATAL) << "Unexpected shorty character: " << shorty_[i];
+#endif
+ }
+ }
+ }
+
+ void BuildArgArrayFromFrame(ShadowFrame* shadow_frame, uint32_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // Set receiver if non-null (method is not static)
+ size_t cur_arg = arg_offset;
+ if (!shadow_frame->GetMethod()->IsStatic()) {
+ Append(shadow_frame->GetVReg(cur_arg));
+ cur_arg++;
+ }
+ for (size_t i = 1; i < shorty_len_; ++i) {
+ switch (shorty_[i]) {
+ case 'Z':
+ case 'B':
+ case 'C':
+ case 'S':
+ case 'I':
+ case 'F':
+ case 'L':
+ Append(shadow_frame->GetVReg(cur_arg));
+ cur_arg++;
+ break;
+ case 'D':
+ case 'J':
+ AppendWide(shadow_frame->GetVRegLong(cur_arg));
+          cur_arg += 2;  // Wide values occupy two vreg slots.
+ break;
+#ifndef NDEBUG
+ default:
+ LOG(FATAL) << "Unexpected shorty character: " << shorty_[i];
+#endif
+ }
+ }
+ }
+
+ static void ThrowIllegalPrimitiveArgumentException(const char* expected,
+ const StringPiece& found_descriptor)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ThrowIllegalArgumentException(nullptr,
+ StringPrintf("Invalid primitive conversion from %s to %s", expected,
+ PrettyDescriptor(found_descriptor.as_string()).c_str()).c_str());
+ }
+
+ bool BuildArgArrayFromObjectArray(const ScopedObjectAccess& soa, mirror::Object* receiver,
+ mirror::ObjectArray<mirror::Object>* args, MethodHelper& mh)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const DexFile::TypeList* classes = mh.GetParameterTypeList();
+ // Set receiver if non-null (method is not static)
+ if (receiver != nullptr) {
+ Append(receiver);
+ }
+ for (size_t i = 1, args_offset = 0; i < shorty_len_; ++i, ++args_offset) {
+ mirror::Object* arg = args->Get(args_offset);
+ if (((shorty_[i] == 'L') && (arg != nullptr)) || ((arg == nullptr && shorty_[i] != 'L'))) {
+ mirror::Class* dst_class =
+ mh.GetClassFromTypeIdx(classes->GetTypeItem(args_offset).type_idx_);
+ if (UNLIKELY(arg == nullptr || !arg->InstanceOf(dst_class))) {
+ ThrowIllegalArgumentException(nullptr,
+ StringPrintf("method %s argument %zd has type %s, got %s",
+ PrettyMethod(mh.GetMethod(), false).c_str(),
+ args_offset + 1, // Humans don't count from 0.
+ PrettyDescriptor(dst_class).c_str(),
+ PrettyTypeOf(arg).c_str()).c_str());
+ return false;
+ }
+ }
+
+#define DO_FIRST_ARG(match_descriptor, get_fn, append) { \
+ const StringPiece src_descriptor(arg != nullptr \
+ ? ClassHelper(arg->GetClass<>()).GetDescriptor() \
+ : "null"); \
+ if (LIKELY(src_descriptor == match_descriptor)) { \
+ mirror::ArtField* primitive_field = arg->GetClass()->GetIFields()->Get(0); \
+ append(primitive_field-> get_fn(arg));
+
+#define DO_ARG(match_descriptor, get_fn, append) \
+ } else if (LIKELY(src_descriptor == match_descriptor)) { \
+ mirror::ArtField* primitive_field = arg->GetClass()->GetIFields()->Get(0); \
+ append(primitive_field-> get_fn(arg));
+
+#define DO_FAIL(expected) \
+ } else { \
+ if (arg->GetClass<>()->IsPrimitive()) { \
+ ThrowIllegalPrimitiveArgumentException(expected, src_descriptor); \
+ } else { \
+ ThrowIllegalArgumentException(nullptr, \
+ StringPrintf("method %s argument %zd has type %s, got %s", \
+ PrettyMethod(mh.GetMethod(), false).c_str(), \
+ args_offset + 1, \
+ expected, \
+ PrettyTypeOf(arg).c_str()).c_str()); \
+ } \
+ return false; \
+ } }
+
+ switch (shorty_[i]) {
+ case 'L':
+ Append(arg);
+ break;
+ case 'Z':
+ DO_FIRST_ARG("Ljava/lang/Boolean;", GetBoolean, Append)
+ DO_FAIL("boolean")
+ break;
+ case 'B':
+ DO_FIRST_ARG("Ljava/lang/Byte;", GetByte, Append)
+ DO_FAIL("byte")
+ break;
+ case 'C':
+ DO_FIRST_ARG("Ljava/lang/Character;", GetChar, Append)
+ DO_FAIL("char")
+ break;
+ case 'S':
+ DO_FIRST_ARG("Ljava/lang/Short;", GetShort, Append)
+ DO_ARG("Ljava/lang/Byte;", GetByte, Append)
+ DO_FAIL("short")
+ break;
+ case 'I':
+ DO_FIRST_ARG("Ljava/lang/Integer;", GetInt, Append)
+ DO_ARG("Ljava/lang/Character;", GetChar, Append)
+ DO_ARG("Ljava/lang/Short;", GetShort, Append)
+ DO_ARG("Ljava/lang/Byte;", GetByte, Append)
+ DO_FAIL("int")
+ break;
+ case 'J':
+ DO_FIRST_ARG("Ljava/lang/Long;", GetLong, AppendWide)
+ DO_ARG("Ljava/lang/Integer;", GetInt, AppendWide)
+ DO_ARG("Ljava/lang/Character;", GetChar, AppendWide)
+ DO_ARG("Ljava/lang/Short;", GetShort, AppendWide)
+ DO_ARG("Ljava/lang/Byte;", GetByte, AppendWide)
+ DO_FAIL("long")
+ break;
+ case 'F':
+ DO_FIRST_ARG("Ljava/lang/Float;", GetFloat, AppendFloat)
+ DO_ARG("Ljava/lang/Long;", GetLong, AppendFloat)
+ DO_ARG("Ljava/lang/Integer;", GetInt, AppendFloat)
+ DO_ARG("Ljava/lang/Character;", GetChar, AppendFloat)
+ DO_ARG("Ljava/lang/Short;", GetShort, AppendFloat)
+ DO_ARG("Ljava/lang/Byte;", GetByte, AppendFloat)
+ DO_FAIL("float")
+ break;
+ case 'D':
+ DO_FIRST_ARG("Ljava/lang/Double;", GetDouble, AppendDouble)
+ DO_ARG("Ljava/lang/Float;", GetFloat, AppendDouble)
+ DO_ARG("Ljava/lang/Long;", GetLong, AppendDouble)
+ DO_ARG("Ljava/lang/Integer;", GetInt, AppendDouble)
+ DO_ARG("Ljava/lang/Character;", GetChar, AppendDouble)
+ DO_ARG("Ljava/lang/Short;", GetShort, AppendDouble)
+ DO_ARG("Ljava/lang/Byte;", GetByte, AppendDouble)
+ DO_FAIL("double")
+ break;
+#ifndef NDEBUG
+ default:
+ LOG(FATAL) << "Unexpected shorty character: " << shorty_[i];
+#endif
+ }
+#undef DO_FIRST_ARG
+#undef DO_ARG
+#undef DO_FAIL
+ }
+ return true;
+ }
+
+ private:
+ enum { kSmallArgArraySize = 16 };
+ const char* const shorty_;
+ const uint32_t shorty_len_;
+ uint32_t num_bytes_;
+ uint32_t* arg_array_;
+ uint32_t small_arg_array_[kSmallArgArraySize];
+ UniquePtr<uint32_t[]> large_arg_array_;
+};
+
+static void CheckMethodArguments(mirror::ArtMethod* m, uint32_t* args)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const DexFile::TypeList* params = MethodHelper(m).GetParameterTypeList();
+ if (params == nullptr) {
+ return; // No arguments so nothing to check.
+ }
+ uint32_t offset = 0;
+ uint32_t num_params = params->Size();
+ size_t error_count = 0;
+ if (!m->IsStatic()) {
+ offset = 1;
+ }
+ for (uint32_t i = 0; i < num_params; i++) {
+ uint16_t type_idx = params->GetTypeItem(i).type_idx_;
+ mirror::Class* param_type = MethodHelper(m).GetClassFromTypeIdx(type_idx);
+ if (param_type == nullptr) {
+ Thread* self = Thread::Current();
+ CHECK(self->IsExceptionPending());
+ LOG(ERROR) << "Internal error: unresolvable type for argument type in JNI invoke: "
+ << MethodHelper(m).GetTypeDescriptorFromTypeIdx(type_idx) << "\n"
+ << self->GetException(nullptr)->Dump();
+ self->ClearException();
+ ++error_count;
+ } else if (!param_type->IsPrimitive()) {
+ // TODO: check primitives are in range.
+ mirror::Object* argument = reinterpret_cast<mirror::Object*>(args[i + offset]);
+ if (argument != nullptr && !argument->InstanceOf(param_type)) {
+ LOG(ERROR) << "JNI ERROR (app bug): attempt to pass an instance of "
+ << PrettyTypeOf(argument) << " as argument " << (i + 1)
+ << " to " << PrettyMethod(m);
+ ++error_count;
+ }
+ } else if (param_type->IsPrimitiveLong() || param_type->IsPrimitiveDouble()) {
+ offset++;
+ }
+ }
+ if (error_count > 0) {
+ // TODO: pass the JNI function name (such as "CallVoidMethodV") through so we can call JniAbort
+ // with an argument.
+ JniAbortF(nullptr, "bad arguments passed to %s (see above for details)",
+ PrettyMethod(m).c_str());
+ }
+}
+
+static mirror::ArtMethod* FindVirtualMethod(mirror::Object* receiver,
+ mirror::ArtMethod* method)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(method);
+}
+
+
+static void InvokeWithArgArray(const ScopedObjectAccessUnchecked& soa, mirror::ArtMethod* method,
+ ArgArray* arg_array, JValue* result, const char* shorty)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint32_t* args = arg_array->GetArray();
+ if (UNLIKELY(soa.Env()->check_jni)) {
+ CheckMethodArguments(method, args);
+ }
+ method->Invoke(soa.Self(), args, arg_array->GetNumBytes(), result, shorty);
+}
+
+JValue InvokeWithVarArgs(const ScopedObjectAccess& soa, jobject obj, jmethodID mid, va_list args)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::ArtMethod* method = soa.DecodeMethod(mid);
+ mirror::Object* receiver = method->IsStatic() ? nullptr : soa.Decode<mirror::Object*>(obj);
+ MethodHelper mh(method);
+ JValue result;
+ ArgArray arg_array(mh.GetShorty(), mh.GetShortyLength());
+ arg_array.BuildArgArrayFromVarArgs(soa, receiver, args);
+ InvokeWithArgArray(soa, method, &arg_array, &result, mh.GetShorty());
+ return result;
+}
+
+JValue InvokeWithJValues(const ScopedObjectAccessUnchecked& soa, mirror::Object* receiver,
+ jmethodID mid, jvalue* args) {
+ mirror::ArtMethod* method = soa.DecodeMethod(mid);
+ MethodHelper mh(method);
+ JValue result;
+ ArgArray arg_array(mh.GetShorty(), mh.GetShortyLength());
+ arg_array.BuildArgArrayFromJValues(soa, receiver, args);
+ InvokeWithArgArray(soa, method, &arg_array, &result, mh.GetShorty());
+ return result;
+}
+
+JValue InvokeVirtualOrInterfaceWithJValues(const ScopedObjectAccess& soa,
+ mirror::Object* receiver, jmethodID mid, jvalue* args) {
+ mirror::ArtMethod* method = FindVirtualMethod(receiver, soa.DecodeMethod(mid));
+ MethodHelper mh(method);
+ JValue result;
+ ArgArray arg_array(mh.GetShorty(), mh.GetShortyLength());
+ arg_array.BuildArgArrayFromJValues(soa, receiver, args);
+ InvokeWithArgArray(soa, method, &arg_array, &result, mh.GetShorty());
+ return result;
+}
+
+JValue InvokeVirtualOrInterfaceWithVarArgs(const ScopedObjectAccess& soa,
+ jobject obj, jmethodID mid, va_list args) {
+ mirror::Object* receiver = soa.Decode<mirror::Object*>(obj);
+ mirror::ArtMethod* method = FindVirtualMethod(receiver, soa.DecodeMethod(mid));
+ MethodHelper mh(method);
+ JValue result;
+ ArgArray arg_array(mh.GetShorty(), mh.GetShortyLength());
+ arg_array.BuildArgArrayFromVarArgs(soa, receiver, args);
+ InvokeWithArgArray(soa, method, &arg_array, &result, mh.GetShorty());
+ return result;
+}
+
+void InvokeWithShadowFrame(Thread* self, ShadowFrame* shadow_frame, uint16_t arg_offset,
+ MethodHelper& mh, JValue* result) {
+ ArgArray arg_array(mh.GetShorty(), mh.GetShortyLength());
+ arg_array.BuildArgArrayFromFrame(shadow_frame, arg_offset);
+ shadow_frame->GetMethod()->Invoke(self, arg_array.GetArray(), arg_array.GetNumBytes(), result,
+ mh.GetShorty());
+}
+
+jobject InvokeMethod(const ScopedObjectAccess& soa, jobject javaMethod,
+ jobject javaReceiver, jobject javaArgs) {
jmethodID mid = soa.Env()->FromReflectedMethod(javaMethod);
mirror::ArtMethod* m = soa.DecodeMethod(mid);
@@ -47,17 +474,16 @@ jobject InvokeMethod(const ScopedObjectAccess& soa, jobject javaMethod, jobject
declaring_class = sirt_c.get();
}
- mirror::Object* receiver = NULL;
+ mirror::Object* receiver = nullptr;
if (!m->IsStatic()) {
     // Check that the receiver is non-null and an instance of the method's declaring class.
receiver = soa.Decode<mirror::Object*>(javaReceiver);
- if (!VerifyObjectInClass(receiver, declaring_class)) {
+ if (!VerifyObjectIsClass(receiver, declaring_class)) {
return NULL;
}
// Find the actual implementation of the virtual method.
m = receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(m);
- mid = soa.EncodeMethod(m);
}
// Get our arrays of arguments and their types, and check they're the same size.
@@ -65,8 +491,8 @@ jobject InvokeMethod(const ScopedObjectAccess& soa, jobject javaMethod, jobject
soa.Decode<mirror::ObjectArray<mirror::Object>*>(javaArgs);
MethodHelper mh(m);
const DexFile::TypeList* classes = mh.GetParameterTypeList();
- uint32_t classes_size = classes == NULL ? 0 : classes->Size();
- uint32_t arg_count = (objects != NULL) ? objects->GetLength() : 0;
+ uint32_t classes_size = (classes == nullptr) ? 0 : classes->Size();
+ uint32_t arg_count = (objects != nullptr) ? objects->GetLength() : 0;
if (arg_count != classes_size) {
ThrowIllegalArgumentException(NULL,
StringPrintf("Wrong number of arguments; expected %d, got %d",
@@ -74,22 +500,15 @@ jobject InvokeMethod(const ScopedObjectAccess& soa, jobject javaMethod, jobject
return NULL;
}
- // Translate javaArgs to a jvalue[].
- UniquePtr<jvalue[]> args(new jvalue[arg_count]);
- JValue* decoded_args = reinterpret_cast<JValue*>(args.get());
- for (uint32_t i = 0; i < arg_count; ++i) {
- mirror::Object* arg = objects->Get(i);
- mirror::Class* dst_class = mh.GetClassFromTypeIdx(classes->GetTypeItem(i).type_idx_);
- if (!UnboxPrimitiveForArgument(arg, dst_class, decoded_args[i], m, i)) {
- return NULL;
- }
- if (!dst_class->IsPrimitive()) {
- args[i].l = soa.AddLocalReference<jobject>(arg);
- }
+ // Invoke the method.
+ JValue result;
+ ArgArray arg_array(mh.GetShorty(), mh.GetShortyLength());
+ if (!arg_array.BuildArgArrayFromObjectArray(soa, receiver, objects, mh)) {
+ CHECK(soa.Self()->IsExceptionPending());
+ return nullptr;
}
- // Invoke the method.
- JValue value(InvokeWithJValues(soa, javaReceiver, mid, args.get()));
+ InvokeWithArgArray(soa, m, &arg_array, &result, mh.GetShorty());
// Wrap any exception with "Ljava/lang/reflect/InvocationTargetException;" and return early.
if (soa.Self()->IsExceptionPending()) {
@@ -103,10 +522,11 @@ jobject InvokeMethod(const ScopedObjectAccess& soa, jobject javaMethod, jobject
}
// Box if necessary and return.
- return soa.AddLocalReference<jobject>(BoxPrimitive(mh.GetReturnType()->GetPrimitiveType(), value));
+ return soa.AddLocalReference<jobject>(BoxPrimitive(mh.GetReturnType()->GetPrimitiveType(),
+ result));
}
-bool VerifyObjectInClass(mirror::Object* o, mirror::Class* c) {
+bool VerifyObjectIsClass(mirror::Object* o, mirror::Class* c) {
if (o == NULL) {
ThrowNullPointerException(NULL, "null receiver");
return false;
@@ -218,6 +638,10 @@ mirror::Object* BoxPrimitive(Primitive::Type src_class, const JValue& value) {
if (src_class == Primitive::kPrimNot) {
return value.GetL();
}
+ if (src_class == Primitive::kPrimVoid) {
+ // There's no such thing as a void field, and void methods invoked via reflection return null.
+ return nullptr;
+ }
jmethodID m = NULL;
const char* shorty;
@@ -254,20 +678,15 @@ mirror::Object* BoxPrimitive(Primitive::Type src_class, const JValue& value) {
m = WellKnownClasses::java_lang_Short_valueOf;
shorty = "LS";
break;
- case Primitive::kPrimVoid:
- // There's no such thing as a void field, and void methods invoked via reflection return null.
- return nullptr;
default:
LOG(FATAL) << static_cast<int>(src_class);
shorty = nullptr;
}
ScopedObjectAccessUnchecked soa(Thread::Current());
- if (kIsDebugBuild) {
- CHECK_EQ(soa.Self()->GetState(), kRunnable);
- }
+ DCHECK_EQ(soa.Self()->GetState(), kRunnable);
- ArgArray arg_array(nullptr, 0);
+ ArgArray arg_array(shorty, 2);
JValue result;
if (src_class == Primitive::kPrimDouble || src_class == Primitive::kPrimLong) {
arg_array.AppendWide(value.GetJ());
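ArgArray's sizing and marshalling logic all hangs off the dex shorty convention: index 0 is the return type, each later character is one argument, and 'J'/'D' values occupy two 32-bit slots, with a non-static receiver taking one extra slot up front. A minimal sketch of that slot arithmetic (CountArgSlots is a hypothetical helper, not in the patch):

    // Count the 32-bit vreg slots a call needs, per the shorty convention.
    static size_t CountArgSlots(const char* shorty, bool is_static) {
      size_t slots = is_static ? 0u : 1u;  // Non-static calls pass the receiver first.
      for (size_t i = 1; shorty[i] != '\0'; ++i) {  // shorty[0] is the return type.
        slots += (shorty[i] == 'J' || shorty[i] == 'D') ? 2u : 1u;
      }
      return slots;
    }

For example, int sum(int, long) has shorty "IIJ" and needs three slots when static: one for the int plus two for the long, matching what BuildArgArrayFromFrame consumes above.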
diff --git a/runtime/reflection.h b/runtime/reflection.h
index 13c90af895..d2f9f25e55 100644
--- a/runtime/reflection.h
+++ b/runtime/reflection.h
@@ -28,7 +28,10 @@ namespace mirror {
class Object;
} // namespace mirror
union JValue;
+class MethodHelper;
class ScopedObjectAccess;
+class ScopedObjectAccessUnchecked;
+class ShadowFrame;
class ThrowLocation;
mirror::Object* BoxPrimitive(Primitive::Type src_class, const JValue& value)
@@ -48,10 +51,30 @@ bool ConvertPrimitiveValue(const ThrowLocation* throw_location, bool unbox_for_r
const JValue& src, JValue& dst)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-jobject InvokeMethod(const ScopedObjectAccess& soa, jobject method, jobject receiver, jobject args)
+JValue InvokeWithVarArgs(const ScopedObjectAccess& soa, jobject obj, jmethodID mid, va_list args)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-bool VerifyObjectInClass(mirror::Object* o, mirror::Class* c)
+JValue InvokeWithJValues(const ScopedObjectAccessUnchecked& soa, mirror::Object* receiver,
+ jmethodID mid, jvalue* args)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+JValue InvokeVirtualOrInterfaceWithJValues(const ScopedObjectAccess& soa,
+ mirror::Object* receiver, jmethodID mid, jvalue* args)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+JValue InvokeVirtualOrInterfaceWithVarArgs(const ScopedObjectAccess& soa,
+ jobject obj, jmethodID mid, va_list args)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+void InvokeWithShadowFrame(Thread* self, ShadowFrame* shadow_frame, uint16_t arg_offset,
+ MethodHelper& mh, JValue* result)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+jobject InvokeMethod(const ScopedObjectAccess& soa, jobject method, jobject receiver,
+ jobject args)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+bool VerifyObjectIsClass(mirror::Object* o, mirror::Class* c)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
} // namespace art
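The new entry points split along two axes: how the arguments arrive (va_list versus a jvalue array) and whether virtual dispatch on the receiver is wanted. A hypothetical caller using the jvalue flavour (receiver, mid and the method's shape are assumptions here):

    // Reflectively call int bar(int) on 'receiver'; 'soa' is a live
    // ScopedObjectAccessUnchecked and 'mid' the jmethodID of bar.
    jvalue args[1];
    args[0].i = 42;
    JValue result = InvokeWithJValues(soa, receiver, mid, args);
    int ret = result.GetI();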
diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc
new file mode 100644
index 0000000000..f7fc0202ca
--- /dev/null
+++ b/runtime/reflection_test.cc
@@ -0,0 +1,628 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "reflection.h"
+
+#include <float.h>
+#include <limits.h>
+
+#include "common_compiler_test.h"
+#include "mirror/art_method-inl.h"
+
+namespace art {
+
+// TODO: Convert to CommonRuntimeTest. Currently MakeExecutable is used.
+class ReflectionTest : public CommonCompilerTest {
+ protected:
+ virtual void SetUp() {
+ CommonCompilerTest::SetUp();
+
+ vm_ = Runtime::Current()->GetJavaVM();
+
+ // Turn on -verbose:jni for the JNI tests.
+ // gLogVerbosity.jni = true;
+
+ vm_->AttachCurrentThread(&env_, NULL);
+
+ ScopedLocalRef<jclass> aioobe(env_,
+ env_->FindClass("java/lang/ArrayIndexOutOfBoundsException"));
+ CHECK(aioobe.get() != NULL);
+ aioobe_ = reinterpret_cast<jclass>(env_->NewGlobalRef(aioobe.get()));
+
+ ScopedLocalRef<jclass> ase(env_, env_->FindClass("java/lang/ArrayStoreException"));
+ CHECK(ase.get() != NULL);
+ ase_ = reinterpret_cast<jclass>(env_->NewGlobalRef(ase.get()));
+
+ ScopedLocalRef<jclass> sioobe(env_,
+ env_->FindClass("java/lang/StringIndexOutOfBoundsException"));
+ CHECK(sioobe.get() != NULL);
+ sioobe_ = reinterpret_cast<jclass>(env_->NewGlobalRef(sioobe.get()));
+ }
+
+ void CleanUpJniEnv() {
+ if (aioobe_ != NULL) {
+ env_->DeleteGlobalRef(aioobe_);
+ aioobe_ = NULL;
+ }
+ if (ase_ != NULL) {
+ env_->DeleteGlobalRef(ase_);
+ ase_ = NULL;
+ }
+ if (sioobe_ != NULL) {
+ env_->DeleteGlobalRef(sioobe_);
+ sioobe_ = NULL;
+ }
+ }
+
+ virtual void TearDown() {
+ CleanUpJniEnv();
+ CommonCompilerTest::TearDown();
+ }
+
+ jclass GetPrimitiveClass(char descriptor) {
+ ScopedObjectAccess soa(env_);
+ mirror::Class* c = class_linker_->FindPrimitiveClass(descriptor);
+ CHECK(c != nullptr);
+ return soa.AddLocalReference<jclass>(c);
+ }
+
+ void ReflectionTestMakeExecutable(mirror::ArtMethod** method,
+ mirror::Object** receiver,
+ bool is_static, const char* method_name,
+ const char* method_signature)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const char* class_name = is_static ? "StaticLeafMethods" : "NonStaticLeafMethods";
+ jobject jclass_loader(LoadDex(class_name));
+ Thread* self = Thread::Current();
+ SirtRef<mirror::ClassLoader> null_class_loader(self, nullptr);
+ SirtRef<mirror::ClassLoader>
+ class_loader(self,
+ ScopedObjectAccessUnchecked(self).Decode<mirror::ClassLoader*>(jclass_loader));
+ if (is_static) {
+ MakeExecutable(ScopedObjectAccessUnchecked(self).Decode<mirror::ClassLoader*>(jclass_loader),
+ class_name);
+ } else {
+ MakeExecutable(nullptr, "java.lang.Class");
+ MakeExecutable(nullptr, "java.lang.Object");
+ MakeExecutable(ScopedObjectAccessUnchecked(self).Decode<mirror::ClassLoader*>(jclass_loader),
+ class_name);
+ }
+
+ mirror::Class* c = class_linker_->FindClass(self, DotToDescriptor(class_name).c_str(),
+ class_loader);
+ CHECK(c != NULL);
+
+ *method = is_static ? c->FindDirectMethod(method_name, method_signature)
+ : c->FindVirtualMethod(method_name, method_signature);
+    CHECK(*method != nullptr);  // The lookup above must have succeeded.
+
+ *receiver = (is_static ? nullptr : c->AllocObject(self));
+
+ // Start runtime.
+ bool started = runtime_->Start();
+ CHECK(started);
+ self->TransitionFromSuspendedToRunnable();
+ }
+
+ void InvokeNopMethod(bool is_static) {
+ ScopedObjectAccess soa(env_);
+ mirror::ArtMethod* method;
+ mirror::Object* receiver;
+ ReflectionTestMakeExecutable(&method, &receiver, is_static, "nop", "()V");
+ InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), nullptr);
+ }
+
+ void InvokeIdentityByteMethod(bool is_static) {
+ ScopedObjectAccess soa(env_);
+ mirror::ArtMethod* method;
+ mirror::Object* receiver;
+ ReflectionTestMakeExecutable(&method, &receiver, is_static, "identity", "(B)B");
+ jvalue args[1];
+
+ args[0].b = 0;
+ JValue result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(0, result.GetB());
+
+ args[0].b = -1;
+ result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(-1, result.GetB());
+
+ args[0].b = SCHAR_MAX;
+ result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(SCHAR_MAX, result.GetB());
+
+    args[0].b = SCHAR_MIN;
+ result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(SCHAR_MIN, result.GetB());
+ }
+
+ void InvokeIdentityIntMethod(bool is_static) {
+ ScopedObjectAccess soa(env_);
+ mirror::ArtMethod* method;
+ mirror::Object* receiver;
+ ReflectionTestMakeExecutable(&method, &receiver, is_static, "identity", "(I)I");
+ jvalue args[1];
+
+ args[0].i = 0;
+ JValue result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(0, result.GetI());
+
+ args[0].i = -1;
+ result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(-1, result.GetI());
+
+ args[0].i = INT_MAX;
+ result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(INT_MAX, result.GetI());
+
+ args[0].i = INT_MIN;
+ result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(INT_MIN, result.GetI());
+ }
+
+ void InvokeIdentityDoubleMethod(bool is_static) {
+ ScopedObjectAccess soa(env_);
+ mirror::ArtMethod* method;
+ mirror::Object* receiver;
+ ReflectionTestMakeExecutable(&method, &receiver, is_static, "identity", "(D)D");
+ jvalue args[1];
+
+ args[0].d = 0.0;
+ JValue result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(0.0, result.GetD());
+
+ args[0].d = -1.0;
+ result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(-1.0, result.GetD());
+
+ args[0].d = DBL_MAX;
+ result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(DBL_MAX, result.GetD());
+
+ args[0].d = DBL_MIN;
+ result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(DBL_MIN, result.GetD());
+ }
+
+ void InvokeSumIntIntMethod(bool is_static) {
+ ScopedObjectAccess soa(env_);
+ mirror::ArtMethod* method;
+ mirror::Object* receiver;
+ ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(II)I");
+ jvalue args[2];
+
+ args[0].i = 1;
+ args[1].i = 2;
+ JValue result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(3, result.GetI());
+
+ args[0].i = -2;
+ args[1].i = 5;
+ result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(3, result.GetI());
+
+ args[0].i = INT_MAX;
+ args[1].i = INT_MIN;
+ result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(-1, result.GetI());
+
+ args[0].i = INT_MAX;
+ args[1].i = INT_MAX;
+ result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(-2, result.GetI());
+ }
+
+ void InvokeSumIntIntIntMethod(bool is_static) {
+ ScopedObjectAccess soa(env_);
+ mirror::ArtMethod* method;
+ mirror::Object* receiver;
+ ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(III)I");
+ jvalue args[3];
+
+ args[0].i = 0;
+ args[1].i = 0;
+ args[2].i = 0;
+ JValue result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(0, result.GetI());
+
+ args[0].i = 1;
+ args[1].i = 2;
+ args[2].i = 3;
+ result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(6, result.GetI());
+
+ args[0].i = -1;
+ args[1].i = 2;
+ args[2].i = -3;
+ result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(-2, result.GetI());
+
+ args[0].i = INT_MAX;
+ args[1].i = INT_MIN;
+ args[2].i = INT_MAX;
+ result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(2147483646, result.GetI());
+
+ args[0].i = INT_MAX;
+ args[1].i = INT_MAX;
+ args[2].i = INT_MAX;
+ result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(2147483645, result.GetI());
+ }
+
+ void InvokeSumIntIntIntIntMethod(bool is_static) {
+ ScopedObjectAccess soa(env_);
+ mirror::ArtMethod* method;
+ mirror::Object* receiver;
+ ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(IIII)I");
+ jvalue args[4];
+
+ args[0].i = 0;
+ args[1].i = 0;
+ args[2].i = 0;
+ args[3].i = 0;
+ JValue result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(0, result.GetI());
+
+ args[0].i = 1;
+ args[1].i = 2;
+ args[2].i = 3;
+ args[3].i = 4;
+ result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(10, result.GetI());
+
+ args[0].i = -1;
+ args[1].i = 2;
+ args[2].i = -3;
+ args[3].i = 4;
+ result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(2, result.GetI());
+
+ args[0].i = INT_MAX;
+ args[1].i = INT_MIN;
+ args[2].i = INT_MAX;
+ args[3].i = INT_MIN;
+ result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(-2, result.GetI());
+
+ args[0].i = INT_MAX;
+ args[1].i = INT_MAX;
+ args[2].i = INT_MAX;
+ args[3].i = INT_MAX;
+ result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(-4, result.GetI());
+ }
+
+ void InvokeSumIntIntIntIntIntMethod(bool is_static) {
+ ScopedObjectAccess soa(env_);
+ mirror::ArtMethod* method;
+ mirror::Object* receiver;
+ ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(IIIII)I");
+ jvalue args[5];
+
+ args[0].i = 0;
+ args[1].i = 0;
+ args[2].i = 0;
+ args[3].i = 0;
+ args[4].i = 0;
+ JValue result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(0, result.GetI());
+
+ args[0].i = 1;
+ args[1].i = 2;
+ args[2].i = 3;
+ args[3].i = 4;
+ args[4].i = 5;
+ result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(15, result.GetI());
+
+ args[0].i = -1;
+ args[1].i = 2;
+ args[2].i = -3;
+ args[3].i = 4;
+ args[4].i = -5;
+ result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(-3, result.GetI());
+
+ args[0].i = INT_MAX;
+ args[1].i = INT_MIN;
+ args[2].i = INT_MAX;
+ args[3].i = INT_MIN;
+ args[4].i = INT_MAX;
+ result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(2147483645, result.GetI());
+
+ args[0].i = INT_MAX;
+ args[1].i = INT_MAX;
+ args[2].i = INT_MAX;
+ args[3].i = INT_MAX;
+ args[4].i = INT_MAX;
+ result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(2147483643, result.GetI());
+ }
+
+ void InvokeSumDoubleDoubleMethod(bool is_static) {
+ ScopedObjectAccess soa(env_);
+ mirror::ArtMethod* method;
+ mirror::Object* receiver;
+ ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(DD)D");
+ jvalue args[2];
+
+ args[0].d = 0.0;
+ args[1].d = 0.0;
+ JValue result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(0.0, result.GetD());
+
+ args[0].d = 1.0;
+ args[1].d = 2.0;
+ result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(3.0, result.GetD());
+
+ args[0].d = 1.0;
+ args[1].d = -2.0;
+ result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(-1.0, result.GetD());
+
+ args[0].d = DBL_MAX;
+ args[1].d = DBL_MIN;
+ result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(1.7976931348623157e308, result.GetD());
+
+ args[0].d = DBL_MAX;
+ args[1].d = DBL_MAX;
+ result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(INFINITY, result.GetD());
+ }
+
+ void InvokeSumDoubleDoubleDoubleMethod(bool is_static) {
+ ScopedObjectAccess soa(env_);
+ mirror::ArtMethod* method;
+ mirror::Object* receiver;
+ ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(DDD)D");
+ jvalue args[3];
+
+ args[0].d = 0.0;
+ args[1].d = 0.0;
+ args[2].d = 0.0;
+ JValue result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(0.0, result.GetD());
+
+ args[0].d = 1.0;
+ args[1].d = 2.0;
+ args[2].d = 3.0;
+ result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(6.0, result.GetD());
+
+ args[0].d = 1.0;
+ args[1].d = -2.0;
+ args[2].d = 3.0;
+ result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(2.0, result.GetD());
+ }
+
+ void InvokeSumDoubleDoubleDoubleDoubleMethod(bool is_static) {
+ ScopedObjectAccess soa(env_);
+ mirror::ArtMethod* method;
+ mirror::Object* receiver;
+ ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(DDDD)D");
+ jvalue args[4];
+
+ args[0].d = 0.0;
+ args[1].d = 0.0;
+ args[2].d = 0.0;
+ args[3].d = 0.0;
+ JValue result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(0.0, result.GetD());
+
+ args[0].d = 1.0;
+ args[1].d = 2.0;
+ args[2].d = 3.0;
+ args[3].d = 4.0;
+ result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(10.0, result.GetD());
+
+ args[0].d = 1.0;
+ args[1].d = -2.0;
+ args[2].d = 3.0;
+ args[3].d = -4.0;
+ result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(-2.0, result.GetD());
+ }
+
+ void InvokeSumDoubleDoubleDoubleDoubleDoubleMethod(bool is_static) {
+ ScopedObjectAccess soa(env_);
+ mirror::ArtMethod* method;
+ mirror::Object* receiver;
+ ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(DDDDD)D");
+ jvalue args[5];
+
+ args[0].d = 0.0;
+ args[1].d = 0.0;
+ args[2].d = 0.0;
+ args[3].d = 0.0;
+ args[4].d = 0.0;
+ JValue result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(0.0, result.GetD());
+
+ args[0].d = 1.0;
+ args[1].d = 2.0;
+ args[2].d = 3.0;
+ args[3].d = 4.0;
+ args[4].d = 5.0;
+ result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(15.0, result.GetD());
+
+ args[0].d = 1.0;
+ args[1].d = -2.0;
+ args[2].d = 3.0;
+ args[3].d = -4.0;
+ args[4].d = 5.0;
+ result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ EXPECT_EQ(3.0, result.GetD());
+ }
+
+ JavaVMExt* vm_;
+ JNIEnv* env_;
+ jclass aioobe_;
+ jclass ase_;
+ jclass sioobe_;
+};
+
+TEST_F(ReflectionTest, StaticMainMethod) {
+ TEST_DISABLED_FOR_PORTABLE();
+ ScopedObjectAccess soa(Thread::Current());
+ jobject jclass_loader = LoadDex("Main");
+ SirtRef<mirror::ClassLoader>
+ class_loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(jclass_loader));
+ CompileDirectMethod(class_loader, "Main", "main", "([Ljava/lang/String;)V");
+
+ mirror::Class* klass = class_linker_->FindClass(soa.Self(), "LMain;", class_loader);
+ ASSERT_TRUE(klass != NULL);
+
+ mirror::ArtMethod* method = klass->FindDirectMethod("main", "([Ljava/lang/String;)V");
+ ASSERT_TRUE(method != NULL);
+
+ // Start runtime.
+ bool started = runtime_->Start();
+ CHECK(started);
+ soa.Self()->TransitionFromSuspendedToRunnable();
+
+ jvalue args[1];
+ args[0].l = nullptr;
+ InvokeWithJValues(soa, nullptr, soa.EncodeMethod(method), args);
+}
+
+TEST_F(ReflectionTest, StaticNopMethod) {
+ TEST_DISABLED_FOR_PORTABLE();
+ InvokeNopMethod(true);
+}
+
+TEST_F(ReflectionTest, NonStaticNopMethod) {
+ TEST_DISABLED_FOR_PORTABLE();
+ InvokeNopMethod(false);
+}
+
+TEST_F(ReflectionTest, StaticIdentityByteMethod) {
+ TEST_DISABLED_FOR_PORTABLE();
+ InvokeIdentityByteMethod(true);
+}
+
+TEST_F(ReflectionTest, NonStaticIdentityByteMethod) {
+ TEST_DISABLED_FOR_PORTABLE();
+ InvokeIdentityByteMethod(false);
+}
+
+TEST_F(ReflectionTest, StaticIdentityIntMethod) {
+ TEST_DISABLED_FOR_PORTABLE();
+ InvokeIdentityIntMethod(true);
+}
+
+TEST_F(ReflectionTest, NonStaticIdentityIntMethod) {
+ TEST_DISABLED_FOR_PORTABLE();
+ InvokeIdentityIntMethod(false);
+}
+
+TEST_F(ReflectionTest, StaticIdentityDoubleMethod) {
+ TEST_DISABLED_FOR_PORTABLE();
+ InvokeIdentityDoubleMethod(true);
+}
+
+TEST_F(ReflectionTest, NonStaticIdentityDoubleMethod) {
+ TEST_DISABLED_FOR_PORTABLE();
+ InvokeIdentityDoubleMethod(false);
+}
+
+TEST_F(ReflectionTest, StaticSumIntIntMethod) {
+ TEST_DISABLED_FOR_PORTABLE();
+ InvokeSumIntIntMethod(true);
+}
+
+TEST_F(ReflectionTest, NonStaticSumIntIntMethod) {
+ TEST_DISABLED_FOR_PORTABLE();
+ InvokeSumIntIntMethod(false);
+}
+
+TEST_F(ReflectionTest, StaticSumIntIntIntMethod) {
+ TEST_DISABLED_FOR_PORTABLE();
+ InvokeSumIntIntIntMethod(true);
+}
+
+TEST_F(ReflectionTest, NonStaticSumIntIntIntMethod) {
+ TEST_DISABLED_FOR_PORTABLE();
+ InvokeSumIntIntIntMethod(false);
+}
+
+TEST_F(ReflectionTest, StaticSumIntIntIntIntMethod) {
+ TEST_DISABLED_FOR_PORTABLE();
+ InvokeSumIntIntIntIntMethod(true);
+}
+
+TEST_F(ReflectionTest, NonStaticSumIntIntIntIntMethod) {
+ TEST_DISABLED_FOR_PORTABLE();
+ InvokeSumIntIntIntIntMethod(false);
+}
+
+TEST_F(ReflectionTest, StaticSumIntIntIntIntIntMethod) {
+ TEST_DISABLED_FOR_PORTABLE();
+ InvokeSumIntIntIntIntIntMethod(true);
+}
+
+TEST_F(ReflectionTest, NonStaticSumIntIntIntIntIntMethod) {
+ TEST_DISABLED_FOR_PORTABLE();
+ InvokeSumIntIntIntIntIntMethod(false);
+}
+
+TEST_F(ReflectionTest, StaticSumDoubleDoubleMethod) {
+ TEST_DISABLED_FOR_PORTABLE();
+ InvokeSumDoubleDoubleMethod(true);
+}
+
+TEST_F(ReflectionTest, NonStaticSumDoubleDoubleMethod) {
+ TEST_DISABLED_FOR_PORTABLE();
+ InvokeSumDoubleDoubleMethod(false);
+}
+
+TEST_F(ReflectionTest, StaticSumDoubleDoubleDoubleMethod) {
+ TEST_DISABLED_FOR_PORTABLE();
+ InvokeSumDoubleDoubleDoubleMethod(true);
+}
+
+TEST_F(ReflectionTest, NonStaticSumDoubleDoubleDoubleMethod) {
+ TEST_DISABLED_FOR_PORTABLE();
+ InvokeSumDoubleDoubleDoubleMethod(false);
+}
+
+TEST_F(ReflectionTest, StaticSumDoubleDoubleDoubleDoubleMethod) {
+ TEST_DISABLED_FOR_PORTABLE();
+ InvokeSumDoubleDoubleDoubleDoubleMethod(true);
+}
+
+TEST_F(ReflectionTest, NonStaticSumDoubleDoubleDoubleDoubleMethod) {
+ TEST_DISABLED_FOR_PORTABLE();
+ InvokeSumDoubleDoubleDoubleDoubleMethod(false);
+}
+
+TEST_F(ReflectionTest, StaticSumDoubleDoubleDoubleDoubleDoubleMethod) {
+ TEST_DISABLED_FOR_PORTABLE();
+ InvokeSumDoubleDoubleDoubleDoubleDoubleMethod(true);
+}
+
+TEST_F(ReflectionTest, NonStaticSumDoubleDoubleDoubleDoubleDoubleMethod) {
+ TEST_DISABLED_FOR_PORTABLE();
+ InvokeSumDoubleDoubleDoubleDoubleDoubleMethod(false);
+}
+
+} // namespace art
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index d1c83709e8..51edc85b10 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -42,7 +42,6 @@
#include "image.h"
#include "instrumentation.h"
#include "intern_table.h"
-#include "invoke_arg_array_builder.h"
#include "jni_internal.h"
#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
@@ -54,6 +53,7 @@
#include "monitor.h"
#include "parsed_options.h"
#include "oat_file.h"
+#include "reflection.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "signal_catcher.h"
@@ -123,7 +123,10 @@ Runtime::Runtime()
system_thread_group_(nullptr),
system_class_loader_(nullptr),
dump_gc_performance_on_shutdown_(false),
- preinitialization_transaction(nullptr) {
+ preinitialization_transaction(nullptr),
+ null_pointer_handler_(nullptr),
+ suspend_handler_(nullptr),
+ stack_overflow_handler_(nullptr) {
for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
callee_save_methods_[i] = nullptr;
}
@@ -170,6 +173,10 @@ Runtime::~Runtime() {
// TODO: acquire a static mutex on Runtime to avoid racing.
CHECK(instance_ == nullptr || instance_ == this);
instance_ = nullptr;
+
+ delete null_pointer_handler_;
+ delete suspend_handler_;
+ delete stack_overflow_handler_;
}
struct AbortState {
@@ -309,9 +316,7 @@ jobject CreateSystemClassLoader() {
class_loader_class->FindDirectMethod("getSystemClassLoader", "()Ljava/lang/ClassLoader;");
CHECK(getSystemClassLoader != NULL);
- JValue result;
- ArgArray arg_array(nullptr, 0);
- InvokeWithArgArray(soa, getSystemClassLoader, &arg_array, &result, "L");
+ JValue result = InvokeWithJValues(soa, nullptr, soa.EncodeMethod(getSystemClassLoader), nullptr);
SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
down_cast<mirror::ClassLoader*>(result.GetL()));
CHECK(class_loader.get() != nullptr);
@@ -515,6 +520,27 @@ bool Runtime::Init(const Options& raw_options, bool ignore_unrecognized) {
GetInstrumentation()->ForceInterpretOnly();
}
+ if (options->explicit_checks_ != (ParsedOptions::kExplicitSuspendCheck |
+ ParsedOptions::kExplicitNullCheck |
+ ParsedOptions::kExplicitStackOverflowCheck)) {
+ // Initialize the fault manager.
+ fault_manager.Init();
+
+    // These need to be in a specific order. The null pointer check handler
+    // must be the last in the list.
+ if ((options->explicit_checks_ & ParsedOptions::kExplicitSuspendCheck) == 0) {
+ suspend_handler_ = new SuspensionHandler(&fault_manager);
+ }
+
+ if ((options->explicit_checks_ & ParsedOptions::kExplicitStackOverflowCheck) == 0) {
+ stack_overflow_handler_ = new StackOverflowHandler(&fault_manager);
+ }
+
+ if ((options->explicit_checks_ & ParsedOptions::kExplicitNullCheck) == 0) {
+ null_pointer_handler_ = new NullPointerHandler(&fault_manager);
+ }
+ }
+
heap_ = new gc::Heap(options->heap_initial_size_,
options->heap_growth_limit_,
options->heap_min_free_,
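The option test above enables the fault manager whenever at least one of the three checks is left implicit. A sketch of the idea, with stand-in constants for ParsedOptions' kExplicit* bits:

    // Each set bit means "emit this check explicitly in compiled code"; a
    // cleared bit means the runtime relies on a SIGSEGV handler instead.
    enum ExplicitCheckBits { kSuspend = 1, kNull = 2, kStackOverflow = 4 };

    bool NeedsFaultManager(uint32_t explicit_checks) {
      const uint32_t kAll = kSuspend | kNull | kStackOverflow;
      return explicit_checks != kAll;  // Any implicit check needs a handler.
    }

The installation order matters presumably because the handlers are consulted in turn on a fault: the null pointer handler is the least specific, so it has to come last.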
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 109f031b4a..eeaaa2be2d 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -36,6 +36,7 @@
#include "object_callbacks.h"
#include "runtime_stats.h"
#include "safe_map.h"
+#include "fault_handler.h"
namespace art {
@@ -404,6 +405,18 @@ class Runtime {
return fault_message_;
}
+ bool ExplicitNullChecks() const {
+ return null_pointer_handler_ == nullptr;
+ }
+
+ bool ExplicitSuspendChecks() const {
+ return suspend_handler_ == nullptr;
+ }
+
+ bool ExplicitStackOverflowChecks() const {
+ return stack_overflow_handler_ == nullptr;
+ }
+
private:
static void InitPlatformSignalHandlers();
@@ -536,6 +549,9 @@ class Runtime {
// Transaction used for pre-initializing classes at compilation time.
Transaction* preinitialization_transaction;
+ NullPointerHandler* null_pointer_handler_;
+ SuspensionHandler* suspend_handler_;
+ StackOverflowHandler* stack_overflow_handler_;
DISALLOW_COPY_AND_ASSIGN(Runtime);
};
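These getters let code generators key off handler installation directly: a null handler object means the corresponding check stays explicit. A hypothetical emitter-side use (GenerateExplicitNullCheck is an invented helper name):

    // Choose between an explicit compare-and-branch and an implicit,
    // fault-handler-backed null check for a field access.
    if (Runtime::Current()->ExplicitNullChecks()) {
      GenerateExplicitNullCheck(obj_reg);  // hypothetical emitter helper
    } else {
      // Implicit: the field load at a small offset off obj_reg faults on
      // null, and the NullPointerHandler turns the SIGSEGV into an NPE.
    }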
diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h
index d9e7986efe..ebc545250a 100644
--- a/runtime/scoped_thread_state_change.h
+++ b/runtime/scoped_thread_state_change.h
@@ -171,28 +171,7 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange {
DCHECK_NE((reinterpret_cast<uintptr_t>(obj) & 0xffff0000), 0xebad0000);
- IndirectReferenceTable& locals = Env()->locals;
-
- uint32_t cookie = Env()->local_ref_cookie;
- IndirectRef ref = locals.Add(cookie, obj);
-
-#if 0 // TODO: fix this to understand PushLocalFrame, so we can turn it on.
- if (Env()->check_jni) {
- size_t entry_count = locals.Capacity();
- if (entry_count > 16) {
- LOG(WARNING) << "Warning: more than 16 JNI local references: "
- << entry_count << " (most recent was a " << PrettyTypeOf(obj) << ")\n"
- << Dumpable<IndirectReferenceTable>(locals);
- // TODO: LOG(FATAL) in a later release?
- }
- }
-#endif
- if (Vm()->work_around_app_jni_bugs) {
- // Hand out direct pointers to support broken old apps.
- return reinterpret_cast<T>(obj);
- }
-
- return reinterpret_cast<T>(ref);
+ return Env()->AddLocalReference<T>(obj, Vm()->work_around_app_jni_bugs);
}
template<typename T>
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 15b288e58d..f397afa56b 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -92,11 +92,11 @@ StackVisitor::StackVisitor(Thread* thread, Context* context)
DCHECK(thread == Thread::Current() || thread->IsSuspended()) << *thread;
}
-uint32_t StackVisitor::GetDexPc() const {
+uint32_t StackVisitor::GetDexPc(bool abort_on_failure) const {
if (cur_shadow_frame_ != NULL) {
return cur_shadow_frame_->GetDexPC();
} else if (cur_quick_frame_ != NULL) {
- return GetMethod()->ToDexPc(cur_quick_frame_pc_);
+ return GetMethod()->ToDexPc(cur_quick_frame_pc_, abort_on_failure);
} else {
return 0;
}
@@ -108,17 +108,11 @@ mirror::Object* StackVisitor::GetThisObject() const {
return NULL;
} else if (m->IsNative()) {
if (cur_quick_frame_ != NULL) {
- if (m->GetEntryPointFromQuickCompiledCode() == GetQuickGenericJniTrampoline()) {
- UNIMPLEMENTED(ERROR) << "Failed to determine this object of native method: "
- << PrettyMethod(m);
- return nullptr;
- } else {
- StackIndirectReferenceTable* sirt =
- reinterpret_cast<StackIndirectReferenceTable*>(
- reinterpret_cast<char*>(cur_quick_frame_) +
- m->GetSirtOffsetInBytes());
- return sirt->GetReference(0);
- }
+ StackIndirectReferenceTable* sirt =
+ reinterpret_cast<StackIndirectReferenceTable*>(
+ reinterpret_cast<char*>(cur_quick_frame_) +
+ m->GetSirtOffsetInBytes());
+ return sirt->GetReference(0);
} else {
return cur_shadow_frame_->GetVRegReference(0);
}
diff --git a/runtime/stack.h b/runtime/stack.h
index f840f6776f..4ee5de1d89 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -520,7 +520,7 @@ class StackVisitor {
return cur_shadow_frame_ != nullptr;
}
- uint32_t GetDexPc() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uint32_t GetDexPc(bool abort_on_failure = true) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::Object* GetThisObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/stack_indirect_reference_table.h b/runtime/stack_indirect_reference_table.h
index c2d6a595fb..e6dda8557a 100644
--- a/runtime/stack_indirect_reference_table.h
+++ b/runtime/stack_indirect_reference_table.h
@@ -39,7 +39,7 @@ class StackIndirectReferenceTable {
~StackIndirectReferenceTable() {}
- // Number of references contained within this SIRT
+ // Number of references contained within this SIRT.
uint32_t NumberOfReferences() const {
return number_of_references_;
}
@@ -51,7 +51,13 @@ class StackIndirectReferenceTable {
return header_size + data_size;
}
- // Link to previous SIRT or NULL
+  // Get the size of the SIRT for the given number of entries, with padding added for alignment.
+ static size_t GetAlignedSirtSize(uint32_t num_references) {
+ size_t sirt_size = SizeOf(num_references);
+ return RoundUp(sirt_size, 8);
+ }
+
+ // Link to previous SIRT or NULL.
StackIndirectReferenceTable* GetLink() const {
return link_;
}
@@ -72,6 +78,12 @@ class StackIndirectReferenceTable {
return references_[i].AsMirrorPtr();
}
+ StackReference<mirror::Object>* GetStackReference(size_t i)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK_LT(i, number_of_references_);
+ return &references_[i];
+ }
+
void SetReference(size_t i, mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK_LT(i, number_of_references_);
references_[i].Assign(object);
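GetAlignedSirtSize exists so a SIRT can be reserved inside a quick frame without misaligning whatever is laid out after it. A small illustration of the relationship between the two sizes (num_refs is arbitrary):

    uint32_t num_refs = 3;
    // Exact size: header (link + count) plus one StackReference per entry.
    size_t exact = StackIndirectReferenceTable::SizeOf(num_refs);
    // Frame-layout size: the same, rounded up to an 8-byte boundary.
    size_t padded = StackIndirectReferenceTable::GetAlignedSirtSize(num_refs);
    DCHECK_EQ(padded, RoundUp(exact, 8));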
diff --git a/runtime/thread.cc b/runtime/thread.cc
index fbdf95f0e1..afa55740b6 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -45,7 +45,6 @@
#include "gc/accounting/card_table-inl.h"
#include "gc/heap.h"
#include "gc/space/space.h"
-#include "invoke_arg_array_builder.h"
#include "jni_internal.h"
#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
@@ -174,12 +173,7 @@ void* Thread::CreateCallback(void* arg) {
// Invoke the 'run' method of our java.lang.Thread.
mirror::Object* receiver = self->opeer_;
jmethodID mid = WellKnownClasses::java_lang_Thread_run;
- mirror::ArtMethod* m =
- receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(soa.DecodeMethod(mid));
- JValue result;
- ArgArray arg_array(nullptr, 0);
- arg_array.Append(receiver);
- m->Invoke(self, arg_array.GetArray(), arg_array.GetNumBytes(), &result, "V");
+ InvokeVirtualOrInterfaceWithJValues(soa, receiver, mid, nullptr);
}
// Detach and delete self.
Runtime::Current()->GetThreadList()->Unregister(self);
@@ -302,6 +296,7 @@ void Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm) {
SetUpAlternateSignalStack();
InitCpu();
InitTlsEntryPoints();
+ RemoveSuspendTrigger();
InitCardTable();
InitTid();
// Set pthread_self_ ahead of pthread_setspecific, that makes Thread::Current function, this
@@ -576,6 +571,7 @@ void Thread::ModifySuspendCount(Thread* self, int delta, bool for_debugger) {
AtomicClearFlag(kSuspendRequest);
} else {
AtomicSetFlag(kSuspendRequest);
+ TriggerSuspend();
}
}
@@ -643,6 +639,7 @@ bool Thread::RequestCheckpoint(Closure* function) {
checkpoint_functions_[available_checkpoint] = nullptr;
} else {
CHECK_EQ(ReadFlag(kCheckpointRequest), true);
+ TriggerSuspend();
}
return succeeded == 0;
}
@@ -1410,10 +1407,8 @@ jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessUnchecked& soa)
return soa.AddLocalReference<jobjectArray>(trace);
}
-jobjectArray Thread::InternalStackTraceToStackTraceElementArray(JNIEnv* env, jobject internal,
- jobjectArray output_array, int* stack_depth) {
- // Transition into runnable state to work on Object*/Array*
- ScopedObjectAccess soa(env);
+jobjectArray Thread::InternalStackTraceToStackTraceElementArray(const ScopedObjectAccess& soa,
+ jobject internal, jobjectArray output_array, int* stack_depth) {
// Decode the internal stack trace into the depth, method trace and PC trace
int32_t depth = soa.Decode<mirror::ObjectArray<mirror::Object>*>(internal)->GetLength() - 1;
@@ -1523,11 +1518,12 @@ void Thread::ThrowNewWrappedException(const ThrowLocation& throw_location,
const char* exception_class_descriptor,
const char* msg) {
DCHECK_EQ(this, Thread::Current());
+ ScopedObjectAccessUnchecked soa(this);
// Ensure we don't forget arguments over object allocation.
SirtRef<mirror::Object> saved_throw_this(this, throw_location.GetThis());
SirtRef<mirror::ArtMethod> saved_throw_method(this, throw_location.GetMethod());
// Ignore the cause throw location. TODO: should we report this as a re-throw?
- SirtRef<mirror::Throwable> cause(this, GetException(nullptr));
+ ScopedLocalRef<jobject> cause(GetJniEnv(), soa.AddLocalReference<jobject>(GetException(nullptr)));
ClearException();
Runtime* runtime = Runtime::Current();
@@ -1563,28 +1559,24 @@ void Thread::ThrowNewWrappedException(const ThrowLocation& throw_location,
// Choose an appropriate constructor and set up the arguments.
const char* signature;
- const char* shorty;
- SirtRef<mirror::String> msg_string(this, nullptr);
+ ScopedLocalRef<jstring> msg_string(GetJniEnv(), nullptr);
if (msg != nullptr) {
// Ensure we remember this and the method over the String allocation.
- msg_string.reset(mirror::String::AllocFromModifiedUtf8(this, msg));
+ msg_string.reset(
+ soa.AddLocalReference<jstring>(mirror::String::AllocFromModifiedUtf8(this, msg)));
if (UNLIKELY(msg_string.get() == nullptr)) {
CHECK(IsExceptionPending()); // OOME.
return;
}
if (cause.get() == nullptr) {
- shorty = "VL";
signature = "(Ljava/lang/String;)V";
} else {
- shorty = "VLL";
signature = "(Ljava/lang/String;Ljava/lang/Throwable;)V";
}
} else {
if (cause.get() == nullptr) {
- shorty = "V";
signature = "()V";
} else {
- shorty = "VL";
signature = "(Ljava/lang/Throwable;)V";
}
}
@@ -1599,25 +1591,27 @@ void Thread::ThrowNewWrappedException(const ThrowLocation& throw_location,
// case in the compiler. We won't be able to invoke the constructor of the exception, so set
// the exception fields directly.
if (msg != nullptr) {
- exception->SetDetailMessage(msg_string.get());
+ exception->SetDetailMessage(down_cast<mirror::String*>(DecodeJObject(msg_string.get())));
}
if (cause.get() != nullptr) {
- exception->SetCause(cause.get());
+ exception->SetCause(down_cast<mirror::Throwable*>(DecodeJObject(cause.get())));
}
ThrowLocation gc_safe_throw_location(saved_throw_this.get(), saved_throw_method.get(),
throw_location.GetDexPc());
SetException(gc_safe_throw_location, exception.get());
} else {
- ArgArray args(shorty, strlen(shorty));
- args.Append(exception.get());
+ jvalue jv_args[2];
+ size_t i = 0;
+
if (msg != nullptr) {
- args.Append(msg_string.get());
+ jv_args[i].l = msg_string.get();
+ ++i;
}
if (cause.get() != nullptr) {
- args.Append(cause.get());
+ jv_args[i].l = cause.get();
+ ++i;
}
- JValue result;
- exception_init_method->Invoke(this, args.GetArray(), args.GetNumBytes(), &result, shorty);
+ InvokeWithJValues(soa, exception.get(), soa.EncodeMethod(exception_init_method), jv_args);
if (LIKELY(!IsExceptionPending())) {
ThrowLocation gc_safe_throw_location(saved_throw_this.get(), saved_throw_method.get(),
throw_location.GetDexPc());
@@ -1774,6 +1768,7 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_
// DO_THREAD_OFFSET(top_of_managed_stack_);
// DO_THREAD_OFFSET(top_of_managed_stack_pc_);
DO_THREAD_OFFSET(top_sirt_);
+ DO_THREAD_OFFSET(suspend_trigger_);
#undef DO_THREAD_OFFSET
size_t entry_point_count = arraysize(gThreadEntryPointInfo);
diff --git a/runtime/thread.h b/runtime/thread.h
index 2ebc107942..fdf976d2bd 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -95,7 +95,12 @@ enum ThreadFlag {
class PACKED(4) Thread {
public:
// Space to throw a StackOverflowError in.
- static const size_t kStackOverflowReservedBytes = 16 * KB;
+#if __LP64__
+ // TODO: shrink reserved space, in particular for 64bit.
+ static constexpr size_t kStackOverflowReservedBytes = 24 * KB;
+#else
+ static constexpr size_t kStackOverflowReservedBytes = 16 * KB;
+#endif
// Creates a new native thread corresponding to the given managed peer.
// Used to implement Thread.start.
@@ -393,8 +398,9 @@ class PACKED(4) Thread {
// StackTraceElement[]. If output_array is NULL, a new array is created, otherwise as many
// frames as will fit are written into the given array. If stack_depth is non-NULL, it's updated
// with the number of valid frames in the returned array.
- static jobjectArray InternalStackTraceToStackTraceElementArray(JNIEnv* env, jobject internal,
- jobjectArray output_array = NULL, int* stack_depth = NULL);
+ static jobjectArray InternalStackTraceToStackTraceElementArray(const ScopedObjectAccess& soa,
+ jobject internal, jobjectArray output_array = nullptr, int* stack_depth = nullptr)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void VisitRoots(RootCallback* visitor, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -428,6 +434,10 @@ class PACKED(4) Thread {
return ThreadOffset(OFFSETOF_MEMBER(Thread, state_and_flags_));
}
+ static ThreadOffset ThreadSuspendTriggerOffset() {
+ return ThreadOffset(OFFSETOF_MEMBER(Thread, suspend_trigger_));
+ }
+
// Size of stack less any space reserved for stack overflow
size_t GetStackSize() const {
return stack_size_ - (stack_end_ - stack_begin_);
@@ -819,6 +829,10 @@ class PACKED(4) Thread {
PortableEntryPoints portable_entrypoints_;
QuickEntryPoints quick_entrypoints_;
+ // Setting this to 0 will trigger a SEGV and thus a suspend check. It is normally
+ // set to the address of itself.
+ uintptr_t* suspend_trigger_;
+
// How many times has our pthread key's destructor been called?
uint32_t thread_exit_check_count_;
@@ -833,6 +847,20 @@ class PACKED(4) Thread {
mirror::Object* AllocTlab(size_t bytes);
void SetTlab(byte* start, byte* end);
+ // Remove the suspend trigger for this thread by making the suspend_trigger_ TLS value
+ // equal to a valid pointer.
+  // TODO: does this need to be atomic?  I don't think so.
+ void RemoveSuspendTrigger() {
+ suspend_trigger_ = reinterpret_cast<uintptr_t*>(&suspend_trigger_);
+ }
+
+ // Trigger a suspend check by making the suspend_trigger_ TLS value an invalid pointer.
+  // The next time a suspend check is done, it will load through this null pointer
+  // and trigger a SIGSEGV.
+ void TriggerSuspend() {
+ suspend_trigger_ = nullptr;
+ }
+
// Thread-local rosalloc runs. There are 34 size brackets in rosalloc
// runs (RosAlloc::kNumOfSizeBrackets). We can't refer to the
// RosAlloc class due to a header file circular dependency issue.
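
The suspend_trigger_ field added above implements implicit suspend checks: the pointer normally points at itself, so a check is a harmless load, and TriggerSuspend() nulls it so the next check faults and the SIGSEGV handler (see the arch-specific fault_handler files in this change) performs the actual suspension. A minimal self-contained sketch of the idea, with hypothetical names rather than ART's real fault-handler wiring:

#include <cstdint>

struct SketchThread {
  // Normally self-pointing, exactly like RemoveSuspendTrigger() above.
  uintptr_t* suspend_trigger = reinterpret_cast<uintptr_t*>(&suspend_trigger);

  // What TriggerSuspend() does: make the next check dereference nullptr.
  void Trigger() { suspend_trigger = nullptr; }

  // A compiled-code suspend check is just a load through the trigger; when the
  // trigger is cleared, the load raises SIGSEGV and the handler suspends the
  // thread instead of crashing.
  void SuspendCheck() {
    volatile uintptr_t probe = *suspend_trigger;
    (void)probe;
  }
};
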
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index ac5750b01c..ec610e1920 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -753,6 +753,7 @@ void ThreadList::Register(Thread* self) {
self->debug_suspend_count_ = debug_suspend_all_count_;
if (self->suspend_count_ > 0) {
self->AtomicSetFlag(kSuspendRequest);
+ self->TriggerSuspend();
}
CHECK(!Contains(self));
list_.push_back(self);
diff --git a/runtime/transaction_test.cc b/runtime/transaction_test.cc
index 2e6ce4fe71..76b6f270d9 100644
--- a/runtime/transaction_test.cc
+++ b/runtime/transaction_test.cc
@@ -17,7 +17,6 @@
#include "transaction.h"
#include "common_runtime_test.h"
-#include "invoke_arg_array_builder.h"
#include "mirror/array-inl.h"
#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
diff --git a/test/044-proxy/expected.txt b/test/044-proxy/expected.txt
index 12df250b95..105d9296f7 100644
--- a/test/044-proxy/expected.txt
+++ b/test/044-proxy/expected.txt
@@ -54,6 +54,7 @@ Proxy interfaces: [interface Quads, interface Colors, interface Trace]
Proxy methods: [public final java.lang.String $Proxy1.blob(), public final double $Proxy1.blue(int), public final R0a $Proxy1.checkMe(), public final R0aa $Proxy1.checkMe(), public final R0base $Proxy1.checkMe(), public final void $Proxy1.circle(int), public final boolean $Proxy1.equals(java.lang.Object), public final void $Proxy1.getTrace(), public final int $Proxy1.green(double), public final int $Proxy1.hashCode(), public final int $Proxy1.mauve(java.lang.String), public final int $Proxy1.rectangle(int,int), public final int $Proxy1.red(float), public final int $Proxy1.square(int,int), public final java.lang.String $Proxy1.toString(), public final int $Proxy1.trapezoid(int,double,int), public final void $Proxy1.upCheck() throws java.lang.InterruptedException, public final void $Proxy1.upChuck()]
Decl annos: []
Param annos (0) : []
+Modifiers: 17
Dupe threw expected exception
Clash threw expected exception
Clash2 threw expected exception
diff --git a/test/044-proxy/src/BasicTest.java b/test/044-proxy/src/BasicTest.java
index ea46f49f2b..d4ce71fa11 100644
--- a/test/044-proxy/src/BasicTest.java
+++ b/test/044-proxy/src/BasicTest.java
@@ -90,6 +90,7 @@ public class BasicTest {
Annotation[][] paramAnnos = meth.getParameterAnnotations();
System.out.println("Param annos (" + paramAnnos.length + ") : "
+ Arrays.deepToString(paramAnnos));
+ System.out.println("Modifiers: " + meth.getModifiers());
}
static Object createProxy(Object proxyMe) {
@@ -244,14 +245,15 @@ class MyInvocationHandler implements InvocationHandler {
// invocation of toString() in the print statements below.
if (method.getDeclaringClass() == java.lang.Object.class) {
//System.out.println("!!! object " + method.getName());
- if (method.getName().equals("toString"))
+ if (method.getName().equals("toString")) {
return super.toString();
- else if (method.getName().equals("hashCode"))
+ } else if (method.getName().equals("hashCode")) {
return Integer.valueOf(super.hashCode());
- else if (method.getName().equals("equals"))
+ } else if (method.getName().equals("equals")) {
return Boolean.valueOf(super.equals(args[0]));
- else
+ } else {
throw new RuntimeException("huh?");
+ }
}
if (method.getDeclaringClass() == Trace.class) {
@@ -277,10 +279,11 @@ class MyInvocationHandler implements InvocationHandler {
}
try {
- if (true)
+ if (true) {
result = method.invoke(mObj, args);
- else
+ } else {
result = -1;
+ }
System.out.println("Success: method " + method.getName()
+ " res=" + result);
} catch (InvocationTargetException ite) {
diff --git a/test/082-inline-execute/src/Main.java b/test/082-inline-execute/src/Main.java
index f4d2dd114a..86a03abca2 100644
--- a/test/082-inline-execute/src/Main.java
+++ b/test/082-inline-execute/src/Main.java
@@ -96,8 +96,10 @@ public class Main {
}
}
+ static int start;
public static void test_String_indexOf() {
String str0 = "";
+ String str1 = "/";
String str3 = "abc";
String str10 = "abcdefghij";
String str40 = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabc";
@@ -122,6 +124,7 @@ public class Main {
Assert.assertEquals(str0.indexOf('a',20), -1);
Assert.assertEquals(str0.indexOf('a',0), -1);
Assert.assertEquals(str0.indexOf('a',-1), -1);
+ Assert.assertEquals(str1.indexOf('/',++start), -1);
Assert.assertEquals(str3.indexOf('a',0), 0);
Assert.assertEquals(str3.indexOf('a',1), -1);
Assert.assertEquals(str3.indexOf('a',1234), -1);
diff --git a/test/202-thread-oome/src/Main.java b/test/202-thread-oome/src/Main.java
index bacb842855..f7df93be20 100644
--- a/test/202-thread-oome/src/Main.java
+++ b/test/202-thread-oome/src/Main.java
@@ -16,7 +16,7 @@
public class Main {
public static void main(String[] args) throws Exception {
- Thread t = new Thread(null, new Runnable() { public void run() {} }, "", 3*1024*1024*1024);
+ Thread t = new Thread(null, new Runnable() { public void run() {} }, "", 3L*1024*1024*1024);
try {
t.start();
} catch (OutOfMemoryError expected) {
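
The 3L suffix above fixes a classic overflow: without it, 3*1024*1024*1024 is computed in 32-bit int arithmetic and wraps negative before being widened to the long stackSize parameter, so the test never actually requested a 3 GiB stack. A C++ analogue of the pitfall (illustrative only; signed int overflow wraps in Java but is undefined behavior in C++):

#include <cstdio>

int main() {
  // 64-bit arithmetic from the start: the LL suffix plays the role of Java's 3L.
  long long stack_size = 3LL * 1024 * 1024 * 1024;
  // long long broken = 3 * 1024 * 1024 * 1024;  // int overflow before widening
  std::printf("stack_size=%lld\n", stack_size);  // prints 3221225472
  return 0;
}
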
diff --git a/test/302-float-conversion/expected.txt b/test/302-float-conversion/expected.txt
index 6939a5cccb..7d5c1eba62 100644
--- a/test/302-float-conversion/expected.txt
+++ b/test/302-float-conversion/expected.txt
@@ -1 +1,2 @@
-Result is as expected
+Iteration Result is as expected
+inter4:2.0
diff --git a/test/302-float-conversion/info.txt b/test/302-float-conversion/info.txt
index 2b8bc2174c..eb3d6c43ae 100644
--- a/test/302-float-conversion/info.txt
+++ b/test/302-float-conversion/info.txt
@@ -2,3 +2,4 @@ Tests whether constant conversions of double values to long values are
properly handled by the VM. For example, x86 systems using the x87 stack
should not overflow under constant conversions.
+The second test checks the load hoisting optimization for floating point conversion.
\ No newline at end of file
diff --git a/test/302-float-conversion/src/Main.java b/test/302-float-conversion/src/Main.java
index dc512c5dcf..afc5e976d9 100644
--- a/test/302-float-conversion/src/Main.java
+++ b/test/302-float-conversion/src/Main.java
@@ -19,6 +19,11 @@ public class Main {
static volatile double negInfinity = Double.NEGATIVE_INFINITY;
public static void main(String args[]) {
+ test1();
+ test2();
+ }
+
+ public static void test1() {
long sumInf = 0;
long sumRes = 0;
@@ -35,9 +40,19 @@ public class Main {
}
if (sumRes == NUM_ITERATIONS / 2) {
- System.out.println("Result is as expected");
+ System.out.println("Iteration Result is as expected");
} else {
System.out.println("Conversions failed over " + NUM_ITERATIONS + " iterations");
}
}
+
+ public static void test2() {
+ long a = 1L;
+ long b = 2L;
+
+ float inter3 = a;
+ float inter4 = b;
+ System.out.println("inter4:" + inter4);
+ }
+
}
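
test2() above exercises long-to-float widening under the load hoisting optimization mentioned in info.txt; the expected output only checks inter4, but both conversions must read their own source value. A rough C++ rendering of what the test computes (my reading of the test's intent, not ART documentation):

#include <cstdio>

int main() {
  long long a = 1LL;
  long long b = 2LL;
  // Each widening conversion must load its own source; a buggy hoisting pass
  // that reused the first converted value would make inter4 print 1.0.
  float inter3 = static_cast<float>(a);
  float inter4 = static_cast<float>(b);
  std::printf("inter3:%.1f\ninter4:%.1f\n", inter3, inter4);
  return 0;
}
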
diff --git a/test/Android.mk b/test/Android.mk
index f4a0426bf1..da5b35f64b 100644
--- a/test/Android.mk
+++ b/test/Android.mk
@@ -44,8 +44,7 @@ TEST_DEX_DIRECTORIES := \
TEST_OAT_DIRECTORIES := \
Main \
HelloWorld \
- \
- InterfaceTest \
+ InterfaceTest \
JniTest \
NativeAllocations \
ParallelGC \
@@ -110,12 +109,12 @@ define declare-test-art-oat-targets
test-art-target-oat-$(1): $(ART_TEST_OUT)/oat-test-dex-$(1).jar test-art-target-sync
adb shell touch $(ART_TEST_DIR)/test-art-target-oat-$(1)
adb shell rm $(ART_TEST_DIR)/test-art-target-oat-$(1)
- adb shell sh -c "/system/bin/dalvikvm -XXlib:libartd.so -Ximage:$(ART_TEST_DIR)/core.art -classpath $(ART_TEST_DIR)/oat-test-dex-$(1).jar -Djava.library.path=$(ART_TEST_DIR) $(1) $(2) && touch $(ART_TEST_DIR)/test-art-target-oat-$(1)"
+ adb shell sh -c "/system/bin/dalvikvm $(DALVIKVM_FLAGS) -XXlib:libartd.so -Ximage:$(ART_TEST_DIR)/core.art -classpath $(ART_TEST_DIR)/oat-test-dex-$(1).jar -Djava.library.path=$(ART_TEST_DIR) $(1) $(2) && touch $(ART_TEST_DIR)/test-art-target-oat-$(1)"
$(hide) (adb pull $(ART_TEST_DIR)/test-art-target-oat-$(1) /tmp/ && echo test-art-target-oat-$(1) PASSED) || (echo test-art-target-oat-$(1) FAILED && exit 1)
$(hide) rm /tmp/test-art-target-oat-$(1)
$(HOST_OUT_JAVA_LIBRARIES)/oat-test-dex-$(1).odex: $(HOST_OUT_JAVA_LIBRARIES)/oat-test-dex-$(1).jar $(HOST_CORE_IMG_OUT) | $(DEX2OAT)
- $(DEX2OAT) --runtime-arg -Xms16m --runtime-arg -Xmx16m --boot-image=$(HOST_CORE_IMG_OUT) --dex-file=$(PWD)/$$< --oat-file=$(PWD)/$$@ --instruction-set=$(HOST_ARCH) --host --android-root=$(HOST_OUT)
+ $(DEX2OAT) $(DEX2OAT_FLAGS) --runtime-arg -Xms16m --runtime-arg -Xmx16m --boot-image=$(HOST_CORE_IMG_OUT) --dex-file=$(PWD)/$$< --oat-file=$(PWD)/$$@ --instruction-set=$(ART_HOST_ARCH) --host --android-root=$(HOST_OUT)
.PHONY: test-art-host-oat-default-$(1)
test-art-host-oat-default-$(1): $(HOST_OUT_JAVA_LIBRARIES)/oat-test-dex-$(1).odex test-art-host-dependencies
@@ -123,7 +122,7 @@ test-art-host-oat-default-$(1): $(HOST_OUT_JAVA_LIBRARIES)/oat-test-dex-$(1).ode
ANDROID_DATA=/tmp/android-data/test-art-host-oat-default-$(1) \
ANDROID_ROOT=$(HOST_OUT) \
LD_LIBRARY_PATH=$(HOST_OUT_SHARED_LIBRARIES) \
- $(HOST_OUT_EXECUTABLES)/dalvikvm -XXlib:libartd.so -Ximage:$(shell pwd)/$(HOST_CORE_IMG_OUT) -classpath $(HOST_OUT_JAVA_LIBRARIES)/oat-test-dex-$(1).jar -Djava.library.path=$(HOST_OUT_SHARED_LIBRARIES) $(1) $(2) \
+ $(HOST_OUT_EXECUTABLES)/dalvikvm $(DALVIKVM_FLAGS) -XXlib:libartd.so -Ximage:$(shell pwd)/$(HOST_CORE_IMG_OUT) -classpath $(HOST_OUT_JAVA_LIBRARIES)/oat-test-dex-$(1).jar -Djava.library.path=$(HOST_OUT_SHARED_LIBRARIES) $(1) $(2) \
&& echo test-art-host-oat-default-$(1) PASSED || (echo test-art-host-oat-default-$(1) FAILED && exit 1)
$(hide) rm -r /tmp/android-data/test-art-host-oat-default-$(1)