Merge "dalvikvm: create the dalvikvm symlink to the primary version"
diff --git a/Android.mk b/Android.mk
index fe631d9..2196d59 100644
--- a/Android.mk
+++ b/Android.mk
@@ -150,9 +150,22 @@
########################################################################
# host test targets
+.PHONY: test-art-host-vixl
+ifneq ($(BUILD_HOST_64bit),)
+test-art-host-vixl: $(ANDROID_HOST_OUT)/bin/cctest_vixl
+ $(ANDROID_HOST_OUT)/bin/cctest_vixl --run_all
+ @echo vixl PASSED
+
+else
+# vixl test needs 64b host.
+test-art-host-vixl:
+ @echo vixl test only runnable with 64b host build.
+
+endif
+
# "mm test-art-host" to build and run all host tests
.PHONY: test-art-host
-test-art-host: test-art-host-gtest test-art-host-oat test-art-host-run-test
+test-art-host: test-art-host-gtest test-art-host-oat test-art-host-run-test test-art-host-vixl
@echo test-art-host PASSED
.PHONY: test-art-host-interpreter
diff --git a/build/Android.common.mk b/build/Android.common.mk
index 188ddb5..39601e4 100644
--- a/build/Android.common.mk
+++ b/build/Android.common.mk
@@ -116,9 +116,11 @@
ART_DEFAULT_GC_TYPE ?= CMS
ART_DEFAULT_GC_TYPE_CFLAGS := -DART_DEFAULT_GC_TYPE_IS_$(ART_DEFAULT_GC_TYPE)
-LLVM_ROOT_PATH := external/llvm
-# Don't fail a dalvik minimal host build.
--include $(LLVM_ROOT_PATH)/llvm.mk
+ifeq ($(ART_USE_PORTABLE_COMPILER),true)
+ LLVM_ROOT_PATH := external/llvm
+ # Don't fail a dalvik minimal host build.
+ -include $(LLVM_ROOT_PATH)/llvm.mk
+endif
# Clang build support.
# Target builds use GCC by default.
@@ -249,10 +251,19 @@
ifeq ($(TARGET_CPU_SMP),true)
ART_TARGET_CFLAGS += -DANDROID_SMP=1
else
- ART_TARGET_CFLAGS += -DANDROID_SMP=0
+ ifeq ($(TARGET_CPU_SMP),false)
+ ART_TARGET_CFLAGS += -DANDROID_SMP=0
+ else
+ $(warning TARGET_CPU_SMP should be (true|false), found $(TARGET_CPU_SMP))
+ # Make sure we emit barriers for the worst case.
+ ART_TARGET_CFLAGS += -DANDROID_SMP=1
+ endif
endif
ART_TARGET_CFLAGS += $(ART_DEFAULT_GC_TYPE_CFLAGS)
+# TODO: remove when target no longer implies stlport.
+ART_TARGET_CFLAGS += -DART_WITH_STLPORT=1
+
# DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES is set in ../build/core/dex_preopt.mk based on
# the TARGET_CPU_VARIANT
ifeq ($(DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES),)
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index c986c57..b157d8e 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -47,6 +47,7 @@
runtime/gc/space/rosalloc_space_random_test.cc \
runtime/gc/space/large_object_space_test.cc \
runtime/gtest_test.cc \
+ runtime/handle_scope_test.cc \
runtime/indenter_test.cc \
runtime/indirect_reference_table_test.cc \
runtime/instruction_set_test.cc \
@@ -62,8 +63,7 @@
runtime/utils_test.cc \
runtime/verifier/method_verifier_test.cc \
runtime/verifier/reg_type_test.cc \
- runtime/zip_archive_test.cc \
- runtime/stack_indirect_reference_table_test.cc
+ runtime/zip_archive_test.cc
COMPILER_GTEST_COMMON_SRC_FILES := \
runtime/jni_internal_test.cc \
@@ -79,6 +79,7 @@
compiler/optimizing/codegen_test.cc \
compiler/optimizing/dominator_test.cc \
compiler/optimizing/find_loops_test.cc \
+ compiler/optimizing/linearize_test.cc \
compiler/optimizing/liveness_test.cc \
compiler/optimizing/pretty_printer_test.cc \
compiler/optimizing/ssa_test.cc \
diff --git a/build/Android.libcxx.mk b/build/Android.libcxx.mk
index 3dd1eb7..f84e957 100644
--- a/build/Android.libcxx.mk
+++ b/build/Android.libcxx.mk
@@ -14,7 +14,10 @@
# limitations under the License.
#
+LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.libcxx.mk
+
ifneq ($(LOCAL_IS_HOST_MODULE),true)
include external/stlport/libstlport.mk
+ LOCAL_CFLAGS += -DART_WITH_STLPORT=1
# include external/libcxx/libcxx.mk
endif
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 1b70d59..8592aaa 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -82,6 +82,7 @@
optimizing/code_generator.cc \
optimizing/code_generator_arm.cc \
optimizing/code_generator_x86.cc \
+ optimizing/graph_visualizer.cc \
optimizing/nodes.cc \
optimizing/optimizing_compiler.cc \
optimizing/ssa_builder.cc \
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index 8bba84a..586c442 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -134,9 +134,6 @@
public:
// Create an OatMethod based on pointers (for unit tests).
OatFile::OatMethod CreateOatMethod(const void* code,
- const size_t frame_size_in_bytes,
- const uint32_t core_spill_mask,
- const uint32_t fp_spill_mask,
const uint8_t* gc_map) {
CHECK(code != nullptr);
const byte* base;
@@ -154,9 +151,6 @@
}
return OatFile::OatMethod(base,
code_offset,
- frame_size_in_bytes,
- core_spill_mask,
- fp_spill_mask,
gc_map_offset);
}
@@ -179,11 +173,14 @@
CHECK_NE(0u, code_size);
const std::vector<uint8_t>& vmap_table = compiled_method->GetVmapTable();
uint32_t vmap_table_offset = vmap_table.empty() ? 0u
- : sizeof(OatMethodHeader) + vmap_table.size();
+ : sizeof(OatQuickMethodHeader) + vmap_table.size();
const std::vector<uint8_t>& mapping_table = compiled_method->GetMappingTable();
uint32_t mapping_table_offset = mapping_table.empty() ? 0u
- : sizeof(OatMethodHeader) + vmap_table.size() + mapping_table.size();
- OatMethodHeader method_header(vmap_table_offset, mapping_table_offset, code_size);
+ : sizeof(OatQuickMethodHeader) + vmap_table.size() + mapping_table.size();
+ OatQuickMethodHeader method_header(mapping_table_offset, vmap_table_offset,
+ compiled_method->GetFrameSizeInBytes(),
+ compiled_method->GetCoreSpillMask(),
+ compiled_method->GetFpSpillMask(), code_size);
header_code_and_maps_chunks_.push_back(std::vector<uint8_t>());
std::vector<uint8_t>* chunk = &header_code_and_maps_chunks_.back();
@@ -207,11 +204,7 @@
const void* method_code = CompiledMethod::CodePointer(code_ptr,
compiled_method->GetInstructionSet());
LOG(INFO) << "MakeExecutable " << PrettyMethod(method) << " code=" << method_code;
- OatFile::OatMethod oat_method = CreateOatMethod(method_code,
- compiled_method->GetFrameSizeInBytes(),
- compiled_method->GetCoreSpillMask(),
- compiled_method->GetFpSpillMask(),
- nullptr);
+ OatFile::OatMethod oat_method = CreateOatMethod(method_code, nullptr);
oat_method.LinkMethod(method);
method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge);
} else {
@@ -220,28 +213,13 @@
if (!method->IsNative()) {
const void* method_code = kUsePortableCompiler ? GetPortableToInterpreterBridge()
: GetQuickToInterpreterBridge();
- OatFile::OatMethod oat_method = CreateOatMethod(method_code,
- kStackAlignment,
- 0,
- 0,
- nullptr);
+ OatFile::OatMethod oat_method = CreateOatMethod(method_code, nullptr);
oat_method.LinkMethod(method);
method->SetEntryPointFromInterpreter(interpreter::artInterpreterToInterpreterBridge);
} else {
const void* method_code = GetQuickGenericJniTrampoline();
- mirror::ArtMethod* callee_save_method = runtime_->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
- // Compute Sirt size, as Sirt goes into frame
- MethodHelper mh(method);
- uint32_t sirt_refs = mh.GetNumberOfReferenceArgsWithoutReceiver() + 1;
- uint32_t sirt_size = StackIndirectReferenceTable::SizeOf(sirt_refs);
-
- OatFile::OatMethod oat_method = CreateOatMethod(method_code,
- callee_save_method->GetFrameSizeInBytes() +
- sirt_size,
- callee_save_method->GetCoreSpillMask(),
- callee_save_method->GetFpSpillMask(),
- nullptr);
+ OatFile::OatMethod oat_method = CreateOatMethod(method_code, nullptr);
oat_method.LinkMethod(method);
method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge);
}
@@ -279,7 +257,8 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::string class_descriptor(DotToDescriptor(class_name));
Thread* self = Thread::Current();
- SirtRef<mirror::ClassLoader> loader(self, class_loader);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ClassLoader> loader(hs.NewHandle(class_loader));
mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), loader);
CHECK(klass != nullptr) << "Class not found " << class_name;
for (size_t i = 0; i < klass->NumDirectMethods(); i++) {
@@ -323,11 +302,12 @@
compiler_options_->SetCompilerFilter(CompilerOptions::kInterpretOnly);
#endif
+ runtime_->SetInstructionSet(instruction_set);
for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
if (!runtime_->HasCalleeSaveMethod(type)) {
runtime_->SetCalleeSaveMethod(
- runtime_->CreateCalleeSaveMethod(instruction_set, type), type);
+ runtime_->CreateCalleeSaveMethod(type), type);
}
}
@@ -373,7 +353,8 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::string class_descriptor(DotToDescriptor(class_name));
Thread* self = Thread::Current();
- SirtRef<mirror::ClassLoader> loader(self, class_loader);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ClassLoader> loader(hs.NewHandle(class_loader));
mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), loader);
CHECK(klass != nullptr) << "Class not found " << class_name;
for (size_t i = 0; i < klass->NumDirectMethods(); i++) {
@@ -393,7 +374,7 @@
timings.EndSplit();
}
- void CompileDirectMethod(SirtRef<mirror::ClassLoader>& class_loader, const char* class_name,
+ void CompileDirectMethod(Handle<mirror::ClassLoader>& class_loader, const char* class_name,
const char* method_name, const char* signature)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::string class_descriptor(DotToDescriptor(class_name));
@@ -406,7 +387,7 @@
CompileMethod(method);
}
- void CompileVirtualMethod(SirtRef<mirror::ClassLoader>& class_loader, const char* class_name,
+ void CompileVirtualMethod(Handle<mirror::ClassLoader>& class_loader, const char* class_name,
const char* method_name, const char* signature)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::string class_descriptor(DotToDescriptor(class_name));
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index 844b53c..c3c9961 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -22,7 +22,7 @@
#include "instruction_set.h"
#include "utils.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
namespace llvm {
class Function;
diff --git a/compiler/compilers.h b/compiler/compilers.h
index 3ca78c9..e523d64 100644
--- a/compiler/compilers.h
+++ b/compiler/compilers.h
@@ -73,7 +73,7 @@
class OptimizingCompiler FINAL : public QuickCompiler {
public:
- explicit OptimizingCompiler(CompilerDriver* driver) : QuickCompiler(driver) { }
+ explicit OptimizingCompiler(CompilerDriver* driver);
CompiledMethod* Compile(const DexFile::CodeItem* code_item,
uint32_t access_flags,
@@ -92,6 +92,8 @@
const DexFile& dex_file) const;
private:
+ UniquePtr<std::ostream> visualizer_output_;
+
DISALLOW_COPY_AND_ASSIGN(OptimizingCompiler);
};
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index d9d392f..144594e 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -78,31 +78,32 @@
0;
CompilationUnit::CompilationUnit(ArenaPool* pool)
- : compiler_driver(NULL),
- class_linker(NULL),
- dex_file(NULL),
- class_loader(NULL),
+ : compiler_driver(nullptr),
+ class_linker(nullptr),
+ dex_file(nullptr),
+ class_loader(nullptr),
class_def_idx(0),
method_idx(0),
- code_item(NULL),
+ code_item(nullptr),
access_flags(0),
invoke_type(kDirect),
- shorty(NULL),
+ shorty(nullptr),
disable_opt(0),
enable_debug(0),
verbose(false),
- compiler(NULL),
+ compiler(nullptr),
instruction_set(kNone),
+ target64(false),
num_dalvik_registers(0),
- insns(NULL),
+ insns(nullptr),
num_ins(0),
num_outs(0),
num_regs(0),
compiler_flip_match(false),
arena(pool),
arena_stack(pool),
- mir_graph(NULL),
- cg(NULL),
+ mir_graph(nullptr),
+ cg(nullptr),
timings("QuickCompiler", true, false) {
}
@@ -409,28 +410,315 @@
};
// TODO: Remove this when we are able to compile everything.
-static bool CanCompileShorty(const char* shorty) {
+int x86_64_support_list[] = {
+ Instruction::NOP,
+ // Instruction::MOVE,
+ // Instruction::MOVE_FROM16,
+ // Instruction::MOVE_16,
+ // Instruction::MOVE_WIDE,
+ // Instruction::MOVE_WIDE_FROM16,
+ // Instruction::MOVE_WIDE_16,
+ // Instruction::MOVE_OBJECT,
+ // Instruction::MOVE_OBJECT_FROM16,
+ // Instruction::MOVE_OBJECT_16,
+ // Instruction::MOVE_RESULT,
+ // Instruction::MOVE_RESULT_WIDE,
+ // Instruction::MOVE_RESULT_OBJECT,
+ // Instruction::MOVE_EXCEPTION,
+ Instruction::RETURN_VOID,
+ Instruction::RETURN,
+ // Instruction::RETURN_WIDE,
+ Instruction::RETURN_OBJECT,
+ // Instruction::CONST_4,
+ // Instruction::CONST_16,
+ // Instruction::CONST,
+ // Instruction::CONST_HIGH16,
+ // Instruction::CONST_WIDE_16,
+ // Instruction::CONST_WIDE_32,
+ // Instruction::CONST_WIDE,
+ // Instruction::CONST_WIDE_HIGH16,
+ // Instruction::CONST_STRING,
+ // Instruction::CONST_STRING_JUMBO,
+ // Instruction::CONST_CLASS,
+ // Instruction::MONITOR_ENTER,
+ // Instruction::MONITOR_EXIT,
+ // Instruction::CHECK_CAST,
+ // Instruction::INSTANCE_OF,
+ // Instruction::ARRAY_LENGTH,
+ // Instruction::NEW_INSTANCE,
+ // Instruction::NEW_ARRAY,
+ // Instruction::FILLED_NEW_ARRAY,
+ // Instruction::FILLED_NEW_ARRAY_RANGE,
+ // Instruction::FILL_ARRAY_DATA,
+ // Instruction::THROW,
+ // Instruction::GOTO,
+ // Instruction::GOTO_16,
+ // Instruction::GOTO_32,
+ // Instruction::PACKED_SWITCH,
+ // Instruction::SPARSE_SWITCH,
+ // Instruction::CMPL_FLOAT,
+ // Instruction::CMPG_FLOAT,
+ // Instruction::CMPL_DOUBLE,
+ // Instruction::CMPG_DOUBLE,
+ // Instruction::CMP_LONG,
+ // Instruction::IF_EQ,
+ // Instruction::IF_NE,
+ // Instruction::IF_LT,
+ // Instruction::IF_GE,
+ // Instruction::IF_GT,
+ // Instruction::IF_LE,
+ // Instruction::IF_EQZ,
+ // Instruction::IF_NEZ,
+ // Instruction::IF_LTZ,
+ // Instruction::IF_GEZ,
+ // Instruction::IF_GTZ,
+ // Instruction::IF_LEZ,
+ // Instruction::UNUSED_3E,
+ // Instruction::UNUSED_3F,
+ // Instruction::UNUSED_40,
+ // Instruction::UNUSED_41,
+ // Instruction::UNUSED_42,
+ // Instruction::UNUSED_43,
+ // Instruction::AGET,
+ // Instruction::AGET_WIDE,
+ // Instruction::AGET_OBJECT,
+ // Instruction::AGET_BOOLEAN,
+ // Instruction::AGET_BYTE,
+ // Instruction::AGET_CHAR,
+ // Instruction::AGET_SHORT,
+ // Instruction::APUT,
+ // Instruction::APUT_WIDE,
+ // Instruction::APUT_OBJECT,
+ // Instruction::APUT_BOOLEAN,
+ // Instruction::APUT_BYTE,
+ // Instruction::APUT_CHAR,
+ // Instruction::APUT_SHORT,
+ // Instruction::IGET,
+ // Instruction::IGET_WIDE,
+ // Instruction::IGET_OBJECT,
+ // Instruction::IGET_BOOLEAN,
+ // Instruction::IGET_BYTE,
+ // Instruction::IGET_CHAR,
+ // Instruction::IGET_SHORT,
+ // Instruction::IPUT,
+ // Instruction::IPUT_WIDE,
+ // Instruction::IPUT_OBJECT,
+ // Instruction::IPUT_BOOLEAN,
+ // Instruction::IPUT_BYTE,
+ // Instruction::IPUT_CHAR,
+ // Instruction::IPUT_SHORT,
+ Instruction::SGET,
+ // Instruction::SGET_WIDE,
+ Instruction::SGET_OBJECT,
+ Instruction::SGET_BOOLEAN,
+ Instruction::SGET_BYTE,
+ Instruction::SGET_CHAR,
+ Instruction::SGET_SHORT,
+ Instruction::SPUT,
+ // Instruction::SPUT_WIDE,
+ Instruction::SPUT_OBJECT,
+ Instruction::SPUT_BOOLEAN,
+ Instruction::SPUT_BYTE,
+ Instruction::SPUT_CHAR,
+ Instruction::SPUT_SHORT,
+ Instruction::INVOKE_VIRTUAL,
+ Instruction::INVOKE_SUPER,
+ Instruction::INVOKE_DIRECT,
+ Instruction::INVOKE_STATIC,
+ Instruction::INVOKE_INTERFACE,
+ // Instruction::RETURN_VOID_BARRIER,
+ // Instruction::INVOKE_VIRTUAL_RANGE,
+ // Instruction::INVOKE_SUPER_RANGE,
+ // Instruction::INVOKE_DIRECT_RANGE,
+ // Instruction::INVOKE_STATIC_RANGE,
+ // Instruction::INVOKE_INTERFACE_RANGE,
+ // Instruction::UNUSED_79,
+ // Instruction::UNUSED_7A,
+ // Instruction::NEG_INT,
+ // Instruction::NOT_INT,
+ // Instruction::NEG_LONG,
+ // Instruction::NOT_LONG,
+ // Instruction::NEG_FLOAT,
+ // Instruction::NEG_DOUBLE,
+ // Instruction::INT_TO_LONG,
+ // Instruction::INT_TO_FLOAT,
+ // Instruction::INT_TO_DOUBLE,
+ // Instruction::LONG_TO_INT,
+ // Instruction::LONG_TO_FLOAT,
+ // Instruction::LONG_TO_DOUBLE,
+ // Instruction::FLOAT_TO_INT,
+ // Instruction::FLOAT_TO_LONG,
+ // Instruction::FLOAT_TO_DOUBLE,
+ // Instruction::DOUBLE_TO_INT,
+ // Instruction::DOUBLE_TO_LONG,
+ // Instruction::DOUBLE_TO_FLOAT,
+ // Instruction::INT_TO_BYTE,
+ // Instruction::INT_TO_CHAR,
+ // Instruction::INT_TO_SHORT,
+ // Instruction::ADD_INT,
+ // Instruction::SUB_INT,
+ // Instruction::MUL_INT,
+ // Instruction::DIV_INT,
+ // Instruction::REM_INT,
+ // Instruction::AND_INT,
+ // Instruction::OR_INT,
+ // Instruction::XOR_INT,
+ // Instruction::SHL_INT,
+ // Instruction::SHR_INT,
+ // Instruction::USHR_INT,
+ // Instruction::ADD_LONG,
+ // Instruction::SUB_LONG,
+ // Instruction::MUL_LONG,
+ // Instruction::DIV_LONG,
+ // Instruction::REM_LONG,
+ // Instruction::AND_LONG,
+ // Instruction::OR_LONG,
+ // Instruction::XOR_LONG,
+ // Instruction::SHL_LONG,
+ // Instruction::SHR_LONG,
+ // Instruction::USHR_LONG,
+ // Instruction::ADD_FLOAT,
+ // Instruction::SUB_FLOAT,
+ // Instruction::MUL_FLOAT,
+ // Instruction::DIV_FLOAT,
+ // Instruction::REM_FLOAT,
+ // Instruction::ADD_DOUBLE,
+ // Instruction::SUB_DOUBLE,
+ // Instruction::MUL_DOUBLE,
+ // Instruction::DIV_DOUBLE,
+ // Instruction::REM_DOUBLE,
+ // Instruction::ADD_INT_2ADDR,
+ // Instruction::SUB_INT_2ADDR,
+ // Instruction::MUL_INT_2ADDR,
+ // Instruction::DIV_INT_2ADDR,
+ // Instruction::REM_INT_2ADDR,
+ // Instruction::AND_INT_2ADDR,
+ // Instruction::OR_INT_2ADDR,
+ // Instruction::XOR_INT_2ADDR,
+ // Instruction::SHL_INT_2ADDR,
+ // Instruction::SHR_INT_2ADDR,
+ // Instruction::USHR_INT_2ADDR,
+ // Instruction::ADD_LONG_2ADDR,
+ // Instruction::SUB_LONG_2ADDR,
+ // Instruction::MUL_LONG_2ADDR,
+ // Instruction::DIV_LONG_2ADDR,
+ // Instruction::REM_LONG_2ADDR,
+ // Instruction::AND_LONG_2ADDR,
+ // Instruction::OR_LONG_2ADDR,
+ // Instruction::XOR_LONG_2ADDR,
+ // Instruction::SHL_LONG_2ADDR,
+ // Instruction::SHR_LONG_2ADDR,
+ // Instruction::USHR_LONG_2ADDR,
+ // Instruction::ADD_FLOAT_2ADDR,
+ // Instruction::SUB_FLOAT_2ADDR,
+ // Instruction::MUL_FLOAT_2ADDR,
+ // Instruction::DIV_FLOAT_2ADDR,
+ // Instruction::REM_FLOAT_2ADDR,
+ // Instruction::ADD_DOUBLE_2ADDR,
+ // Instruction::SUB_DOUBLE_2ADDR,
+ // Instruction::MUL_DOUBLE_2ADDR,
+ // Instruction::DIV_DOUBLE_2ADDR,
+ // Instruction::REM_DOUBLE_2ADDR,
+ // Instruction::ADD_INT_LIT16,
+ // Instruction::RSUB_INT,
+ // Instruction::MUL_INT_LIT16,
+ // Instruction::DIV_INT_LIT16,
+ // Instruction::REM_INT_LIT16,
+ // Instruction::AND_INT_LIT16,
+ // Instruction::OR_INT_LIT16,
+ // Instruction::XOR_INT_LIT16,
+ // Instruction::ADD_INT_LIT8,
+ // Instruction::RSUB_INT_LIT8,
+ // Instruction::MUL_INT_LIT8,
+ // Instruction::DIV_INT_LIT8,
+ // Instruction::REM_INT_LIT8,
+ // Instruction::AND_INT_LIT8,
+ // Instruction::OR_INT_LIT8,
+ // Instruction::XOR_INT_LIT8,
+ // Instruction::SHL_INT_LIT8,
+ // Instruction::SHR_INT_LIT8,
+ // Instruction::USHR_INT_LIT8,
+ // Instruction::IGET_QUICK,
+ // Instruction::IGET_WIDE_QUICK,
+ // Instruction::IGET_OBJECT_QUICK,
+ // Instruction::IPUT_QUICK,
+ // Instruction::IPUT_WIDE_QUICK,
+ // Instruction::IPUT_OBJECT_QUICK,
+ // Instruction::INVOKE_VIRTUAL_QUICK,
+ // Instruction::INVOKE_VIRTUAL_RANGE_QUICK,
+ // Instruction::UNUSED_EB,
+ // Instruction::UNUSED_EC,
+ // Instruction::UNUSED_ED,
+ // Instruction::UNUSED_EE,
+ // Instruction::UNUSED_EF,
+ // Instruction::UNUSED_F0,
+ // Instruction::UNUSED_F1,
+ // Instruction::UNUSED_F2,
+ // Instruction::UNUSED_F3,
+ // Instruction::UNUSED_F4,
+ // Instruction::UNUSED_F5,
+ // Instruction::UNUSED_F6,
+ // Instruction::UNUSED_F7,
+ // Instruction::UNUSED_F8,
+ // Instruction::UNUSED_F9,
+ // Instruction::UNUSED_FA,
+ // Instruction::UNUSED_FB,
+ // Instruction::UNUSED_FC,
+ // Instruction::UNUSED_FD,
+ // Instruction::UNUSED_FE,
+ // Instruction::UNUSED_FF,
+
+ // ----- ExtendedMIROpcode -----
+ // kMirOpPhi,
+ // kMirOpCopy,
+ // kMirOpFusedCmplFloat,
+ // kMirOpFusedCmpgFloat,
+ // kMirOpFusedCmplDouble,
+ // kMirOpFusedCmpgDouble,
+ // kMirOpFusedCmpLong,
+ // kMirOpNop,
+ // kMirOpNullCheck,
+ // kMirOpRangeCheck,
+ // kMirOpDivZeroCheck,
+ // kMirOpCheck,
+ // kMirOpCheckPart2,
+ // kMirOpSelect,
+ // kMirOpLast,
+};
+
+// Z : boolean
+// B : byte
+// S : short
+// C : char
+// I : int
+// L : long
+// F : float
+// D : double
+// L : reference(object, array)
+// V : void
+// (ARM64) The current calling convention only supports 32bit softfp,
+// which has problems with long, float, double
+constexpr char arm64_supported_types[] = "ZBSCILV";
+// (x86_64) We still have troubles with compiling longs/doubles/floats
+constexpr char x86_64_supported_types[] = "ZBSCILV";
+
+// TODO: Remove this when we are able to compile everything.
+static bool CanCompileShorty(const char* shorty, InstructionSet instruction_set) {
uint32_t shorty_size = strlen(shorty);
CHECK_GE(shorty_size, 1u);
// Set a limitation on maximum number of parameters.
// Note : there is an implied "method*" parameter, and probably "this" as well.
// 1 is for the return type. Currently, we only accept 2 parameters at the most.
+ // (x86_64): For now we have the same limitation. But we might want to split this
+ // check in future into two separate cases for arm64 and x86_64.
if (shorty_size > (1 + 2)) {
return false;
}
- // Z : boolean
- // B : byte
- // S : short
- // C : char
- // I : int
- // L : long
- // F : float
- // D : double
- // L : reference(object, array)
- // V : void
- // Current calling conversion only support 32bit softfp
- // which has problems with long, float, double
- constexpr char supported_types[] = "ZBSCILV";
+
+ const char* supported_types = arm64_supported_types;
+ if (instruction_set == kX86_64) {
+ supported_types = x86_64_supported_types;
+ }
for (uint32_t i = 0; i < shorty_size; i++) {
if (strchr(supported_types, shorty[i]) == nullptr) {
return false;
@@ -444,14 +732,21 @@
static bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file,
CompilationUnit& cu) {
// There is some limitation with current ARM 64 backend.
- if (cu.instruction_set == kArm64) {
+ if (cu.instruction_set == kArm64 || cu.instruction_set == kX86_64) {
// Check if we can compile the prototype.
const char* shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx));
- if (!CanCompileShorty(shorty)) {
+ if (!CanCompileShorty(shorty, cu.instruction_set)) {
VLOG(compiler) << "Unsupported shorty : " << shorty;
return false;
}
+ const int *support_list = arm64_support_list;
+ int support_list_size = arraysize(arm64_support_list);
+ if (cu.instruction_set == kX86_64) {
+ support_list = x86_64_support_list;
+ support_list_size = arraysize(x86_64_support_list);
+ }
+
for (int idx = 0; idx < cu.mir_graph->GetNumBlocks(); idx++) {
BasicBlock *bb = cu.mir_graph->GetBasicBlock(idx);
if (bb == NULL) continue;
@@ -459,8 +754,8 @@
for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
int opcode = mir->dalvikInsn.opcode;
// Check if we support the byte code.
- if (std::find(arm64_support_list, arm64_support_list + arraysize(arm64_support_list),
- opcode) == arm64_support_list + arraysize(arm64_support_list)) {
+ if (std::find(support_list, support_list + support_list_size,
+ opcode) == support_list + support_list_size) {
if (opcode < kMirOpFirst) {
VLOG(compiler) << "Unsupported dalvik byte code : "
<< mir->dalvikInsn.opcode;
@@ -479,7 +774,7 @@
uint32_t invoke_method_idx = mir->dalvikInsn.vB;
const char* invoke_method_shorty = dex_file.GetMethodShorty(
dex_file.GetMethodId(invoke_method_idx));
- if (!CanCompileShorty(invoke_method_shorty)) {
+ if (!CanCompileShorty(invoke_method_shorty, cu.instruction_set)) {
VLOG(compiler) << "Unsupported to invoke '"
<< PrettyMethod(invoke_method_idx, dex_file)
<< "' with shorty : " << invoke_method_shorty;
diff --git a/compiler/dex/local_value_numbering.h b/compiler/dex/local_value_numbering.h
index 6d67afb..7049f8c 100644
--- a/compiler/dex/local_value_numbering.h
+++ b/compiler/dex/local_value_numbering.h
@@ -18,7 +18,7 @@
#define ART_COMPILER_DEX_LOCAL_VALUE_NUMBERING_H_
#include "compiler_internals.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#include "utils/scoped_arena_allocator.h"
#include "utils/scoped_arena_containers.h"
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index c3b5a25..1c9e2e2 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -23,7 +23,7 @@
#include "dex/quick/dex_file_method_inliner.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "driver/compiler_options.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#include "utils/scoped_arena_containers.h"
namespace art {
diff --git a/compiler/dex/mir_field_info.cc b/compiler/dex/mir_field_info.cc
index 7c630e8..98866d9 100644
--- a/compiler/dex/mir_field_info.cc
+++ b/compiler/dex/mir_field_info.cc
@@ -21,10 +21,10 @@
#include "base/logging.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_driver-inl.h"
-#include "mirror/class_loader.h" // Only to allow casts in SirtRef<ClassLoader>.
-#include "mirror/dex_cache.h" // Only to allow casts in SirtRef<DexCache>.
+#include "mirror/class_loader.h" // Only to allow casts in Handle<ClassLoader>.
+#include "mirror/dex_cache.h" // Only to allow casts in Handle<DexCache>.
#include "scoped_thread_state_change.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
namespace art {
@@ -43,11 +43,12 @@
// We're going to resolve fields and check access in a tight loop. It's better to hold
// the lock and needed references once than re-acquiring them again and again.
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::DexCache> dex_cache(soa.Self(), compiler_driver->GetDexCache(mUnit));
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
- compiler_driver->GetClassLoader(soa, mUnit));
- SirtRef<mirror::Class> referrer_class(soa.Self(),
- compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit));
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(compiler_driver->GetDexCache(mUnit)));
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(compiler_driver->GetClassLoader(soa, mUnit)));
+ Handle<mirror::Class> referrer_class(hs.NewHandle(
+ compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit)));
// Even if the referrer class is unresolved (i.e. we're compiling a method without class
// definition) we still want to resolve fields and record all available info.
@@ -63,7 +64,7 @@
bool is_volatile = compiler_driver->IsFieldVolatile(resolved_field);
std::pair<bool, bool> fast_path = compiler_driver->IsFastInstanceField(
- dex_cache.get(), referrer_class.get(), resolved_field, field_idx, &it->field_offset_);
+ dex_cache.Get(), referrer_class.Get(), resolved_field, field_idx, &it->field_offset_);
it->flags_ = 0u | // Without kFlagIsStatic.
(is_volatile ? kFlagIsVolatile : 0u) |
(fast_path.first ? kFlagFastGet : 0u) |
@@ -89,11 +90,12 @@
// We're going to resolve fields and check access in a tight loop. It's better to hold
// the lock and needed references once than re-acquiring them again and again.
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::DexCache> dex_cache(soa.Self(), compiler_driver->GetDexCache(mUnit));
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
- compiler_driver->GetClassLoader(soa, mUnit));
- SirtRef<mirror::Class> referrer_class(soa.Self(),
- compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit));
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(compiler_driver->GetDexCache(mUnit)));
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(compiler_driver->GetClassLoader(soa, mUnit)));
+ Handle<mirror::Class> referrer_class(hs.NewHandle(
+ compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit)));
// Even if the referrer class is unresolved (i.e. we're compiling a method without class
// definition) we still want to resolve fields and record all available info.
@@ -110,7 +112,7 @@
bool is_referrers_class, is_initialized;
std::pair<bool, bool> fast_path = compiler_driver->IsFastStaticField(
- dex_cache.get(), referrer_class.get(), resolved_field, field_idx, &it->field_offset_,
+ dex_cache.Get(), referrer_class.Get(), resolved_field, field_idx, &it->field_offset_,
&it->storage_index_, &is_referrers_class, &is_initialized);
it->flags_ = kFlagIsStatic |
(is_volatile ? kFlagIsVolatile : 0u) |
diff --git a/compiler/dex/mir_method_info.cc b/compiler/dex/mir_method_info.cc
index 2c33ef1..cc2bd95 100644
--- a/compiler/dex/mir_method_info.cc
+++ b/compiler/dex/mir_method_info.cc
@@ -19,10 +19,10 @@
#include "driver/compiler_driver.h"
#include "driver/dex_compilation_unit.h"
#include "driver/compiler_driver-inl.h"
-#include "mirror/class_loader.h" // Only to allow casts in SirtRef<ClassLoader>.
-#include "mirror/dex_cache.h" // Only to allow casts in SirtRef<DexCache>.
+#include "mirror/class_loader.h" // Only to allow casts in Handle<ClassLoader>.
+#include "mirror/dex_cache.h" // Only to allow casts in Handle<DexCache>.
#include "scoped_thread_state_change.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
namespace art {
@@ -45,11 +45,12 @@
// We're going to resolve methods and check access in a tight loop. It's better to hold
// the lock and needed references once than re-acquiring them again and again.
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::DexCache> dex_cache(soa.Self(), compiler_driver->GetDexCache(mUnit));
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
- compiler_driver->GetClassLoader(soa, mUnit));
- SirtRef<mirror::Class> referrer_class(soa.Self(),
- compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit));
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(compiler_driver->GetDexCache(mUnit)));
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(compiler_driver->GetClassLoader(soa, mUnit)));
+ Handle<mirror::Class> referrer_class(hs.NewHandle(
+ compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit)));
// Even if the referrer class is unresolved (i.e. we're compiling a method without class
// definition) we still want to resolve methods and record all available info.
@@ -73,10 +74,10 @@
MethodReference target_method(mUnit->GetDexFile(), it->MethodIndex());
int fast_path_flags = compiler_driver->IsFastInvoke(
- soa, dex_cache, class_loader, mUnit, referrer_class.get(), resolved_method, &invoke_type,
+ soa, dex_cache, class_loader, mUnit, referrer_class.Get(), resolved_method, &invoke_type,
&target_method, devirt_target, &it->direct_code_, &it->direct_method_);
bool needs_clinit =
- compiler_driver->NeedsClassInitialization(referrer_class.get(), resolved_method);
+ compiler_driver->NeedsClassInitialization(referrer_class.Get(), resolved_method);
uint16_t other_flags = it->flags_ &
~(kFlagFastPath | kFlagNeedsClassInitialization | (kInvokeTypeMask << kBitSharpTypeBegin));
it->flags_ = other_flags |
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index d3477c9..5d74b8d 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -268,6 +268,7 @@
MarkPossibleNullPointerException(opt_flags);
LoadConstantNoClobber(rs_r3, 0);
LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_r1, rs_r2, NULL);
+ GenMemBarrier(kStoreLoad);
Store32Disp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r3);
LIR* unlock_success_branch = OpUnconditionalBranch(NULL);
@@ -285,7 +286,6 @@
LIR* success_target = NewLIR0(kPseudoTargetLabel);
unlock_success_branch->target = success_target;
- GenMemBarrier(kStoreLoad);
} else {
// Explicit null-check as slow-path is entered using an IT.
GenNullCheck(rs_r0, opt_flags);
@@ -295,7 +295,11 @@
LoadConstantNoClobber(rs_r3, 0);
// Is lock unheld on lock or held by us (==thread_id) on unlock?
OpRegReg(kOpCmp, rs_r1, rs_r2);
+
LIR* it = OpIT(kCondEq, "EE");
+ if (GenMemBarrier(kStoreLoad)) {
+ UpdateIT(it, "TEE");
+ }
Store32Disp/*eq*/(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r3);
// Go expensive route - UnlockObjectFromCode(obj);
LoadWordDisp/*ne*/(rs_rARM_SELF, QUICK_ENTRYPOINT_OFFSET(4, pUnlockObject).Int32Value(),
@@ -304,7 +308,6 @@
LIR* call_inst = OpReg(kOpBlx/*ne*/, rs_rARM_LR);
OpEndIT(it);
MarkSafepointPC(call_inst);
- GenMemBarrier(kStoreLoad);
}
}
@@ -356,10 +359,14 @@
(static_cast<size_t>(frame_size_) <
Thread::kStackOverflowReservedBytes));
NewLIR0(kPseudoMethodEntry);
+ bool large_frame = (static_cast<size_t>(frame_size_) > Thread::kStackOverflowReservedUsableBytes);
if (!skip_overflow_check) {
if (Runtime::Current()->ExplicitStackOverflowChecks()) {
- /* Load stack limit */
- Load32Disp(rs_rARM_SELF, Thread::StackEndOffset<4>().Int32Value(), rs_r12);
+ if (!large_frame) {
+ /* Load stack limit */
+ LockTemp(rs_r12);
+ Load32Disp(rs_rARM_SELF, Thread::StackEndOffset<4>().Int32Value(), rs_r12);
+ }
} else {
// Implicit stack overflow check.
// Generate a load from [sp, #-overflowsize]. If this is in the stack
@@ -420,16 +427,26 @@
const bool restore_lr_;
const size_t sp_displace_;
};
- if (static_cast<size_t>(frame_size_) > Thread::kStackOverflowReservedUsableBytes) {
+ if (large_frame) {
+ // Note: may need a temp reg, and we only have r12 free at this point.
OpRegRegImm(kOpSub, rs_rARM_LR, rs_rARM_SP, frame_size_without_spills);
+ Load32Disp(rs_rARM_SELF, Thread::StackEndOffset<4>().Int32Value(), rs_r12);
LIR* branch = OpCmpBranch(kCondUlt, rs_rARM_LR, rs_r12, nullptr);
// Need to restore LR since we used it as a temp.
AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, true, spill_size));
OpRegCopy(rs_rARM_SP, rs_rARM_LR); // Establish stack
} else {
- // If the frame is small enough we are guaranteed to have enough space that remains to
- // handle signals on the user stack.
+ /*
+ * If the frame is small enough we are guaranteed to have enough space that remains to
+ * handle signals on the user stack. However, we may not have any free temp
+ * registers at this point, so we'll temporarily add LR to the temp pool.
+ */
+ DCHECK(!GetRegInfo(rs_rARM_LR)->IsTemp());
+ MarkTemp(rs_rARM_LR);
+ FreeTemp(rs_rARM_LR);
OpRegRegImm(kOpSub, rs_rARM_SP, rs_rARM_SP, frame_size_without_spills);
+ Clobber(rs_rARM_LR);
+ UnmarkTemp(rs_rARM_LR);
LIR* branch = OpCmpBranch(kCondUlt, rs_rARM_SP, rs_r12, nullptr);
AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, false, frame_size_));
}
@@ -448,6 +465,7 @@
FreeTemp(rs_r1);
FreeTemp(rs_r2);
FreeTemp(rs_r3);
+ FreeTemp(rs_r12);
}
void ArmMir2Lir::GenExitSequence() {
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 6696cf7..1ee59c6 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -141,7 +141,7 @@
void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
void GenSelect(BasicBlock* bb, MIR* mir);
- void GenMemBarrier(MemBarrierKind barrier_kind);
+ bool GenMemBarrier(MemBarrierKind barrier_kind);
void GenMonitorEnter(int opt_flags, RegLocation rl_src);
void GenMonitorExit(int opt_flags, RegLocation rl_src);
void GenMoveException(RegLocation rl_dest);
@@ -160,6 +160,7 @@
LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target);
LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src);
LIR* OpIT(ConditionCode cond, const char* guide);
+ void UpdateIT(LIR* it, const char* new_guide);
void OpEndIT(LIR* it);
LIR* OpMem(OpKind op, RegStorage r_base, int disp);
LIR* OpPcRelLoad(RegStorage reg, LIR* target);
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 2e0e559..2d4834c 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -67,6 +67,34 @@
return NewLIR2(kThumb2It, code, mask);
}
+void ArmMir2Lir::UpdateIT(LIR* it, const char* new_guide) {
+ int mask;
+ int mask3 = 0;
+ int mask2 = 0;
+ int mask1 = 0;
+ ArmConditionCode code = static_cast<ArmConditionCode>(it->operands[0]);
+ int cond_bit = code & 1;
+ int alt_bit = cond_bit ^ 1;
+
+ // Note: case fallthroughs intentional
+ switch (strlen(new_guide)) {
+ case 3:
+ mask1 = (new_guide[2] == 'T') ? cond_bit : alt_bit;
+ case 2:
+ mask2 = (new_guide[1] == 'T') ? cond_bit : alt_bit;
+ case 1:
+ mask3 = (new_guide[0] == 'T') ? cond_bit : alt_bit;
+ break;
+ case 0:
+ break;
+ default:
+ LOG(FATAL) << "OAT: bad case in UpdateIT";
+ }
+ mask = (mask3 << 3) | (mask2 << 2) | (mask1 << 1) |
+ (1 << (3 - strlen(new_guide)));
+ it->operands[1] = mask;
+}
+
void ArmMir2Lir::OpEndIT(LIR* it) {
// TODO: use the 'it' pointer to do some checks with the LIR, for example
// we could check that the number of instructions matches the mask
@@ -934,7 +962,7 @@
return OpCondBranch(c_code, target);
}
-void ArmMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
+bool ArmMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#if ANDROID_SMP != 0
// Start off with using the last LIR as the barrier. If it is not enough, then we will generate one.
LIR* barrier = last_lir_insn_;
@@ -952,15 +980,21 @@
break;
}
+ bool ret = false;
+
// If the same barrier already exists, don't generate another.
if (barrier == nullptr
|| (barrier != nullptr && (barrier->opcode != kThumb2Dmb || barrier->operands[0] != dmb_flavor))) {
barrier = NewLIR1(kThumb2Dmb, dmb_flavor);
+ ret = true;
}
// At this point we must have a memory barrier. Mark it as a scheduling barrier as well.
DCHECK(!barrier->flags.use_def_invalid);
barrier->u.m.def_mask = ENCODE_ALL;
+ return ret;
+#else
+ return false;
#endif
}
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index fe18ed9..b0211d6 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -850,7 +850,7 @@
DCHECK(!r_dest.IsPair());
load = NewLIR3(kThumb2Vldrd, r_dest.GetReg(), r_ptr.GetReg(), encoded_disp);
} else {
- load = NewLIR4(kThumb2LdrdI8, r_dest.GetLowReg(), r_dest.GetHighReg(), r_base.GetReg(),
+ load = NewLIR4(kThumb2LdrdI8, r_dest.GetLowReg(), r_dest.GetHighReg(), r_ptr.GetReg(),
encoded_disp);
}
if ((displacement & ~1020) != 0 && !r_dest.IsFloat()) {
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index 51e97cd..1bcf19b 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -278,6 +278,7 @@
MarkPossibleNullPointerException(opt_flags);
LoadConstantNoClobber(rs_x3, 0);
LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_x1, rs_x2, NULL);
+ GenMemBarrier(kStoreLoad);
Store32Disp(rs_x0, mirror::Object::MonitorOffset().Int32Value(), rs_x3);
LIR* unlock_success_branch = OpUnconditionalBranch(NULL);
@@ -295,7 +296,6 @@
LIR* success_target = NewLIR0(kPseudoTargetLabel);
unlock_success_branch->target = success_target;
- GenMemBarrier(kStoreLoad);
} else {
// Explicit null-check as slow-path is entered using an IT.
GenNullCheck(rs_x0, opt_flags);
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index af0029c..418a989 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -142,7 +142,7 @@
void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
void GenSelect(BasicBlock* bb, MIR* mir);
- void GenMemBarrier(MemBarrierKind barrier_kind);
+ bool GenMemBarrier(MemBarrierKind barrier_kind);
void GenMonitorEnter(int opt_flags, RegLocation rl_src);
void GenMonitorExit(int opt_flags, RegLocation rl_src);
void GenMoveException(RegLocation rl_dest);
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index 0465249..f2a57e7 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -712,7 +712,7 @@
return OpCondBranch(c_code, target);
}
-void Arm64Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
+bool Arm64Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#if ANDROID_SMP != 0
// Start off with using the last LIR as the barrier. If it is not enough, then we will generate one.
LIR* barrier = last_lir_insn_;
@@ -730,15 +730,21 @@
break;
}
+ bool ret = false;
+
// If the same barrier already exists, don't generate another.
if (barrier == nullptr
|| (barrier->opcode != kA64Dmb1B || barrier->operands[0] != dmb_flavor)) {
barrier = NewLIR1(kA64Dmb1B, dmb_flavor);
+ ret = true;
}
// At this point we must have a memory barrier. Mark it as a scheduling barrier as well.
DCHECK(!barrier->flags.use_def_invalid);
barrier->u.m.def_mask = ENCODE_ALL;
+ return ret;
+#else
+ return false;
#endif
}
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 7aaffcb..24ed4a3 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -801,8 +801,10 @@
const MethodReference& target_method,
uint32_t vtable_idx, uintptr_t direct_code,
uintptr_t direct_method, InvokeType type, bool skip_this) {
- int last_arg_reg = TargetReg(kArg3).GetReg();
- int next_reg = TargetReg(kArg1).GetReg();
+ int last_arg_reg = 3 - 1;
+ int arg_regs[3] = {TargetReg(kArg1).GetReg(), TargetReg(kArg2).GetReg(), TargetReg(kArg3).GetReg()};
+
+ int next_reg = 0;
int next_arg = 0;
if (skip_this) {
next_reg++;
@@ -811,8 +813,8 @@
for (; (next_reg <= last_arg_reg) && (next_arg < info->num_arg_words); next_reg++) {
RegLocation rl_arg = info->args[next_arg++];
rl_arg = UpdateRawLoc(rl_arg);
- if (rl_arg.wide && (next_reg <= TargetReg(kArg2).GetReg())) {
- RegStorage r_tmp(RegStorage::k64BitPair, next_reg, next_reg + 1);
+ if (rl_arg.wide && (next_reg <= last_arg_reg - 1)) {
+ RegStorage r_tmp(RegStorage::k64BitPair, arg_regs[next_reg], arg_regs[next_reg + 1]);
LoadValueDirectWideFixed(rl_arg, r_tmp);
next_reg++;
next_arg++;
@@ -821,7 +823,7 @@
rl_arg = NarrowRegLoc(rl_arg);
rl_arg.is_const = false;
}
- LoadValueDirectFixed(rl_arg, RegStorage::Solo32(next_reg));
+ LoadValueDirectFixed(rl_arg, RegStorage::Solo32(arg_regs[next_reg]));
}
call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
direct_code, direct_method, type);
@@ -1571,7 +1573,7 @@
RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (is_long) {
- if (cu_->instruction_set == kX86) {
+ if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
LoadBaseIndexedDisp(rl_object.reg, rl_offset.reg, 0, 0, rl_result.reg, k64);
} else {
RegStorage rl_temp_offset = AllocTemp();
@@ -1618,7 +1620,7 @@
RegLocation rl_value;
if (is_long) {
rl_value = LoadValueWide(rl_src_value, kCoreReg);
- if (cu_->instruction_set == kX86) {
+ if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
StoreBaseIndexedDisp(rl_object.reg, rl_offset.reg, 0, 0, rl_value.reg, k64);
} else {
RegStorage rl_temp_offset = AllocTemp();
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index b7ea34f..c5b40da 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -139,7 +139,7 @@
void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
void GenSelect(BasicBlock* bb, MIR* mir);
- void GenMemBarrier(MemBarrierKind barrier_kind);
+ bool GenMemBarrier(MemBarrierKind barrier_kind);
void GenMoveException(RegLocation rl_dest);
void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
int first_bit, int second_bit);
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index 2821209..35345e8 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -433,9 +433,12 @@
FreeTemp(rs_rMIPS_ARG3);
}
-void MipsMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
+bool MipsMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#if ANDROID_SMP != 0
NewLIR1(kMipsSync, 0 /* Only stype currently supported */);
+ return true;
+#else
+ return false;
#endif
}
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 77119a4..2c4ca88 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -37,7 +37,7 @@
}
// TODO: needs revisit for 64-bit.
-RegStorage Mir2Lir::LoadArg(int in_position, bool wide) {
+RegStorage Mir2Lir::LoadArg(int in_position, RegisterClass reg_class, bool wide) {
RegStorage reg_arg_low = GetArgMappingToPhysicalReg(in_position);
RegStorage reg_arg_high = wide ? GetArgMappingToPhysicalReg(in_position + 1) :
RegStorage::InvalidReg();
@@ -56,28 +56,45 @@
if (wide && !reg_arg_high.Valid()) {
// If the low part is not in a reg, we allocate a pair. Otherwise, we just load to high reg.
if (!reg_arg_low.Valid()) {
- RegStorage new_regs = AllocTypedTempWide(false, kAnyReg);
- reg_arg_low = new_regs.GetLow();
- reg_arg_high = new_regs.GetHigh();
+ RegStorage new_regs = AllocTypedTempWide(false, reg_class);
LoadBaseDisp(TargetReg(kSp), offset, new_regs, k64);
+ return new_regs; // The reg_class is OK, we can return.
} else {
+ // Assume that no ABI allows splitting a wide fp reg between a narrow fp reg and memory,
+ // i.e. the low part is in a core reg. Load the second part in a core reg as well for now.
+ DCHECK(!reg_arg_low.IsFloat());
reg_arg_high = AllocTemp();
int offset_high = offset + sizeof(uint32_t);
Load32Disp(TargetReg(kSp), offset_high, reg_arg_high);
+ // Continue below to check the reg_class.
}
}
// If the low part is not in a register yet, we need to load it.
if (!reg_arg_low.Valid()) {
- reg_arg_low = AllocTemp();
+ // Assume that if the low part of a wide arg is passed in memory, so is the high part,
+ // thus we don't get here for wide args as it's handled above. Big-endian ABIs could
+ // conceivably break this assumption but Android supports only little-endian architectures.
+ DCHECK(!wide);
+ reg_arg_low = AllocTypedTemp(false, reg_class);
Load32Disp(TargetReg(kSp), offset, reg_arg_low);
+ return reg_arg_low; // The reg_class is OK, we can return.
}
- if (wide) {
- return RegStorage::MakeRegPair(reg_arg_low, reg_arg_high);
- } else {
- return reg_arg_low;
+ RegStorage reg_arg = wide ? RegStorage::MakeRegPair(reg_arg_low, reg_arg_high) : reg_arg_low;
+ // Check if we need to copy the arg to a different reg_class.
+ if (!RegClassMatches(reg_class, reg_arg)) {
+ if (wide) {
+ RegStorage new_regs = AllocTypedTempWide(false, reg_class);
+ OpRegCopyWide(new_regs, reg_arg);
+ reg_arg = new_regs;
+ } else {
+ RegStorage new_reg = AllocTypedTemp(false, reg_class);
+ OpRegCopy(new_reg, reg_arg);
+ reg_arg = new_reg;
+ }
}
+ return reg_arg;
}
void Mir2Lir::LoadArgDirect(int in_position, RegLocation rl_dest) {
@@ -138,16 +155,29 @@
// Point of no return - no aborts after this
GenPrintLabel(mir);
LockArg(data.object_arg);
+ RegStorage reg_obj = LoadArg(data.object_arg, kCoreReg);
RegLocation rl_dest = wide ? GetReturnWide(double_or_float) : GetReturn(double_or_float);
- RegStorage reg_obj = LoadArg(data.object_arg);
+ RegisterClass reg_class = RegClassForFieldLoadStore(size, data.is_volatile);
+ RegStorage r_result = rl_dest.reg;
+ if (!RegClassMatches(reg_class, r_result)) {
+ r_result = wide ? AllocTypedTempWide(rl_dest.fp, reg_class)
+ : AllocTypedTemp(rl_dest.fp, reg_class);
+ }
if (data.is_volatile) {
- LoadBaseDispVolatile(reg_obj, data.field_offset, rl_dest.reg, size);
+ LoadBaseDispVolatile(reg_obj, data.field_offset, r_result, size);
// Without context sensitive analysis, we must issue the most conservative barriers.
// In this case, either a load or store may follow so we issue both barriers.
GenMemBarrier(kLoadLoad);
GenMemBarrier(kLoadStore);
} else {
- LoadBaseDisp(reg_obj, data.field_offset, rl_dest.reg, size);
+ LoadBaseDisp(reg_obj, data.field_offset, r_result, size);
+ }
+ if (r_result != rl_dest.reg) {
+ if (wide) {
+ OpRegCopyWide(rl_dest.reg, r_result);
+ } else {
+ OpRegCopy(rl_dest.reg, r_result);
+ }
}
return true;
}
@@ -175,8 +205,9 @@
GenPrintLabel(mir);
LockArg(data.object_arg);
LockArg(data.src_arg, wide);
- RegStorage reg_obj = LoadArg(data.object_arg);
- RegStorage reg_src = LoadArg(data.src_arg, wide);
+ RegStorage reg_obj = LoadArg(data.object_arg, kCoreReg);
+ RegisterClass reg_class = RegClassForFieldLoadStore(size, data.is_volatile);
+ RegStorage reg_src = LoadArg(data.src_arg, reg_class, wide);
if (data.is_volatile) {
// There might have been a store before this volatile one so insert StoreStore barrier.
GenMemBarrier(kStoreStore);
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 77e5649..836d2ac 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -1192,8 +1192,9 @@
* barrier, then it will be used as such. Otherwise, a new LIR will be generated
* that can keep the semantics.
* @param barrier_kind The kind of memory barrier to generate.
+ * @return whether a new instruction was generated.
*/
- virtual void GenMemBarrier(MemBarrierKind barrier_kind) = 0;
+ virtual bool GenMemBarrier(MemBarrierKind barrier_kind) = 0;
virtual void GenMoveException(RegLocation rl_dest) = 0;
virtual void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
@@ -1397,7 +1398,7 @@
* @param wide Whether the argument is 64-bit or not.
* @return Returns the register (or register pair) for the loaded argument.
*/
- RegStorage LoadArg(int in_position, bool wide = false);
+ RegStorage LoadArg(int in_position, RegisterClass reg_class, bool wide = false);
/**
* @brief Used to load a VR argument directly to a specified register location.
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 11e7ff9..ef8c33c 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -142,7 +142,7 @@
void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
void GenSelect(BasicBlock* bb, MIR* mir);
- void GenMemBarrier(MemBarrierKind barrier_kind);
+ bool GenMemBarrier(MemBarrierKind barrier_kind);
void GenMoveException(RegLocation rl_dest);
void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
int first_bit, int second_bit);
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 2db9845..e3312a2 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -421,11 +421,12 @@
return false;
}
-void X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
+bool X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#if ANDROID_SMP != 0
// Start off with using the last LIR as the barrier. If it is not enough, then we will update it.
LIR* mem_barrier = last_lir_insn_;
+ bool ret = false;
/*
* According to the JSR-133 Cookbook, for x86 only StoreLoad barriers need memory fence. All other barriers
* (LoadLoad, LoadStore, StoreStore) are nops due to the x86 memory model. For those cases, all we need
@@ -435,11 +436,13 @@
// If no LIR exists already that can be used a barrier, then generate an mfence.
if (mem_barrier == nullptr) {
mem_barrier = NewLIR0(kX86Mfence);
+ ret = true;
}
// If last instruction does not provide full barrier, then insert an mfence.
if (ProvidesFullMemoryBarrier(static_cast<X86OpCode>(mem_barrier->opcode)) == false) {
mem_barrier = NewLIR0(kX86Mfence);
+ ret = true;
}
}
@@ -451,6 +454,9 @@
DCHECK(!mem_barrier->flags.use_def_invalid);
mem_barrier->u.m.def_mask = ENCODE_ALL;
}
+ return ret;
+#else
+ return false;
#endif
}
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index 0f812a4..e19f3cf 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -34,7 +34,7 @@
#include "mirror/dex_cache-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#include "verifier/dex_gc_map.h"
#include "verifier/method_verifier.h"
#include "verifier/method_verifier-inl.h"
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index d9f2a3a..08fd386 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -28,7 +28,7 @@
#include "mirror/dex_cache-inl.h"
#include "mirror/art_field-inl.h"
#include "scoped_thread_state_change.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
namespace art {
@@ -42,10 +42,10 @@
}
inline mirror::Class* CompilerDriver::ResolveCompilingMethodsClass(
- ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit) {
- DCHECK(dex_cache->GetDexFile() == mUnit->GetDexFile());
- DCHECK(class_loader.get() == soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
+ ScopedObjectAccess& soa, const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit) {
+ DCHECK_EQ(dex_cache->GetDexFile(), mUnit->GetDexFile());
+ DCHECK_EQ(class_loader.Get(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
const DexFile::MethodId& referrer_method_id =
mUnit->GetDexFile()->GetMethodId(mUnit->GetDexMethodIndex());
mirror::Class* referrer_class = mUnit->GetClassLinker()->ResolveType(
@@ -59,11 +59,11 @@
}
inline mirror::ArtField* CompilerDriver::ResolveField(
- ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
+ ScopedObjectAccess& soa, const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
uint32_t field_idx, bool is_static) {
- DCHECK(dex_cache->GetDexFile() == mUnit->GetDexFile());
- DCHECK(class_loader.get() == soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
+ DCHECK_EQ(dex_cache->GetDexFile(), mUnit->GetDexFile());
+ DCHECK_EQ(class_loader.Get(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
mirror::ArtField* resolved_field = mUnit->GetClassLinker()->ResolveField(
*mUnit->GetDexFile(), field_idx, dex_cache, class_loader, is_static);
DCHECK_EQ(resolved_field == nullptr, soa.Self()->IsExceptionPending());
@@ -165,11 +165,11 @@
}
inline mirror::ArtMethod* CompilerDriver::ResolveMethod(
- ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
+ ScopedObjectAccess& soa, const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
uint32_t method_idx, InvokeType invoke_type) {
DCHECK(dex_cache->GetDexFile() == mUnit->GetDexFile());
- DCHECK(class_loader.get() == soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
+ DCHECK(class_loader.Get() == soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
mirror::ArtMethod* resolved_method = mUnit->GetClassLinker()->ResolveMethod(
*mUnit->GetDexFile(), method_idx, dex_cache, class_loader, nullptr, invoke_type);
DCHECK_EQ(resolved_method == nullptr, soa.Self()->IsExceptionPending());
@@ -206,8 +206,8 @@
}
inline int CompilerDriver::IsFastInvoke(
- ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
+ ScopedObjectAccess& soa, const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
mirror::Class* referrer_class, mirror::ArtMethod* resolved_method, InvokeType* invoke_type,
MethodReference* target_method, const MethodReference* devirt_target,
uintptr_t* direct_code, uintptr_t* direct_method) {
@@ -217,7 +217,7 @@
}
mirror::Class* methods_class = resolved_method->GetDeclaringClass();
if (UNLIKELY(!referrer_class->CanAccessResolvedMethod(methods_class, resolved_method,
- dex_cache.get(),
+ dex_cache.Get(),
target_method->dex_method_index))) {
return 0;
}
@@ -237,7 +237,7 @@
// Sharpen a virtual call into a direct call. The method_idx is into referrer's
// dex cache, check that this resolved method is where we expect it.
CHECK(target_method->dex_file == mUnit->GetDexFile());
- DCHECK(dex_cache.get() == mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
+ DCHECK(dex_cache.Get() == mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
CHECK(referrer_class->GetDexCache()->GetResolvedMethod(target_method->dex_method_index) ==
resolved_method) << PrettyMethod(resolved_method);
int stats_flags = kFlagMethodResolved;
@@ -259,8 +259,9 @@
devirt_target->dex_method_index,
dex_cache, class_loader, NULL, kVirtual);
} else {
- SirtRef<mirror::DexCache> target_dex_cache(soa.Self(),
- class_linker->FindDexCache(*devirt_target->dex_file));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::DexCache> target_dex_cache(
+ hs.NewHandle(class_linker->FindDexCache(*devirt_target->dex_file)));
called_method = class_linker->ResolveMethod(*devirt_target->dex_file,
devirt_target->dex_method_index,
target_dex_cache, class_loader, NULL, kVirtual);
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 6817f14..55ba643 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -49,7 +49,7 @@
#include "mirror/throwable.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
#include "thread.h"
#include "thread_pool.h"
#include "trampolines/trampoline_compiler.h"
@@ -377,8 +377,10 @@
compiler_->Init();
CHECK(!Runtime::Current()->IsStarted());
- if (!image_) {
- CHECK(image_classes_.get() == NULL);
+ if (image_) {
+ CHECK(image_classes_.get() != nullptr);
+ } else {
+ CHECK(image_classes_.get() == nullptr);
}
// Are we generating CFI information?
@@ -509,7 +511,7 @@
}
static DexToDexCompilationLevel GetDexToDexCompilationlevel(
- Thread* self, SirtRef<mirror::ClassLoader>& class_loader, const DexFile& dex_file,
+ Thread* self, Handle<mirror::ClassLoader>& class_loader, const DexFile& dex_file,
const DexFile::ClassDef& class_def) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
const char* descriptor = dex_file.GetClassDescriptor(class_def);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
@@ -524,7 +526,7 @@
// function). Since image classes can be verified again while compiling an application,
// we must prevent the DEX-to-DEX compiler from introducing them.
// TODO: find a way to enable "quick" instructions for image classes and remove this check.
- bool compiling_image_classes = class_loader.get() == nullptr;
+ bool compiling_image_classes = class_loader.Get() == nullptr;
if (compiling_image_classes) {
return kRequired;
} else if (klass->IsVerified()) {
@@ -574,8 +576,9 @@
{
ScopedObjectAccess soa(Thread::Current());
const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_idx);
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
- soa.Decode<mirror::ClassLoader*>(jclass_loader));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
dex_to_dex_compilation_level = GetDexToDexCompilationlevel(self, class_loader, *dex_file,
class_def);
}
@@ -591,7 +594,7 @@
ThreadPool* thread_pool, TimingLogger* timings) {
for (size_t i = 0; i != dex_files.size(); ++i) {
const DexFile* dex_file = dex_files[i];
- CHECK(dex_file != NULL);
+ CHECK(dex_file != nullptr);
ResolveDexFile(class_loader, *dex_file, thread_pool, timings);
}
}
@@ -689,6 +692,7 @@
// Make a list of descriptors for classes to include in the image
void CompilerDriver::LoadImageClasses(TimingLogger* timings)
LOCKS_EXCLUDED(Locks::mutator_lock_) {
+ CHECK(timings != nullptr);
if (!IsImage()) {
return;
}
@@ -698,10 +702,13 @@
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ CHECK(image_classes_.get() != nullptr);
for (auto it = image_classes_->begin(), end = image_classes_->end(); it != end;) {
const std::string& descriptor(*it);
- SirtRef<mirror::Class> klass(self, class_linker->FindSystemClass(self, descriptor.c_str()));
- if (klass.get() == NULL) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> klass(
+ hs.NewHandle(class_linker->FindSystemClass(self, descriptor.c_str())));
+ if (klass.Get() == NULL) {
VLOG(compiler) << "Failed to find class " << descriptor;
image_classes_->erase(it++);
self->ClearException();
@@ -714,8 +721,9 @@
// exceptions are resolved by the verifier when there is a catch block in an interested method.
// Do this here so that exception classes appear to have been specified image classes.
std::set<std::pair<uint16_t, const DexFile*> > unresolved_exception_types;
- SirtRef<mirror::Class> java_lang_Throwable(self,
- class_linker->FindSystemClass(self, "Ljava/lang/Throwable;"));
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> java_lang_Throwable(
+ hs.NewHandle(class_linker->FindSystemClass(self, "Ljava/lang/Throwable;")));
do {
unresolved_exception_types.clear();
class_linker->VisitClasses(ResolveCatchBlockExceptionsClassVisitor,
@@ -723,16 +731,17 @@
for (const std::pair<uint16_t, const DexFile*>& exception_type : unresolved_exception_types) {
uint16_t exception_type_idx = exception_type.first;
const DexFile* dex_file = exception_type.second;
- SirtRef<mirror::DexCache> dex_cache(self, class_linker->FindDexCache(*dex_file));
- SirtRef<mirror::ClassLoader> class_loader(self, nullptr);
- SirtRef<mirror::Class> klass(self, class_linker->ResolveType(*dex_file, exception_type_idx,
- dex_cache, class_loader));
- if (klass.get() == NULL) {
+ StackHandleScope<3> hs(self);
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(*dex_file)));
+ auto class_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
+ Handle<mirror::Class> klass(hs.NewHandle(
+ class_linker->ResolveType(*dex_file, exception_type_idx, dex_cache, class_loader)));
+ if (klass.Get() == NULL) {
const DexFile::TypeId& type_id = dex_file->GetTypeId(exception_type_idx);
const char* descriptor = dex_file->GetTypeDescriptor(type_id);
LOG(FATAL) << "Failed to resolve class " << descriptor;
}
- DCHECK(java_lang_Throwable->IsAssignableFrom(klass.get()));
+ DCHECK(java_lang_Throwable->IsAssignableFrom(klass.Get()));
}
// Resolving exceptions may load classes that reference more exceptions, iterate until no
// more are found
@@ -816,7 +825,9 @@
if (IsImage()) {
// We resolve all const-string strings when building for the image.
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::DexCache> dex_cache(soa.Self(), Runtime::Current()->GetClassLinker()->FindDexCache(dex_file));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache(
+ hs.NewHandle(Runtime::Current()->GetClassLinker()->FindDexCache(dex_file)));
Runtime::Current()->GetClassLinker()->ResolveString(dex_file, string_idx, dex_cache);
result = true;
}
@@ -980,16 +991,17 @@
mirror::Class* referrer_class;
mirror::DexCache* dex_cache;
{
- SirtRef<mirror::DexCache> dex_cache_sirt(soa.Self(),
- mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
- SirtRef<mirror::ClassLoader> class_loader_sirt(soa.Self(),
- soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
- SirtRef<mirror::ArtField> resolved_field_sirt(soa.Self(),
- ResolveField(soa, dex_cache_sirt, class_loader_sirt, mUnit, field_idx, false));
- referrer_class = (resolved_field_sirt.get() != nullptr)
- ? ResolveCompilingMethodsClass(soa, dex_cache_sirt, class_loader_sirt, mUnit) : nullptr;
- resolved_field = resolved_field_sirt.get();
- dex_cache = dex_cache_sirt.get();
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache_handle(
+ hs.NewHandle(mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile())));
+ Handle<mirror::ClassLoader> class_loader_handle(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader())));
+ Handle<mirror::ArtField> resolved_field_handle(hs.NewHandle(
+ ResolveField(soa, dex_cache_handle, class_loader_handle, mUnit, field_idx, false)));
+ referrer_class = (resolved_field_handle.Get() != nullptr)
+ ? ResolveCompilingMethodsClass(soa, dex_cache_handle, class_loader_handle, mUnit) : nullptr;
+ resolved_field = resolved_field_handle.Get();
+ dex_cache = dex_cache_handle.Get();
}
bool result = false;
if (resolved_field != nullptr && referrer_class != nullptr) {
@@ -1017,16 +1029,17 @@
mirror::Class* referrer_class;
mirror::DexCache* dex_cache;
{
- SirtRef<mirror::DexCache> dex_cache_sirt(soa.Self(),
- mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
- SirtRef<mirror::ClassLoader> class_loader_sirt(soa.Self(),
- soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
- SirtRef<mirror::ArtField> resolved_field_sirt(soa.Self(),
- ResolveField(soa, dex_cache_sirt, class_loader_sirt, mUnit, field_idx, true));
- referrer_class = (resolved_field_sirt.get() != nullptr)
- ? ResolveCompilingMethodsClass(soa, dex_cache_sirt, class_loader_sirt, mUnit) : nullptr;
- resolved_field = resolved_field_sirt.get();
- dex_cache = dex_cache_sirt.get();
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache_handle(
+ hs.NewHandle(mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile())));
+ Handle<mirror::ClassLoader> class_loader_handle(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader())));
+ Handle<mirror::ArtField> resolved_field_handle(hs.NewHandle(
+ ResolveField(soa, dex_cache_handle, class_loader_handle, mUnit, field_idx, true)));
+ referrer_class = (resolved_field_handle.Get() != nullptr)
+ ? ResolveCompilingMethodsClass(soa, dex_cache_handle, class_loader_handle, mUnit) : nullptr;
+ resolved_field = resolved_field_handle.Get();
+ dex_cache = dex_cache_handle.Get();
}
bool result = false;
if (resolved_field != nullptr && referrer_class != nullptr) {
@@ -1168,17 +1181,18 @@
// Try to resolve the method and compiling method's class.
mirror::ArtMethod* resolved_method;
mirror::Class* referrer_class;
- SirtRef<mirror::DexCache> dex_cache(soa.Self(),
- mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
- soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache(
+ hs.NewHandle(mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile())));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
+ soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader())));
{
uint32_t method_idx = target_method->dex_method_index;
- SirtRef<mirror::ArtMethod> resolved_method_sirt(soa.Self(),
- ResolveMethod(soa, dex_cache, class_loader, mUnit, method_idx, orig_invoke_type));
- referrer_class = (resolved_method_sirt.get() != nullptr)
+ Handle<mirror::ArtMethod> resolved_method_handle(hs.NewHandle(
+ ResolveMethod(soa, dex_cache, class_loader, mUnit, method_idx, orig_invoke_type)));
+ referrer_class = (resolved_method_handle.Get() != nullptr)
? ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit) : nullptr;
- resolved_method = resolved_method_sirt.get();
+ resolved_method = resolved_method_handle.Get();
}
bool result = false;
if (resolved_method != nullptr) {
@@ -1196,7 +1210,7 @@
// Devirtualization not enabled. Inline IsFastInvoke(), dropping the devirtualization parts.
if (UNLIKELY(referrer_class == nullptr) ||
UNLIKELY(!referrer_class->CanAccessResolvedMethod(resolved_method->GetDeclaringClass(),
- resolved_method, dex_cache.get(),
+ resolved_method, dex_cache.Get(),
target_method->dex_method_index)) ||
*invoke_type == kSuper) {
// Slow path. (Without devirtualization, all super calls go slow path as well.)
@@ -1469,8 +1483,10 @@
const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
if (!SkipClass(class_linker, jclass_loader, dex_file, class_def)) {
ScopedObjectAccess soa(self);
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(jclass_loader));
- SirtRef<mirror::DexCache> dex_cache(soa.Self(), class_linker->FindDexCache(dex_file));
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file)));
// Resolve the class.
mirror::Class* klass = class_linker->ResolveType(dex_file, class_def.class_idx_, dex_cache,
class_loader);
@@ -1556,9 +1572,10 @@
ScopedObjectAccess soa(Thread::Current());
ClassLinker* class_linker = manager->GetClassLinker();
const DexFile& dex_file = *manager->GetDexFile();
- SirtRef<mirror::DexCache> dex_cache(soa.Self(), class_linker->FindDexCache(dex_file));
- SirtRef<mirror::ClassLoader> class_loader(
- soa.Self(), soa.Decode<mirror::ClassLoader*>(manager->GetClassLoader()));
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file)));
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(manager->GetClassLoader())));
mirror::Class* klass = class_linker->ResolveType(dex_file, type_idx, dex_cache, class_loader);
if (klass == NULL) {
@@ -1611,11 +1628,12 @@
const char* descriptor = dex_file.GetClassDescriptor(class_def);
ClassLinker* class_linker = manager->GetClassLinker();
jobject jclass_loader = manager->GetClassLoader();
- SirtRef<mirror::ClassLoader> class_loader(
- soa.Self(), soa.Decode<mirror::ClassLoader*>(jclass_loader));
- SirtRef<mirror::Class> klass(soa.Self(), class_linker->FindClass(soa.Self(), descriptor,
- class_loader));
- if (klass.get() == nullptr) {
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+ Handle<mirror::Class> klass(
+ hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader)));
+ if (klass.Get() == nullptr) {
CHECK(soa.Self()->IsExceptionPending());
soa.Self()->ClearException();
@@ -1624,7 +1642,7 @@
* This is to ensure the class is structurally sound for compilation. An unsound class
* will be rejected by the verifier and later skipped during compilation in the compiler.
*/
- SirtRef<mirror::DexCache> dex_cache(soa.Self(), class_linker->FindDexCache(dex_file));
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file)));
std::string error_msg;
if (verifier::MethodVerifier::VerifyClass(&dex_file, dex_cache, class_loader, &class_def, true,
&error_msg) ==
@@ -1632,8 +1650,8 @@
LOG(ERROR) << "Verification failed on class " << PrettyDescriptor(descriptor)
<< " because: " << error_msg;
}
- } else if (!SkipClass(jclass_loader, dex_file, klass.get())) {
- CHECK(klass->IsResolved()) << PrettyClass(klass.get());
+ } else if (!SkipClass(jclass_loader, dex_file, klass.Get())) {
+ CHECK(klass->IsResolved()) << PrettyClass(klass.Get());
class_linker->VerifyClass(klass);
if (klass->IsErroneous()) {
@@ -1643,7 +1661,7 @@
}
CHECK(klass->IsCompileTimeVerified() || klass->IsErroneous())
- << PrettyDescriptor(klass.get()) << ": state=" << klass->GetStatus();
+ << PrettyDescriptor(klass.Get()) << ": state=" << klass->GetStatus();
}
soa.Self()->AssertNoPendingException();
}
@@ -1666,13 +1684,13 @@
const char* descriptor = dex_file.StringDataByIdx(class_type_id.descriptor_idx_);
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
- soa.Decode<mirror::ClassLoader*>(jclass_loader));
- SirtRef<mirror::Class> klass(soa.Self(),
- manager->GetClassLinker()->FindClass(soa.Self(), descriptor,
- class_loader));
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+ Handle<mirror::Class> klass(
+ hs.NewHandle(manager->GetClassLinker()->FindClass(soa.Self(), descriptor, class_loader)));
- if (klass.get() != nullptr && !SkipClass(jclass_loader, dex_file, klass.get())) {
+ if (klass.Get() != nullptr && !SkipClass(jclass_loader, dex_file, klass.Get())) {
// Only try to initialize classes that were successfully verified.
if (klass->IsVerified()) {
// Attempt to initialize the class but bail if we either need to initialize the super-class
@@ -1687,8 +1705,8 @@
// parent-to-child and a child-to-parent lock ordering and consequent potential deadlock.
// We need to use an ObjectLock due to potential suspension in the interpreting code. Rather
// than use a special Object for the purpose we use the Class of java.lang.Class.
- SirtRef<mirror::Class> sirt_klass(soa.Self(), klass->GetClass());
- ObjectLock<mirror::Class> lock(soa.Self(), &sirt_klass);
+ Handle<mirror::Class> h_klass(hs.NewHandle(klass->GetClass()));
+ ObjectLock<mirror::Class> lock(soa.Self(), &h_klass);
// Attempt to initialize allowing initialization of parent classes but still not static
// fields.
manager->GetClassLinker()->EnsureInitialized(klass, false, true);
@@ -1803,8 +1821,9 @@
DexToDexCompilationLevel dex_to_dex_compilation_level = kDontDexToDexCompile;
{
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
- soa.Decode<mirror::ClassLoader*>(jclass_loader));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
dex_to_dex_compilation_level = GetDexToDexCompilationlevel(soa.Self(), class_loader, dex_file,
class_def);
}
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 6ac9cf7..f3db41f 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -52,7 +52,7 @@
class OatWriter;
class ParallelCompilationManager;
class ScopedObjectAccess;
-template<class T> class SirtRef;
+template<class T> class Handle;
class TimingLogger;
class VerificationResults;
class VerifiedMethod;
@@ -221,15 +221,15 @@
// Resolve compiling method's class. Returns nullptr on failure.
mirror::Class* ResolveCompilingMethodsClass(
- ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit)
+ ScopedObjectAccess& soa, const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Resolve a field. Returns nullptr on failure, including incompatible class change.
// NOTE: Unlike ClassLinker's ResolveField(), this method enforces is_static.
mirror::ArtField* ResolveField(
- ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
+ ScopedObjectAccess& soa, const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
uint32_t field_idx, bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -258,8 +258,8 @@
// Resolve a method. Returns nullptr on failure, including incompatible class change.
mirror::ArtMethod* ResolveMethod(
- ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
+ ScopedObjectAccess& soa, const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
uint32_t method_idx, InvokeType invoke_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -277,8 +277,8 @@
// Can we fast-path an INVOKE? If no, returns 0. If yes, returns a non-zero opaque flags value
// for ProcessedInvoke() and computes the necessary lowering info.
int IsFastInvoke(
- ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
+ ScopedObjectAccess& soa, const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
mirror::Class* referrer_class, mirror::ArtMethod* resolved_method, InvokeType* invoke_type,
MethodReference* target_method, const MethodReference* devirt_target,
uintptr_t* direct_code, uintptr_t* direct_method)
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index 86034c8..fe3a4e6 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -19,7 +19,7 @@
#include <stdint.h>
#include <stdio.h>
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#include "class_linker.h"
#include "common_compiler_test.h"
#include "dex_file.h"
@@ -30,7 +30,7 @@
#include "mirror/dex_cache-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
namespace art {
@@ -80,7 +80,9 @@
const DexFile::ClassDef& class_def = dex_file.GetClassDef(i);
const char* descriptor = dex_file.GetClassDescriptor(class_def);
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::ClassLoader> loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(class_loader));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(class_loader)));
mirror::Class* c = class_linker->FindClass(soa.Self(), descriptor, loader);
CHECK(c != NULL);
for (size_t i = 0; i < c->NumDirectMethods(); i++) {
@@ -150,7 +152,8 @@
jobject class_loader;
{
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::ClassLoader> null_loader(soa.Self(), nullptr);
+ StackHandleScope<1> hs(soa.Self());
+ auto null_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
CompileVirtualMethod(null_loader, "java.lang.Class", "isFinalizable", "()Z");
CompileDirectMethod(null_loader, "java.lang.Object", "<init>", "()V");
class_loader = LoadDex("AbstractMethod");
diff --git a/compiler/elf_fixup.cc b/compiler/elf_fixup.cc
index 6fd4a73..571a091 100644
--- a/compiler/elf_fixup.cc
+++ b/compiler/elf_fixup.cc
@@ -22,7 +22,7 @@
#include "base/stringprintf.h"
#include "elf_file.h"
#include "elf_writer.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
namespace art {
diff --git a/compiler/elf_stripper.cc b/compiler/elf_stripper.cc
index 42291b2..b0fa63c 100644
--- a/compiler/elf_stripper.cc
+++ b/compiler/elf_stripper.cc
@@ -20,7 +20,7 @@
#include <sys/types.h>
#include <vector>
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#include "base/logging.h"
#include "elf_file.h"
#include "elf_utils.h"
diff --git a/compiler/elf_writer_mclinker.cc b/compiler/elf_writer_mclinker.cc
index f688103..aa4a5b2 100644
--- a/compiler/elf_writer_mclinker.cc
+++ b/compiler/elf_writer_mclinker.cc
@@ -361,8 +361,9 @@
ClassLinker* linker = Runtime::Current()->GetClassLinker();
// Unchecked as we hold mutator_lock_ on entry.
ScopedObjectAccessUnchecked soa(Thread::Current());
- SirtRef<mirror::DexCache> dex_cache(soa.Self(), linker->FindDexCache(dex_file));
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(), nullptr);
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(linker->FindDexCache(dex_file)));
+ auto class_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
method = linker->ResolveMethod(dex_file, method_idx, dex_cache, class_loader, NULL, invoke_type);
CHECK(method != NULL);
}
diff --git a/compiler/elf_writer_mclinker.h b/compiler/elf_writer_mclinker.h
index 13757ed..3c1a47b 100644
--- a/compiler/elf_writer_mclinker.h
+++ b/compiler/elf_writer_mclinker.h
@@ -19,7 +19,7 @@
#include "elf_writer.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#include "safe_map.h"
namespace mcld {
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 7c5741b..5a79542 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -27,7 +27,7 @@
#include "lock_word.h"
#include "mirror/object-inl.h"
#include "signal_catcher.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#include "utils.h"
#include "vector_output_stream.h"
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 62817e7..20a66d4 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -51,8 +51,8 @@
#include "object_utils.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
-#include "sirt_ref-inl.h"
-#include "UniquePtr.h"
+#include "handle_scope-inl.h"
+#include "UniquePtrCompat.h"
#include "utils.h"
using ::art::mirror::ArtField;
@@ -382,16 +382,14 @@
DCHECK_EQ(obj, obj->AsString()->Intern());
return;
}
- Thread* self = Thread::Current();
- SirtRef<Object> sirt_obj(self, obj);
- mirror::String* interned = obj->AsString()->Intern();
- if (sirt_obj.get() != interned) {
+ mirror::String* const interned = obj->AsString()->Intern();
+ if (obj != interned) {
if (!IsImageOffsetAssigned(interned)) {
// interned obj is after us, allocate its location early
AssignImageOffset(interned);
}
// point those looking for this object to the interned version.
- SetImageOffset(sirt_obj.get(), GetImageOffset(interned));
+ SetImageOffset(obj, GetImageOffset(interned));
return;
}
// else (obj == interned), nothing to do but fall through to the normal case
@@ -404,20 +402,22 @@
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
Thread* self = Thread::Current();
- SirtRef<Class> object_array_class(self, class_linker->FindSystemClass(self,
- "[Ljava/lang/Object;"));
+ StackHandleScope<3> hs(self);
+ Handle<Class> object_array_class(hs.NewHandle(
+ class_linker->FindSystemClass(self, "[Ljava/lang/Object;")));
// build an Object[] of all the DexCaches used in the source_space_
- ObjectArray<Object>* dex_caches = ObjectArray<Object>::Alloc(self, object_array_class.get(),
- class_linker->GetDexCaches().size());
+ Handle<ObjectArray<Object>> dex_caches(
+ hs.NewHandle(ObjectArray<Object>::Alloc(self, object_array_class.Get(),
+ class_linker->GetDexCaches().size())));
int i = 0;
for (DexCache* dex_cache : class_linker->GetDexCaches()) {
dex_caches->Set<false>(i++, dex_cache);
}
// build an Object[] of the roots needed to restore the runtime
- SirtRef<ObjectArray<Object> > image_roots(
- self, ObjectArray<Object>::Alloc(self, object_array_class.get(), ImageHeader::kImageRootsMax));
+ Handle<ObjectArray<Object> > image_roots(hs.NewHandle(
+ ObjectArray<Object>::Alloc(self, object_array_class.Get(), ImageHeader::kImageRootsMax)));
image_roots->Set<false>(ImageHeader::kResolutionMethod, runtime->GetResolutionMethod());
image_roots->Set<false>(ImageHeader::kImtConflictMethod, runtime->GetImtConflictMethod());
image_roots->Set<false>(ImageHeader::kDefaultImt, runtime->GetDefaultImt());
@@ -427,27 +427,28 @@
runtime->GetCalleeSaveMethod(Runtime::kRefsOnly));
image_roots->Set<false>(ImageHeader::kRefsAndArgsSaveMethod,
runtime->GetCalleeSaveMethod(Runtime::kRefsAndArgs));
- image_roots->Set<false>(ImageHeader::kDexCaches, dex_caches);
+ image_roots->Set<false>(ImageHeader::kDexCaches, dex_caches.Get());
image_roots->Set<false>(ImageHeader::kClassRoots, class_linker->GetClassRoots());
for (int i = 0; i < ImageHeader::kImageRootsMax; i++) {
CHECK(image_roots->Get(i) != NULL);
}
- return image_roots.get();
+ return image_roots.Get();
}
// Walk instance fields of the given Class. Separate function to allow recursion on the super
// class.
void ImageWriter::WalkInstanceFields(mirror::Object* obj, mirror::Class* klass) {
// Visit fields of parent classes first.
- SirtRef<mirror::Class> sirt_class(Thread::Current(), klass);
- mirror::Class* super = sirt_class->GetSuperClass();
+ StackHandleScope<1> hs(Thread::Current());
+ Handle<mirror::Class> h_class(hs.NewHandle(klass));
+ mirror::Class* super = h_class->GetSuperClass();
if (super != nullptr) {
WalkInstanceFields(obj, super);
}
//
- size_t num_reference_fields = sirt_class->NumReferenceInstanceFields();
+ size_t num_reference_fields = h_class->NumReferenceInstanceFields();
for (size_t i = 0; i < num_reference_fields; ++i) {
- mirror::ArtField* field = sirt_class->GetInstanceField(i);
+ mirror::ArtField* field = h_class->GetInstanceField(i);
MemberOffset field_offset = field->GetOffset();
mirror::Object* value = obj->GetFieldObject<mirror::Object>(field_offset);
if (value != nullptr) {
@@ -460,28 +461,28 @@
void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) {
if (!IsImageOffsetAssigned(obj)) {
// Walk instance fields of all objects
- Thread* self = Thread::Current();
- SirtRef<mirror::Object> sirt_obj(self, obj);
- SirtRef<mirror::Class> klass(self, obj->GetClass());
+ StackHandleScope<2> hs(Thread::Current());
+ Handle<mirror::Object> h_obj(hs.NewHandle(obj));
+ Handle<mirror::Class> klass(hs.NewHandle(obj->GetClass()));
// visit the object itself.
- CalculateObjectOffsets(sirt_obj.get());
- WalkInstanceFields(sirt_obj.get(), klass.get());
+ CalculateObjectOffsets(h_obj.Get());
+ WalkInstanceFields(h_obj.Get(), klass.Get());
// Walk static fields of a Class.
- if (sirt_obj->IsClass()) {
+ if (h_obj->IsClass()) {
size_t num_static_fields = klass->NumReferenceStaticFields();
for (size_t i = 0; i < num_static_fields; ++i) {
mirror::ArtField* field = klass->GetStaticField(i);
MemberOffset field_offset = field->GetOffset();
- mirror::Object* value = sirt_obj->GetFieldObject<mirror::Object>(field_offset);
+ mirror::Object* value = h_obj->GetFieldObject<mirror::Object>(field_offset);
if (value != nullptr) {
WalkFieldsInOrder(value);
}
}
- } else if (sirt_obj->IsObjectArray()) {
+ } else if (h_obj->IsObjectArray()) {
// Walk elements of an object array.
- int32_t length = sirt_obj->AsObjectArray<mirror::Object>()->GetLength();
+ int32_t length = h_obj->AsObjectArray<mirror::Object>()->GetLength();
for (int32_t i = 0; i < length; i++) {
- mirror::ObjectArray<mirror::Object>* obj_array = sirt_obj->AsObjectArray<mirror::Object>();
+ mirror::ObjectArray<mirror::Object>* obj_array = h_obj->AsObjectArray<mirror::Object>();
mirror::Object* value = obj_array->Get(i);
if (value != nullptr) {
WalkFieldsInOrder(value);
@@ -500,7 +501,8 @@
void ImageWriter::CalculateNewObjectOffsets(size_t oat_loaded_size, size_t oat_data_offset) {
CHECK_NE(0U, oat_loaded_size);
Thread* self = Thread::Current();
- SirtRef<ObjectArray<Object> > image_roots(self, CreateImageRoots());
+ StackHandleScope<1> hs(self);
+ Handle<ObjectArray<Object>> image_roots(hs.NewHandle(CreateImageRoots()));
gc::Heap* heap = Runtime::Current()->GetHeap();
DCHECK_EQ(0U, image_end_);
@@ -533,7 +535,7 @@
static_cast<uint32_t>(image_end_),
RoundUp(image_end_, kPageSize),
RoundUp(bitmap_bytes, kPageSize),
- PointerToLowMemUInt32(GetImageAddress(image_roots.get())),
+ PointerToLowMemUInt32(GetImageAddress(image_roots.Get())),
oat_file_->GetOatHeader().GetChecksum(),
PointerToLowMemUInt32(oat_file_begin),
PointerToLowMemUInt32(oat_data_begin_),
@@ -691,9 +693,10 @@
static ArtMethod* GetTargetMethod(const CompilerDriver::CallPatchInformation* patch)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- Thread* self = Thread::Current();
- SirtRef<mirror::DexCache> dex_cache(self, class_linker->FindDexCache(*patch->GetTargetDexFile()));
- SirtRef<mirror::ClassLoader> class_loader(self, nullptr);
+ StackHandleScope<2> hs(Thread::Current());
+ Handle<mirror::DexCache> dex_cache(
+ hs.NewHandle(class_linker->FindDexCache(*patch->GetTargetDexFile())));
+ auto class_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
ArtMethod* method = class_linker->ResolveMethod(*patch->GetTargetDexFile(),
patch->GetTargetMethodIdx(),
dex_cache,
@@ -714,9 +717,9 @@
static Class* GetTargetType(const CompilerDriver::TypePatchInformation* patch)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- Thread* self = Thread::Current();
- SirtRef<mirror::DexCache> dex_cache(self, class_linker->FindDexCache(patch->GetDexFile()));
- SirtRef<mirror::ClassLoader> class_loader(self, nullptr);
+ StackHandleScope<2> hs(Thread::Current());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(patch->GetDexFile())));
+ auto class_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
Class* klass = class_linker->ResolveType(patch->GetDexFile(),
patch->GetTargetTypeIdx(),
dex_cache,
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 7e22a96..f8df2bb 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -30,7 +30,7 @@
#include "os.h"
#include "safe_map.h"
#include "gc/space/space.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
namespace art {
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 6b5e55e..561d00f 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -31,7 +31,7 @@
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "thread.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
extern "C" JNIEXPORT jint JNICALL Java_MyClassNatives_bar(JNIEnv*, jobject, jint count) {
return count + 1;
@@ -48,7 +48,9 @@
void CompileForTest(jobject class_loader, bool direct,
const char* method_name, const char* method_sig) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::ClassLoader> loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(class_loader));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(class_loader)));
// Compile the native method before starting the runtime
mirror::Class* c = class_linker_->FindClass(soa.Self(), "LMyClassNatives;", loader);
mirror::ArtMethod* method;
@@ -153,8 +155,9 @@
ScopedObjectAccess soa(Thread::Current());
std::string reason;
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
- soa.Decode<mirror::ClassLoader*>(class_loader_));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(class_loader_)));
ASSERT_TRUE(
Runtime::Current()->GetJavaVM()->LoadNativeLibrary("", class_loader, &reason)) << reason;
@@ -169,8 +172,9 @@
ScopedObjectAccess soa(Thread::Current());
std::string reason;
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
- soa.Decode<mirror::ClassLoader*>(class_loader_));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(class_loader_)));
ASSERT_TRUE(
Runtime::Current()->GetJavaVM()->LoadNativeLibrary("", class_loader, &reason)) << reason;
diff --git a/compiler/jni/portable/jni_compiler.cc b/compiler/jni/portable/jni_compiler.cc
index 0c14346..d2f54f8 100644
--- a/compiler/jni/portable/jni_compiler.cc
+++ b/compiler/jni/portable/jni_compiler.cc
@@ -98,7 +98,7 @@
arg_begin = arg_iter;
// Count the number of Object* arguments
- uint32_t sirt_size = 1;
+ uint32_t handle_scope_size = 1;
// "this" object pointer for non-static
// "class" object pointer for static
for (unsigned i = 0; arg_iter != arg_end; ++i, ++arg_iter) {
@@ -106,12 +106,12 @@
arg_iter->setName(StringPrintf("a%u", i));
#endif
if (arg_iter->getType() == irb_.getJObjectTy()) {
- ++sirt_size;
+ ++handle_scope_size;
}
}
// Shadow stack
- ::llvm::StructType* shadow_frame_type = irb_.getShadowFrameTy(sirt_size);
+ ::llvm::StructType* shadow_frame_type = irb_.getShadowFrameTy(handle_scope_size);
::llvm::AllocaInst* shadow_frame_ = irb_.CreateAlloca(shadow_frame_type);
// Store the dex pc
@@ -123,7 +123,7 @@
// Push the shadow frame
::llvm::Value* shadow_frame_upcast = irb_.CreateConstGEP2_32(shadow_frame_, 0, 0);
::llvm::Value* old_shadow_frame =
- irb_.Runtime().EmitPushShadowFrame(shadow_frame_upcast, method_object_addr, sirt_size);
+ irb_.Runtime().EmitPushShadowFrame(shadow_frame_upcast, method_object_addr, handle_scope_size);
// Get JNIEnv
::llvm::Value* jni_env_object_addr =
@@ -148,35 +148,35 @@
// Variables for GetElementPtr
::llvm::Value* gep_index[] = {
irb_.getInt32(0), // No displacement for shadow frame pointer
- irb_.getInt32(1), // SIRT
+ irb_.getInt32(1), // handle scope
NULL,
};
- size_t sirt_member_index = 0;
+ size_t handle_scope_member_index = 0;
- // Store the "this object or class object" to SIRT
- gep_index[2] = irb_.getInt32(sirt_member_index++);
- ::llvm::Value* sirt_field_addr = irb_.CreateBitCast(irb_.CreateGEP(shadow_frame_, gep_index),
+ // Store the "this object or class object" to handle scope
+ gep_index[2] = irb_.getInt32(handle_scope_member_index++);
+ ::llvm::Value* handle_scope_field_addr = irb_.CreateBitCast(irb_.CreateGEP(shadow_frame_, gep_index),
irb_.getJObjectTy()->getPointerTo());
- irb_.CreateStore(this_object_or_class_object, sirt_field_addr, kTBAAShadowFrame);
+ irb_.CreateStore(this_object_or_class_object, handle_scope_field_addr, kTBAAShadowFrame);
// Push the "this object or class object" to out args
- this_object_or_class_object = irb_.CreateBitCast(sirt_field_addr, irb_.getJObjectTy());
+ this_object_or_class_object = irb_.CreateBitCast(handle_scope_field_addr, irb_.getJObjectTy());
args.push_back(this_object_or_class_object);
- // Store arguments to SIRT, and push back to args
+ // Store arguments to handle scope, and push back to args
for (arg_iter = arg_begin; arg_iter != arg_end; ++arg_iter) {
if (arg_iter->getType() == irb_.getJObjectTy()) {
- // Store the reference type arguments to SIRT
- gep_index[2] = irb_.getInt32(sirt_member_index++);
- ::llvm::Value* sirt_field_addr = irb_.CreateBitCast(irb_.CreateGEP(shadow_frame_, gep_index),
+ // Store the reference type arguments to handle scope
+ gep_index[2] = irb_.getInt32(handle_scope_member_index++);
+ ::llvm::Value* handle_scope_field_addr = irb_.CreateBitCast(irb_.CreateGEP(shadow_frame_, gep_index),
irb_.getJObjectTy()->getPointerTo());
- irb_.CreateStore(arg_iter, sirt_field_addr, kTBAAShadowFrame);
- // Note null is placed in the SIRT but the jobject passed to the native code must be null
- // (not a pointer into the SIRT as with regular references).
+ irb_.CreateStore(arg_iter, handle_scope_field_addr, kTBAAShadowFrame);
+      // Note that null is placed in the handle scope, but the jobject passed to the native code
+      // must be null (not a pointer into the handle scope, as with regular references).
::llvm::Value* equal_null = irb_.CreateICmpEQ(arg_iter, irb_.getJNull());
::llvm::Value* arg =
irb_.CreateSelect(equal_null,
irb_.getJNull(),
- irb_.CreateBitCast(sirt_field_addr, irb_.getJObjectTy()));
+ irb_.CreateBitCast(handle_scope_field_addr, irb_.getJObjectTy()));
args.push_back(arg);
} else {
args.push_back(arg_iter);
diff --git a/compiler/jni/quick/arm/calling_convention_arm.cc b/compiler/jni/quick/arm/calling_convention_arm.cc
index ae18d2e..649a80f 100644
--- a/compiler/jni/quick/arm/calling_convention_arm.cc
+++ b/compiler/jni/quick/arm/calling_convention_arm.cc
@@ -144,10 +144,10 @@
size_t ArmJniCallingConvention::FrameSize() {
// Method*, LR and callee save area size, local reference segment state
size_t frame_data_size = (3 + CalleeSaveRegisters().size()) * kFramePointerSize;
- // References plus 2 words for SIRT header
- size_t sirt_size = StackIndirectReferenceTable::GetAlignedSirtSizeTarget(kFramePointerSize, ReferenceCount());
+ // References plus 2 words for HandleScope header
+ size_t handle_scope_size = HandleScope::GetAlignedHandleScopeSizeTarget(kFramePointerSize, ReferenceCount());
// Plus return value spill area size
- return RoundUp(frame_data_size + sirt_size + SizeOfReturnValue(), kStackAlignment);
+ return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
size_t ArmJniCallingConvention::OutArgSize() {
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.cc b/compiler/jni/quick/arm64/calling_convention_arm64.cc
index 6212a23..ffd27ee 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.cc
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.cc
@@ -197,10 +197,10 @@
size_t Arm64JniCallingConvention::FrameSize() {
// Method*, callee save area size, local reference segment state
size_t frame_data_size = ((1 + CalleeSaveRegisters().size()) * kFramePointerSize) + sizeof(uint32_t);
- // References plus 2 words for SIRT header
- size_t sirt_size = StackIndirectReferenceTable::GetAlignedSirtSizeTarget(kFramePointerSize, ReferenceCount());
+ // References plus 2 words for HandleScope header
+ size_t handle_scope_size = HandleScope::GetAlignedHandleScopeSizeTarget(kFramePointerSize, ReferenceCount());
// Plus return value spill area size
- return RoundUp(frame_data_size + sirt_size + SizeOfReturnValue(), kStackAlignment);
+ return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
size_t Arm64JniCallingConvention::OutArgSize() {
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index a99a4c2..95c2d40 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -126,8 +126,8 @@
}
FrameOffset JniCallingConvention::SavedLocalReferenceCookieOffset() const {
- size_t references_size = sirt_pointer_size_ * ReferenceCount(); // size excluding header
- return FrameOffset(SirtReferencesOffset().Int32Value() + references_size);
+ size_t references_size = handle_scope_pointer_size_ * ReferenceCount(); // size excluding header
+ return FrameOffset(HandleScopeReferencesOffset().Int32Value() + references_size);
}
FrameOffset JniCallingConvention::ReturnValueSaveLocation() const {
@@ -219,13 +219,13 @@
}
}
-// Return position of SIRT entry holding reference at the current iterator
+// Return position of handle scope entry holding reference at the current iterator
// position
-FrameOffset JniCallingConvention::CurrentParamSirtEntryOffset() {
+FrameOffset JniCallingConvention::CurrentParamHandleScopeEntryOffset() {
CHECK(IsCurrentParamAReference());
- CHECK_LT(SirtLinkOffset(), SirtNumRefsOffset());
- int result = SirtReferencesOffset().Int32Value() + itr_refs_ * sirt_pointer_size_;
- CHECK_GT(result, SirtNumRefsOffset().Int32Value());
+ CHECK_LT(HandleScopeLinkOffset(), HandleScopeNumRefsOffset());
+ int result = HandleScopeReferencesOffset().Int32Value() + itr_refs_ * handle_scope_pointer_size_;
+ CHECK_GT(result, HandleScopeNumRefsOffset().Int32Value());
return FrameOffset(result);
}
diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h
index 18afd58..2a6e7d9 100644
--- a/compiler/jni/quick/calling_convention.h
+++ b/compiler/jni/quick/calling_convention.h
@@ -18,7 +18,7 @@
#define ART_COMPILER_JNI_QUICK_CALLING_CONVENTION_H_
#include <vector>
-#include "stack_indirect_reference_table.h"
+#include "handle_scope.h"
#include "thread.h"
#include "utils/managed_register.h"
@@ -73,7 +73,7 @@
: itr_slots_(0), itr_refs_(0), itr_args_(0), itr_longs_and_doubles_(0),
itr_float_and_doubles_(0), displacement_(0),
frame_pointer_size_(frame_pointer_size),
- sirt_pointer_size_(sizeof(StackReference<mirror::Object>)),
+ handle_scope_pointer_size_(sizeof(StackReference<mirror::Object>)),
is_static_(is_static), is_synchronized_(is_synchronized),
shorty_(shorty) {
num_args_ = (is_static ? 0 : 1) + strlen(shorty) - 1;
@@ -197,8 +197,8 @@
FrameOffset displacement_;
// The size of a reference.
const size_t frame_pointer_size_;
- // The size of a reference entry within the SIRT.
- const size_t sirt_pointer_size_;
+ // The size of a reference entry within the handle scope.
+ const size_t handle_scope_pointer_size_;
private:
const bool is_static_;
@@ -315,26 +315,25 @@
virtual FrameOffset CurrentParamStackOffset() = 0;
// Iterator interface extension for JNI
- FrameOffset CurrentParamSirtEntryOffset();
+ FrameOffset CurrentParamHandleScopeEntryOffset();
- // Position of SIRT and interior fields
- FrameOffset SirtOffset() const {
+ // Position of handle scope and interior fields
+ FrameOffset HandleScopeOffset() const {
return FrameOffset(this->displacement_.Int32Value() + frame_pointer_size_); // above Method*
}
- FrameOffset SirtLinkOffset() const {
- return FrameOffset(SirtOffset().Int32Value() +
- StackIndirectReferenceTable::LinkOffset(frame_pointer_size_));
+ FrameOffset HandleScopeLinkOffset() const {
+ return FrameOffset(HandleScopeOffset().Int32Value() + HandleScope::LinkOffset(frame_pointer_size_));
}
- FrameOffset SirtNumRefsOffset() const {
- return FrameOffset(SirtOffset().Int32Value() +
- StackIndirectReferenceTable::NumberOfReferencesOffset(frame_pointer_size_));
+ FrameOffset HandleScopeNumRefsOffset() const {
+ return FrameOffset(HandleScopeOffset().Int32Value() +
+ HandleScope::NumberOfReferencesOffset(frame_pointer_size_));
}
- FrameOffset SirtReferencesOffset() const {
- return FrameOffset(SirtOffset().Int32Value() +
- StackIndirectReferenceTable::ReferencesOffset(frame_pointer_size_));
+ FrameOffset HandleScopeReferencesOffset() const {
+ return FrameOffset(HandleScopeOffset().Int32Value() +
+ HandleScope::ReferencesOffset(frame_pointer_size_));
}
virtual ~JniCallingConvention() {}
@@ -350,7 +349,7 @@
size_t frame_pointer_size)
: CallingConvention(is_static, is_synchronized, shorty, frame_pointer_size) {}
- // Number of stack slots for outgoing arguments, above which the SIRT is
+ // Number of stack slots for outgoing arguments, above which the handle scope is
// located
virtual size_t NumberOfOutgoingStackArgs() = 0;
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 5a22170..02d6fa5 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -33,7 +33,7 @@
#include "utils/mips/managed_register_mips.h"
#include "utils/x86/managed_register_x86.h"
#include "thread.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#define __ jni_asm->
@@ -103,54 +103,54 @@
const std::vector<ManagedRegister>& callee_save_regs = main_jni_conv->CalleeSaveRegisters();
__ BuildFrame(frame_size, mr_conv->MethodRegister(), callee_save_regs, mr_conv->EntrySpills());
- // 2. Set up the StackIndirectReferenceTable
+ // 2. Set up the HandleScope
mr_conv->ResetIterator(FrameOffset(frame_size));
main_jni_conv->ResetIterator(FrameOffset(0));
- __ StoreImmediateToFrame(main_jni_conv->SirtNumRefsOffset(),
+ __ StoreImmediateToFrame(main_jni_conv->HandleScopeNumRefsOffset(),
main_jni_conv->ReferenceCount(),
mr_conv->InterproceduralScratchRegister());
if (is_64_bit_target) {
- __ CopyRawPtrFromThread64(main_jni_conv->SirtLinkOffset(),
- Thread::TopSirtOffset<8>(),
+ __ CopyRawPtrFromThread64(main_jni_conv->HandleScopeLinkOffset(),
+ Thread::TopHandleScopeOffset<8>(),
mr_conv->InterproceduralScratchRegister());
- __ StoreStackOffsetToThread64(Thread::TopSirtOffset<8>(),
- main_jni_conv->SirtOffset(),
+ __ StoreStackOffsetToThread64(Thread::TopHandleScopeOffset<8>(),
+ main_jni_conv->HandleScopeOffset(),
mr_conv->InterproceduralScratchRegister());
} else {
- __ CopyRawPtrFromThread32(main_jni_conv->SirtLinkOffset(),
- Thread::TopSirtOffset<4>(),
+ __ CopyRawPtrFromThread32(main_jni_conv->HandleScopeLinkOffset(),
+ Thread::TopHandleScopeOffset<4>(),
mr_conv->InterproceduralScratchRegister());
- __ StoreStackOffsetToThread32(Thread::TopSirtOffset<4>(),
- main_jni_conv->SirtOffset(),
+ __ StoreStackOffsetToThread32(Thread::TopHandleScopeOffset<4>(),
+ main_jni_conv->HandleScopeOffset(),
mr_conv->InterproceduralScratchRegister());
}
- // 3. Place incoming reference arguments into SIRT
+ // 3. Place incoming reference arguments into handle scope
main_jni_conv->Next(); // Skip JNIEnv*
// 3.5. Create Class argument for static methods out of passed method
if (is_static) {
- FrameOffset sirt_offset = main_jni_conv->CurrentParamSirtEntryOffset();
- // Check sirt offset is within frame
- CHECK_LT(sirt_offset.Uint32Value(), frame_size);
+ FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
+ // Check handle scope offset is within frame
+ CHECK_LT(handle_scope_offset.Uint32Value(), frame_size);
__ LoadRef(main_jni_conv->InterproceduralScratchRegister(),
mr_conv->MethodRegister(), mirror::ArtMethod::DeclaringClassOffset());
__ VerifyObject(main_jni_conv->InterproceduralScratchRegister(), false);
- __ StoreRef(sirt_offset, main_jni_conv->InterproceduralScratchRegister());
- main_jni_conv->Next(); // in SIRT so move to next argument
+ __ StoreRef(handle_scope_offset, main_jni_conv->InterproceduralScratchRegister());
+ main_jni_conv->Next(); // in handle scope so move to next argument
}
while (mr_conv->HasNext()) {
CHECK(main_jni_conv->HasNext());
bool ref_param = main_jni_conv->IsCurrentParamAReference();
CHECK(!ref_param || mr_conv->IsCurrentParamAReference());
- // References need placing in SIRT and the entry value passing
+ // References need placing in handle scope and the entry value passing
if (ref_param) {
- // Compute SIRT entry, note null is placed in the SIRT but its boxed value
+ // Compute handle scope entry, note null is placed in the handle scope but its boxed value
// must be NULL
- FrameOffset sirt_offset = main_jni_conv->CurrentParamSirtEntryOffset();
- // Check SIRT offset is within frame and doesn't run into the saved segment state
- CHECK_LT(sirt_offset.Uint32Value(), frame_size);
- CHECK_NE(sirt_offset.Uint32Value(),
+ FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
+ // Check handle scope offset is within frame and doesn't run into the saved segment state
+ CHECK_LT(handle_scope_offset.Uint32Value(), frame_size);
+ CHECK_NE(handle_scope_offset.Uint32Value(),
main_jni_conv->SavedLocalReferenceCookieOffset().Uint32Value());
bool input_in_reg = mr_conv->IsCurrentParamInRegister();
bool input_on_stack = mr_conv->IsCurrentParamOnStack();
@@ -159,11 +159,11 @@
if (input_in_reg) {
ManagedRegister in_reg = mr_conv->CurrentParamRegister();
__ VerifyObject(in_reg, mr_conv->IsCurrentArgPossiblyNull());
- __ StoreRef(sirt_offset, in_reg);
+ __ StoreRef(handle_scope_offset, in_reg);
} else if (input_on_stack) {
FrameOffset in_off = mr_conv->CurrentParamStackOffset();
__ VerifyObject(in_off, mr_conv->IsCurrentArgPossiblyNull());
- __ CopyRef(sirt_offset, in_off,
+ __ CopyRef(handle_scope_offset, in_off,
mr_conv->InterproceduralScratchRegister());
}
}
@@ -197,20 +197,20 @@
ThreadOffset<8> jni_start64 = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(8, pJniMethodStartSynchronized)
: QUICK_ENTRYPOINT_OFFSET(8, pJniMethodStart);
main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
- FrameOffset locked_object_sirt_offset(0);
+ FrameOffset locked_object_handle_scope_offset(0);
if (is_synchronized) {
// Pass object for locking.
main_jni_conv->Next(); // Skip JNIEnv.
- locked_object_sirt_offset = main_jni_conv->CurrentParamSirtEntryOffset();
+ locked_object_handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
if (main_jni_conv->IsCurrentParamOnStack()) {
FrameOffset out_off = main_jni_conv->CurrentParamStackOffset();
- __ CreateSirtEntry(out_off, locked_object_sirt_offset,
+ __ CreateHandleScopeEntry(out_off, locked_object_handle_scope_offset,
mr_conv->InterproceduralScratchRegister(),
false);
} else {
ManagedRegister out_reg = main_jni_conv->CurrentParamRegister();
- __ CreateSirtEntry(out_reg, locked_object_sirt_offset,
+ __ CreateHandleScopeEntry(out_reg, locked_object_handle_scope_offset,
ManagedRegister::NoRegister(), false);
}
main_jni_conv->Next();
@@ -274,15 +274,15 @@
mr_conv->ResetIterator(FrameOffset(frame_size+main_out_arg_size));
main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
main_jni_conv->Next(); // Skip JNIEnv*
- FrameOffset sirt_offset = main_jni_conv->CurrentParamSirtEntryOffset();
+ FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
if (main_jni_conv->IsCurrentParamOnStack()) {
FrameOffset out_off = main_jni_conv->CurrentParamStackOffset();
- __ CreateSirtEntry(out_off, sirt_offset,
+ __ CreateHandleScopeEntry(out_off, handle_scope_offset,
mr_conv->InterproceduralScratchRegister(),
false);
} else {
ManagedRegister out_reg = main_jni_conv->CurrentParamRegister();
- __ CreateSirtEntry(out_reg, sirt_offset,
+ __ CreateHandleScopeEntry(out_reg, handle_scope_offset,
ManagedRegister::NoRegister(), false);
}
}
@@ -369,12 +369,12 @@
// Pass object for unlocking.
if (end_jni_conv->IsCurrentParamOnStack()) {
FrameOffset out_off = end_jni_conv->CurrentParamStackOffset();
- __ CreateSirtEntry(out_off, locked_object_sirt_offset,
+ __ CreateHandleScopeEntry(out_off, locked_object_handle_scope_offset,
end_jni_conv->InterproceduralScratchRegister(),
false);
} else {
ManagedRegister out_reg = end_jni_conv->CurrentParamRegister();
- __ CreateSirtEntry(out_reg, locked_object_sirt_offset,
+ __ CreateHandleScopeEntry(out_reg, locked_object_handle_scope_offset,
ManagedRegister::NoRegister(), false);
}
end_jni_conv->Next();
@@ -438,7 +438,7 @@
size_t frame_size, size_t out_arg_size) {
bool input_in_reg = mr_conv->IsCurrentParamInRegister();
bool output_in_reg = jni_conv->IsCurrentParamInRegister();
- FrameOffset sirt_offset(0);
+ FrameOffset handle_scope_offset(0);
bool null_allowed = false;
bool ref_param = jni_conv->IsCurrentParamAReference();
CHECK(!ref_param || mr_conv->IsCurrentParamAReference());
@@ -449,21 +449,21 @@
} else {
CHECK(jni_conv->IsCurrentParamOnStack());
}
- // References need placing in SIRT and the entry address passing
+ // References need placing in handle scope and the entry address passing
if (ref_param) {
null_allowed = mr_conv->IsCurrentArgPossiblyNull();
- // Compute SIRT offset. Note null is placed in the SIRT but the jobject
- // passed to the native code must be null (not a pointer into the SIRT
+ // Compute handle scope offset. Note null is placed in the handle scope but the jobject
+ // passed to the native code must be null (not a pointer into the handle scope
// as with regular references).
- sirt_offset = jni_conv->CurrentParamSirtEntryOffset();
- // Check SIRT offset is within frame.
- CHECK_LT(sirt_offset.Uint32Value(), (frame_size + out_arg_size));
+ handle_scope_offset = jni_conv->CurrentParamHandleScopeEntryOffset();
+ // Check handle scope offset is within frame.
+ CHECK_LT(handle_scope_offset.Uint32Value(), (frame_size + out_arg_size));
}
if (input_in_reg && output_in_reg) {
ManagedRegister in_reg = mr_conv->CurrentParamRegister();
ManagedRegister out_reg = jni_conv->CurrentParamRegister();
if (ref_param) {
- __ CreateSirtEntry(out_reg, sirt_offset, in_reg, null_allowed);
+ __ CreateHandleScopeEntry(out_reg, handle_scope_offset, in_reg, null_allowed);
} else {
if (!mr_conv->IsCurrentParamOnStack()) {
// regular non-straddling move
@@ -475,7 +475,7 @@
} else if (!input_in_reg && !output_in_reg) {
FrameOffset out_off = jni_conv->CurrentParamStackOffset();
if (ref_param) {
- __ CreateSirtEntry(out_off, sirt_offset, mr_conv->InterproceduralScratchRegister(),
+ __ CreateHandleScopeEntry(out_off, handle_scope_offset, mr_conv->InterproceduralScratchRegister(),
null_allowed);
} else {
FrameOffset in_off = mr_conv->CurrentParamStackOffset();
@@ -489,7 +489,7 @@
// Check that incoming stack arguments are above the current stack frame.
CHECK_GT(in_off.Uint32Value(), frame_size);
if (ref_param) {
- __ CreateSirtEntry(out_reg, sirt_offset, ManagedRegister::NoRegister(), null_allowed);
+ __ CreateHandleScopeEntry(out_reg, handle_scope_offset, ManagedRegister::NoRegister(), null_allowed);
} else {
size_t param_size = mr_conv->CurrentParamSize();
CHECK_EQ(param_size, jni_conv->CurrentParamSize());
@@ -502,8 +502,8 @@
// Check outgoing argument is within frame
CHECK_LT(out_off.Uint32Value(), frame_size);
if (ref_param) {
- // TODO: recycle value in in_reg rather than reload from SIRT
- __ CreateSirtEntry(out_off, sirt_offset, mr_conv->InterproceduralScratchRegister(),
+ // TODO: recycle value in in_reg rather than reload from handle scope
+ __ CreateHandleScopeEntry(out_off, handle_scope_offset, mr_conv->InterproceduralScratchRegister(),
null_allowed);
} else {
size_t param_size = mr_conv->CurrentParamSize();
diff --git a/compiler/jni/quick/mips/calling_convention_mips.cc b/compiler/jni/quick/mips/calling_convention_mips.cc
index 8e1c0c7..0402fe6 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.cc
+++ b/compiler/jni/quick/mips/calling_convention_mips.cc
@@ -148,10 +148,10 @@
size_t MipsJniCallingConvention::FrameSize() {
// Method*, LR and callee save area size, local reference segment state
size_t frame_data_size = (3 + CalleeSaveRegisters().size()) * kFramePointerSize;
- // References plus 2 words for SIRT header
- size_t sirt_size = StackIndirectReferenceTable::GetAlignedSirtSizeTarget(kFramePointerSize, ReferenceCount());
+ // References plus 2 words for HandleScope header
+ size_t handle_scope_size = HandleScope::GetAlignedHandleScopeSizeTarget(kFramePointerSize, ReferenceCount());
// Plus return value spill area size
- return RoundUp(frame_data_size + sirt_size + SizeOfReturnValue(), kStackAlignment);
+ return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
size_t MipsJniCallingConvention::OutArgSize() {
diff --git a/compiler/jni/quick/x86/calling_convention_x86.cc b/compiler/jni/quick/x86/calling_convention_x86.cc
index 153f953..97b4cdf 100644
--- a/compiler/jni/quick/x86/calling_convention_x86.cc
+++ b/compiler/jni/quick/x86/calling_convention_x86.cc
@@ -125,10 +125,10 @@
size_t X86JniCallingConvention::FrameSize() {
// Method*, return address and callee save area size, local reference segment state
size_t frame_data_size = (3 + CalleeSaveRegisters().size()) * kFramePointerSize;
- // References plus 2 words for SIRT header
- size_t sirt_size = StackIndirectReferenceTable::GetAlignedSirtSizeTarget(kFramePointerSize, ReferenceCount());
+ // References plus 2 words for HandleScope header
+ size_t handle_scope_size = HandleScope::GetAlignedHandleScopeSizeTarget(kFramePointerSize, ReferenceCount());
// Plus return value spill area size
- return RoundUp(frame_data_size + sirt_size + SizeOfReturnValue(), kStackAlignment);
+ return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
size_t X86JniCallingConvention::OutArgSize() {
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
index 4dfa29a..4871c87 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
@@ -133,17 +133,17 @@
}
uint32_t X86_64JniCallingConvention::CoreSpillMask() const {
- return 1 << RBX | 1 << RBP | 1 << R12 | 1 << R13 | 1 << R14 | 1 << R15 | 1 << R13 |
+ return 1 << RBX | 1 << RBP | 1 << R12 | 1 << R13 | 1 << R14 | 1 << R15 |
1 << kNumberOfCpuRegisters;
}
size_t X86_64JniCallingConvention::FrameSize() {
// Method*, return address and callee save area size, local reference segment state
size_t frame_data_size = (3 + CalleeSaveRegisters().size()) * kFramePointerSize;
- // References plus link_ (pointer) and number_of_references_ (uint32_t) for SIRT header
- size_t sirt_size = StackIndirectReferenceTable::GetAlignedSirtSizeTarget(kFramePointerSize, ReferenceCount());
+ // References plus link_ (pointer) and number_of_references_ (uint32_t) for HandleScope header
+ size_t handle_scope_size = HandleScope::GetAlignedHandleScopeSizeTarget(kFramePointerSize, ReferenceCount());
// Plus return value spill area size
- return RoundUp(frame_data_size + sirt_size + SizeOfReturnValue(), kStackAlignment);
+ return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
size_t X86_64JniCallingConvention::OutArgSize() {
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index b5d3923..ce35d0f 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -17,11 +17,12 @@
#include "common_compiler_test.h"
#include "compiler/compiler.h"
#include "compiler/oat_writer.h"
+#include "entrypoints/quick/quick_entrypoints.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
-#include "oat_file.h"
+#include "oat_file-inl.h"
#include "vector_output_stream.h"
namespace art {
@@ -152,7 +153,8 @@
num_virtual_methods = it.NumVirtualMethods();
}
const char* descriptor = dex_file->GetClassDescriptor(class_def);
- SirtRef<mirror::ClassLoader> loader(soa.Self(), nullptr);
+ StackHandleScope<1> hs(soa.Self());
+ auto loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
mirror::Class* klass = class_linker->FindClass(soa.Self(), descriptor, loader);
const OatFile::OatClass oat_class = oat_dex_file->GetOatClass(i);
@@ -176,8 +178,9 @@
// If this test is failing and you have to update these constants,
// it is time to update OatHeader::kOatVersion
EXPECT_EQ(80U, sizeof(OatHeader));
- EXPECT_EQ(20U, sizeof(OatMethodOffsets));
- EXPECT_EQ(12U, sizeof(OatMethodHeader));
+ EXPECT_EQ(8U, sizeof(OatMethodOffsets));
+ EXPECT_EQ(24U, sizeof(OatQuickMethodHeader));
+ EXPECT_EQ(80 * GetInstructionSetPointerSize(kRuntimeISA), sizeof(QuickEntryPoints));
}
TEST_F(OatTest, OatHeaderIsValid) {
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index bbc9c3e..bace25c 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -33,7 +33,7 @@
#include "output_stream.h"
#include "safe_map.h"
#include "scoped_thread_state_change.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
#include "verifier/method_verifier.h"
namespace art {
@@ -331,9 +331,6 @@
if (compiled_method != nullptr) {
// Derived from CompiledMethod.
uint32_t quick_code_offset = 0;
- uint32_t frame_size_in_bytes = kStackAlignment;
- uint32_t core_spill_mask = 0;
- uint32_t fp_spill_mask = 0;
const std::vector<uint8_t>* portable_code = compiled_method->GetPortableCode();
const std::vector<uint8_t>* quick_code = compiled_method->GetQuickCode();
@@ -351,7 +348,7 @@
uint32_t code_size = quick_code->size() * sizeof(uint8_t);
CHECK_NE(code_size, 0U);
uint32_t thumb_offset = compiled_method->CodeDelta();
- quick_code_offset = offset_ + sizeof(OatMethodHeader) + thumb_offset;
+ quick_code_offset = offset_ + sizeof(OatQuickMethodHeader) + thumb_offset;
std::vector<uint8_t>* cfi_info = writer_->compiler_driver_->GetCallFrameInformation();
if (cfi_info != nullptr) {
@@ -374,27 +371,45 @@
}
}
- DCHECK_LT(method_offsets_index_, oat_class->method_headers_.size());
- OatMethodHeader* method_header = &oat_class->method_headers_[method_offsets_index_];
- method_header->code_size_ = code_size;
-
// Deduplicate code arrays.
auto code_iter = dedupe_map_.find(compiled_method);
if (code_iter != dedupe_map_.end()) {
quick_code_offset = code_iter->second;
- FixupMethodHeader(method_header, quick_code_offset - thumb_offset);
} else {
dedupe_map_.Put(compiled_method, quick_code_offset);
- FixupMethodHeader(method_header, quick_code_offset - thumb_offset);
+ }
+
+ // Update quick method header.
+ DCHECK_LT(method_offsets_index_, oat_class->method_headers_.size());
+ OatQuickMethodHeader* method_header = &oat_class->method_headers_[method_offsets_index_];
+ uint32_t mapping_table_offset = method_header->mapping_table_offset_;
+ uint32_t vmap_table_offset = method_header->vmap_table_offset_;
+ // The code offset was 0 when the mapping/vmap table offset was set, so it's set
+ // to 0-offset and we need to adjust it by code_offset.
+ uint32_t code_offset = quick_code_offset - thumb_offset;
+ if (mapping_table_offset != 0u) {
+ mapping_table_offset += code_offset;
+ DCHECK_LT(mapping_table_offset, code_offset);
+ }
+ if (vmap_table_offset != 0u) {
+ vmap_table_offset += code_offset;
+ DCHECK_LT(vmap_table_offset, code_offset);
+ }
+ uint32_t frame_size_in_bytes = compiled_method->GetFrameSizeInBytes();
+ uint32_t core_spill_mask = compiled_method->GetCoreSpillMask();
+ uint32_t fp_spill_mask = compiled_method->GetFpSpillMask();
+ *method_header = OatQuickMethodHeader(mapping_table_offset, vmap_table_offset,
+ frame_size_in_bytes, core_spill_mask, fp_spill_mask,
+ code_size);
+
+ // Update checksum if this wasn't a duplicate.
+ if (code_iter == dedupe_map_.end()) {
writer_->oat_header_->UpdateChecksum(method_header, sizeof(*method_header));
offset_ += sizeof(*method_header); // Method header is prepended before code.
writer_->oat_header_->UpdateChecksum(&(*quick_code)[0], code_size);
offset_ += code_size;
}
}
- frame_size_in_bytes = compiled_method->GetFrameSizeInBytes();
- core_spill_mask = compiled_method->GetCoreSpillMask();
- fp_spill_mask = compiled_method->GetFpSpillMask();
if (kIsDebugBuild) {
// We expect GC maps except when the class hasn't been verified or the method is native.
@@ -421,9 +436,6 @@
DCHECK_LT(method_offsets_index_, oat_class->method_offsets_.size());
OatMethodOffsets* offsets = &oat_class->method_offsets_[method_offsets_index_];
offsets->code_offset_ = quick_code_offset;
- offsets->frame_size_in_bytes_ = frame_size_in_bytes;
- offsets->core_spill_mask_ = core_spill_mask;
- offsets->fp_spill_mask_ = fp_spill_mask;
++method_offsets_index_;
}
@@ -431,19 +443,6 @@
}
private:
- static void FixupMethodHeader(OatMethodHeader* method_header, uint32_t code_offset) {
- // The code offset was 0 when the mapping/vmap table offset was set, so it's set
- // to 0-offset and we need to adjust it by code_offset.
- if (method_header->mapping_table_offset_ != 0u) {
- method_header->mapping_table_offset_ += code_offset;
- DCHECK_LT(method_header->mapping_table_offset_, code_offset);
- }
- if (method_header->vmap_table_offset_ != 0u) {
- method_header->vmap_table_offset_ += code_offset;
- DCHECK_LT(method_header->vmap_table_offset_, code_offset);
- }
- }
-
// Deduplication is already done on a pointer basis by the compiler driver,
// so we can simply compare the pointers to find out if things are duplicated.
SafeMap<const CompiledMethod*, uint32_t, CodeOffsetsKeyComparator> dedupe_map_;
@@ -501,55 +500,23 @@
OatClass* oat_class = writer_->oat_classes_[oat_class_index_];
CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
- OatMethodOffsets offsets(0u, kStackAlignment, 0u, 0u, 0u);
+ OatMethodOffsets offsets(0u, 0u);
if (compiled_method != nullptr) {
DCHECK_LT(method_offsets_index_, oat_class->method_offsets_.size());
offsets = oat_class->method_offsets_[method_offsets_index_];
++method_offsets_index_;
}
- // Derive frame size and spill masks for native methods without code:
- // These are generic JNI methods...
- uint32_t method_idx = it.GetMemberIndex();
- bool is_native = (it.GetMemberAccessFlags() & kAccNative) != 0;
- if (is_native && compiled_method == nullptr) {
- // Compute Sirt size as putting _every_ reference into it, even null ones.
- uint32_t s_len;
- const char* shorty = dex_file_->GetMethodShorty(dex_file_->GetMethodId(method_idx),
- &s_len);
- DCHECK(shorty != nullptr);
- uint32_t refs = 1; // Native method always has "this" or class.
- for (uint32_t i = 1; i < s_len; ++i) {
- if (shorty[i] == 'L') {
- refs++;
- }
- }
- size_t pointer_size = GetInstructionSetPointerSize(
- writer_->compiler_driver_->GetInstructionSet());
- size_t sirt_size = StackIndirectReferenceTable::GetAlignedSirtSizeTarget(pointer_size, refs);
-
- // Get the generic spill masks and base frame size.
- mirror::ArtMethod* callee_save_method =
- Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
-
- offsets.frame_size_in_bytes_ = callee_save_method->GetFrameSizeInBytes() + sirt_size;
- offsets.core_spill_mask_ = callee_save_method->GetCoreSpillMask();
- offsets.fp_spill_mask_ = callee_save_method->GetFpSpillMask();
- DCHECK_EQ(offsets.gc_map_offset_, 0u);
- }
-
ClassLinker* linker = Runtime::Current()->GetClassLinker();
InvokeType invoke_type = it.GetMethodInvokeType(dex_file_->GetClassDef(class_def_index_));
// Unchecked as we hold mutator_lock_ on entry.
ScopedObjectAccessUnchecked soa(Thread::Current());
- SirtRef<mirror::DexCache> dex_cache(soa.Self(), linker->FindDexCache(*dex_file_));
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(), nullptr);
- mirror::ArtMethod* method = linker->ResolveMethod(*dex_file_, method_idx, dex_cache,
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(linker->FindDexCache(*dex_file_)));
+ auto class_loader = hs.NewHandle<mirror::ClassLoader>(nullptr);
+ mirror::ArtMethod* method = linker->ResolveMethod(*dex_file_, it.GetMemberIndex(), dex_cache,
class_loader, nullptr, invoke_type);
CHECK(method != NULL);
- method->SetFrameSizeInBytes(offsets.frame_size_in_bytes_);
- method->SetCoreSpillMask(offsets.core_spill_mask_);
- method->SetFpSpillMask(offsets.fp_spill_mask_);
// Portable code offsets are set by ElfWriterMclinker::FixupCompiledCodeOffset after linking.
method->SetQuickOatCodeOffset(offsets.code_offset_);
method->SetOatNativeGcMapOffset(offsets.gc_map_offset_);
@@ -601,10 +568,10 @@
// Deduplicate code arrays.
const OatMethodOffsets& method_offsets = oat_class->method_offsets_[method_offsets_index_];
DCHECK(method_offsets.code_offset_ < offset_ || method_offsets.code_offset_ ==
- offset_ + sizeof(OatMethodHeader) + compiled_method->CodeDelta())
+ offset_ + sizeof(OatQuickMethodHeader) + compiled_method->CodeDelta())
<< PrettyMethod(it.GetMemberIndex(), *dex_file_);
if (method_offsets.code_offset_ >= offset_) {
- const OatMethodHeader& method_header = oat_class->method_headers_[method_offsets_index_];
+ const OatQuickMethodHeader& method_header = oat_class->method_headers_[method_offsets_index_];
if (!out->WriteFully(&method_header, sizeof(method_header))) {
ReportWriteFailure("method header", it);
return false;
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index 7cdd532..7a41d87 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -26,7 +26,7 @@
#include "oat.h"
#include "mirror/class.h"
#include "safe_map.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
namespace art {
@@ -231,7 +231,7 @@
// oat_method_offsets_offsets_from_oat_class_ should contain 0
// values in this case).
std::vector<OatMethodOffsets> method_offsets_;
- std::vector<OatMethodHeader> method_headers_;
+ std::vector<OatQuickMethodHeader> method_headers_;
private:
DISALLOW_COPY_AND_ASSIGN(OatClass);
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index b0aa63b..2c2564d 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -311,6 +311,10 @@
}
}
+ if (return_type == Primitive::kPrimDouble || return_type == Primitive::kPrimFloat) {
+ return false;
+ }
+
DCHECK_EQ(argument_index, number_of_arguments);
current_block_->AddInstruction(invoke);
return true;
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index bbebd3a..beafbcc 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -47,7 +47,7 @@
Bind(GetLabelOf(block));
HGraphVisitor* location_builder = GetLocationBuilder();
HGraphVisitor* instruction_visitor = GetInstructionVisitor();
- for (HInstructionIterator it(*block->GetInstructions()); !it.Done(); it.Advance()) {
+ for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* current = it.Current();
current->Accept(location_builder);
InitLocations(current);
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
new file mode 100644
index 0000000..b9c1164
--- /dev/null
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -0,0 +1,216 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "graph_visualizer.h"
+
+#include "driver/dex_compilation_unit.h"
+#include "nodes.h"
+
+namespace art {
+
+/**
+ * HGraph visitor to generate a file suitable for the c1visualizer tool and IRHydra.
+ */
+class HGraphVisualizerPrinter : public HGraphVisitor {
+ public:
+ HGraphVisualizerPrinter(HGraph* graph, std::ostream& output)
+ : HGraphVisitor(graph), output_(output), indent_(0) {}
+
+ void StartTag(const char* name) {
+ AddIndent();
+ output_ << "begin_" << name << std::endl;
+ indent_++;
+ }
+
+ void EndTag(const char* name) {
+ indent_--;
+ AddIndent();
+ output_ << "end_" << name << std::endl;
+ }
+
+ void PrintProperty(const char* name, const char* property) {
+ AddIndent();
+ output_ << name << " \"" << property << "\"" << std::endl;
+ }
+
+ void PrintProperty(const char* name, const char* property, int id) {
+ AddIndent();
+ output_ << name << " \"" << property << id << "\"" << std::endl;
+ }
+
+ void PrintEmptyProperty(const char* name) {
+ AddIndent();
+ output_ << name << std::endl;
+ }
+
+ void PrintTime(const char* name) {
+ AddIndent();
+ output_ << name << " " << time(NULL) << std::endl;
+ }
+
+ void PrintInt(const char* name, int value) {
+ AddIndent();
+ output_ << name << " " << value << std::endl;
+ }
+
+ void AddIndent() {
+ for (size_t i = 0; i < indent_; ++i) {
+ output_ << " ";
+ }
+ }
+
+ void PrintPredecessors(HBasicBlock* block) {
+ AddIndent();
+ output_ << "predecessors";
+ for (size_t i = 0, e = block->GetPredecessors().Size(); i < e; ++i) {
+ HBasicBlock* predecessor = block->GetPredecessors().Get(i);
+ output_ << " \"B" << predecessor->GetBlockId() << "\" ";
+ }
+ output_<< std::endl;
+ }
+
+ void PrintSuccessors(HBasicBlock* block) {
+ AddIndent();
+ output_ << "successors";
+ for (size_t i = 0, e = block->GetSuccessors().Size(); i < e; ++i) {
+ HBasicBlock* successor = block->GetSuccessors().Get(i);
+ output_ << " \"B" << successor->GetBlockId() << "\" ";
+ }
+ output_<< std::endl;
+ }
+
+
+ void VisitInstruction(HInstruction* instruction) {
+ output_ << instruction->DebugName();
+ if (instruction->InputCount() > 0) {
+ output_ << " [ ";
+ for (HInputIterator inputs(instruction); !inputs.Done(); inputs.Advance()) {
+ output_ << "v" << inputs.Current()->GetId() << " ";
+ }
+ output_ << "]";
+ }
+ }
+
+ void PrintInstructions(const HInstructionList& list) {
+ const char* kEndInstructionMarker = "<|@";
+ for (HInstructionIterator it(list); !it.Done(); it.Advance()) {
+ HInstruction* instruction = it.Current();
+ AddIndent();
+ int bci = 0;
+ output_ << bci << " " << instruction->NumberOfUses() << " v" << instruction->GetId() << " ";
+ instruction->Accept(this);
+ output_ << kEndInstructionMarker << std::endl;
+ }
+ }
+
+ void Run(const char* pass_name) {
+ StartTag("cfg");
+ PrintProperty("name", pass_name);
+ VisitInsertionOrder();
+ EndTag("cfg");
+ }
+
+ void VisitBasicBlock(HBasicBlock* block) {
+ StartTag("block");
+ PrintProperty("name", "B", block->GetBlockId());
+ PrintInt("from_bci", -1);
+ PrintInt("to_bci", -1);
+ PrintPredecessors(block);
+ PrintSuccessors(block);
+ PrintEmptyProperty("xhandlers");
+ PrintEmptyProperty("flags");
+ if (block->GetDominator() != nullptr) {
+ PrintProperty("dominator", "B", block->GetDominator()->GetBlockId());
+ }
+
+ StartTag("states");
+ StartTag("locals");
+ PrintInt("size", 0);
+ PrintProperty("method", "None");
+ for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
+ AddIndent();
+ HInstruction* instruction = it.Current();
+ output_ << instruction->GetId() << " v" << instruction->GetId() << "[ ";
+ for (HInputIterator inputs(instruction); !inputs.Done(); inputs.Advance()) {
+ output_ << inputs.Current()->GetId() << " ";
+ }
+ output_ << "]" << std::endl;
+ }
+ EndTag("locals");
+ EndTag("states");
+
+ StartTag("HIR");
+ PrintInstructions(block->GetPhis());
+ PrintInstructions(block->GetInstructions());
+ EndTag("HIR");
+ EndTag("block");
+ }
+
+ private:
+ std::ostream& output_;
+ size_t indent_;
+
+ DISALLOW_COPY_AND_ASSIGN(HGraphVisualizerPrinter);
+};
+
+HGraphVisualizer::HGraphVisualizer(std::ostream* output,
+ HGraph* graph,
+ const char* string_filter,
+ const DexCompilationUnit& cu)
+ : output_(output), graph_(graph), is_enabled_(false) {
+ if (output == nullptr) {
+ return;
+ }
+ std::string pretty_name = PrettyMethod(cu.GetDexMethodIndex(), *cu.GetDexFile());
+ if (pretty_name.find(string_filter) == std::string::npos) {
+ return;
+ }
+
+ is_enabled_ = true;
+ HGraphVisualizerPrinter printer(graph, *output_);
+ printer.StartTag("compilation");
+ printer.PrintProperty("name", pretty_name.c_str());
+ printer.PrintProperty("method", pretty_name.c_str());
+ printer.PrintTime("date");
+ printer.EndTag("compilation");
+}
+
+HGraphVisualizer::HGraphVisualizer(std::ostream* output,
+ HGraph* graph,
+ const char* name)
+ : output_(output), graph_(graph), is_enabled_(false) {
+ if (output == nullptr) {
+ return;
+ }
+
+ is_enabled_ = true;
+ HGraphVisualizerPrinter printer(graph, *output_);
+ printer.StartTag("compilation");
+ printer.PrintProperty("name", name);
+ printer.PrintProperty("method", name);
+ printer.PrintTime("date");
+ printer.EndTag("compilation");
+}
+
+void HGraphVisualizer::DumpGraph(const char* pass_name) {
+ if (!is_enabled_) {
+ return;
+ }
+ HGraphVisualizerPrinter printer(graph_, *output_);
+ printer.Run(pass_name);
+}
+
+} // namespace art
diff --git a/compiler/optimizing/graph_visualizer.h b/compiler/optimizing/graph_visualizer.h
new file mode 100644
index 0000000..2b88e65
--- /dev/null
+++ b/compiler/optimizing/graph_visualizer.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_GRAPH_VISUALIZER_H_
+#define ART_COMPILER_OPTIMIZING_GRAPH_VISUALIZER_H_
+
+#include "utils/allocation.h"
+
+namespace art {
+
+class DexCompilationUnit;
+class HGraph;
+
+/**
+ * If enabled, emits compilation information suitable for the c1visualizer tool
+ * and IRHydra.
+ * Currently only works if the compiler is single threaded.
+ */
+class HGraphVisualizer : public ValueObject {
+ public:
+ /**
+ * If output is not null, and the method name of the dex compilation
+ * unit contains `string_filter`, the compilation information will be
+ * emitted.
+ */
+ HGraphVisualizer(std::ostream* output,
+ HGraph* graph,
+ const char* string_filter,
+ const DexCompilationUnit& cu);
+
+ /**
+ * Version of `HGraphVisualizer` for unit testing, that is when a
+ * `DexCompilationUnit` is not available.
+ */
+ HGraphVisualizer(std::ostream* output, HGraph* graph, const char* name);
+
+ /**
+ * If this visualizer is enabled, emit the compilation information
+ * in `output_`.
+ */
+ void DumpGraph(const char* pass_name);
+
+ private:
+ std::ostream* const output_;
+ HGraph* const graph_;
+
+ // Is true when `output_` is not null, and the compiled method's name
+ // contains the string_filter given in the constructor.
+ bool is_enabled_;
+
+ DISALLOW_COPY_AND_ASSIGN(HGraphVisualizer);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_GRAPH_VISUALIZER_H_
diff --git a/compiler/optimizing/linearize_test.cc b/compiler/optimizing/linearize_test.cc
new file mode 100644
index 0000000..f9ae529
--- /dev/null
+++ b/compiler/optimizing/linearize_test.cc
@@ -0,0 +1,192 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fstream>
+
+#include "base/stringprintf.h"
+#include "builder.h"
+#include "dex_file.h"
+#include "dex_instruction.h"
+#include "graph_visualizer.h"
+#include "nodes.h"
+#include "optimizing_unit_test.h"
+#include "pretty_printer.h"
+#include "ssa_builder.h"
+#include "ssa_liveness_analysis.h"
+#include "utils/arena_allocator.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+static void TestCode(const uint16_t* data, const int* expected_order, size_t number_of_blocks) {
+ ArenaPool pool;
+ ArenaAllocator allocator(&pool);
+ HGraphBuilder builder(&allocator);
+ const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
+ HGraph* graph = builder.BuildGraph(*item);
+ ASSERT_NE(graph, nullptr);
+
+ graph->BuildDominatorTree();
+ graph->FindNaturalLoops();
+ SsaLivenessAnalysis liveness(*graph);
+ liveness.Analyze();
+
+ ASSERT_EQ(liveness.GetLinearPostOrder().Size(), number_of_blocks);
+ for (size_t i = 0; i < number_of_blocks; ++i) {
+ ASSERT_EQ(liveness.GetLinearPostOrder().Get(number_of_blocks - i - 1)->GetBlockId(),
+ expected_order[i]);
+ }
+}
+
+TEST(LinearizeTest, CFG1) {
+ // Structure of this graph (+ are back edges)
+ // Block0
+ // |
+ // Block1
+ // |
+ // Block2 ++++++
+ // / \ +
+ // Block5 Block7 +
+ // | | +
+ // Block6 Block3 +
+ // + / \ +
+ // Block4 Block8
+
+ const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ Instruction::CONST_4 | 0 | 0,
+ Instruction::IF_EQ, 5,
+ Instruction::IF_EQ, 0xFFFE,
+ Instruction::GOTO | 0xFE00,
+ Instruction::RETURN_VOID);
+
+ const int blocks[] = {0, 1, 2, 7, 3, 4, 8, 5, 6};
+ TestCode(data, blocks, 9);
+}
+
+TEST(LinearizeTest, CFG2) {
+ // Structure of this graph (+ are back edges)
+ // Block0
+ // |
+ // Block1
+ // |
+ // Block2 ++++++
+ // / \ +
+ // Block3 Block7 +
+ // | | +
+ // Block6 Block4 +
+ // + / \ +
+ // Block5 Block8
+
+ const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ Instruction::CONST_4 | 0 | 0,
+ Instruction::IF_EQ, 3,
+ Instruction::RETURN_VOID,
+ Instruction::IF_EQ, 0xFFFD,
+ Instruction::GOTO | 0xFE00);
+
+ const int blocks[] = {0, 1, 2, 7, 4, 5, 8, 3, 6};
+ TestCode(data, blocks, 9);
+}
+
+TEST(LinearizeTest, CFG3) {
+ // Structure of this graph (+ are back edges)
+ // Block0
+ // |
+ // Block1
+ // |
+ // Block2 ++++++
+ // / \ +
+ // Block3 Block8 +
+ // | | +
+ // Block7 Block5 +
+ // / + \ +
+ // Block6 + Block9
+ // | +
+ // Block4 ++
+ const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ Instruction::CONST_4 | 0 | 0,
+ Instruction::IF_EQ, 4,
+ Instruction::RETURN_VOID,
+ Instruction::GOTO | 0x0100,
+ Instruction::IF_EQ, 0xFFFC,
+ Instruction::GOTO | 0xFD00);
+
+ const int blocks[] = {0, 1, 2, 8, 5, 6, 4, 9, 3, 7};
+ TestCode(data, blocks, 10);
+}
+
+TEST(LinearizeTest, CFG4) {
+ /* Structure of this graph (+ are back edges)
+ // Block0
+ // |
+ // Block1
+ // |
+ // Block2
+ // / + \
+ // Block6 + Block8
+ // | + |
+ // Block7 + Block3 +++++++
+ // + / \ +
+ // Block9 Block10 +
+ // | +
+ // Block4 +
+ // + / \ +
+ // Block5 Block11
+ */
+ const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ Instruction::CONST_4 | 0 | 0,
+ Instruction::IF_EQ, 7,
+ Instruction::IF_EQ, 0xFFFE,
+ Instruction::IF_EQ, 0xFFFE,
+ Instruction::GOTO | 0xFE00,
+ Instruction::RETURN_VOID);
+
+ const int blocks[] = {0, 1, 2, 8, 3, 10, 4, 5, 11, 9, 6, 7};
+ TestCode(data, blocks, 12);
+}
+
+TEST(LinearizeTest, CFG5) {
+ /* Structure of this graph (+ are back edges)
+ // Block0
+ // |
+ // Block1
+ // |
+ // Block2
+ // / + \
+ // Block3 + Block8
+ // | + |
+ // Block7 + Block4 +++++++
+ // + / \ +
+ // Block9 Block10 +
+ // | +
+ // Block5 +
+ // +/ \ +
+ // Block6 Block11
+ */
+ const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ Instruction::CONST_4 | 0 | 0,
+ Instruction::IF_EQ, 3,
+ Instruction::RETURN_VOID,
+ Instruction::IF_EQ, 0xFFFD,
+ Instruction::IF_EQ, 0xFFFE,
+ Instruction::GOTO | 0xFE00);
+
+ const int blocks[] = {0, 1, 2, 8, 4, 10, 5, 6, 11, 9, 3, 7};
+ TestCode(data, blocks, 12);
+}
+
+} // namespace art
diff --git a/compiler/optimizing/liveness_test.cc b/compiler/optimizing/liveness_test.cc
index d665ab9..53e7bbe 100644
--- a/compiler/optimizing/liveness_test.cc
+++ b/compiler/optimizing/liveness_test.cc
@@ -35,6 +35,7 @@
ASSERT_NE(graph, nullptr);
graph->BuildDominatorTree();
graph->TransformToSSA();
+ graph->FindNaturalLoops();
SsaLivenessAnalysis liveness(*graph);
liveness.Analyze();
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index cf2d1ee..afaedd7 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -37,10 +37,10 @@
for (size_t j = 0; j < block->GetSuccessors().Size(); ++j) {
block->GetSuccessors().Get(j)->RemovePredecessor(block, false);
}
- for (HInstructionIterator it(*block->GetPhis()); !it.Done(); it.Advance()) {
+ for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
block->RemovePhi(it.Current()->AsPhi());
}
- for (HInstructionIterator it(*block->GetInstructions()); !it.Done(); it.Advance()) {
+ for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
block->RemoveInstruction(it.Current());
}
}
@@ -420,10 +420,10 @@
}
void HGraphVisitor::VisitBasicBlock(HBasicBlock* block) {
- for (HInstructionIterator it(*block->GetPhis()); !it.Done(); it.Advance()) {
+ for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
it.Current()->Accept(this);
}
- for (HInstructionIterator it(*block->GetInstructions()); !it.Done(); it.Advance()) {
+ for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
it.Current()->Accept(this);
}
}
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 081c2bd..1085c10 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -82,7 +82,7 @@
void SimplifyCFG();
// Find all natural loops in this graph. Aborts computation and returns false
- // if one loop is not natural, that is the header does not dominated the back
+ // if one loop is not natural, that is the header does not dominate the back
// edge.
bool FindNaturalLoops() const;
@@ -268,8 +268,8 @@
HInstruction* GetFirstInstruction() const { return instructions_.first_instruction_; }
HInstruction* GetLastInstruction() const { return instructions_.last_instruction_; }
- HInstructionList const* GetInstructions() const { return &instructions_; }
- HInstructionList const* GetPhis() const { return &phis_; }
+ const HInstructionList& GetInstructions() const { return instructions_; }
+ const HInstructionList& GetPhis() const { return phis_; }
void AddSuccessor(HBasicBlock* block) {
successors_.Add(block);
@@ -444,6 +444,17 @@
bool HasUses() const { return uses_ != nullptr || env_uses_ != nullptr; }
+ size_t NumberOfUses() const {
+ // TODO: Optimize this method if it is used outside of the HGraphVisualizer.
+ size_t result = 0;
+ HUseListNode<HInstruction>* current = uses_;
+ while (current != nullptr) {
+ current = current->GetTail();
+ ++result;
+ }
+ return result;
+ }
+
int GetId() const { return id_; }
void SetId(int id) { id_ = id; }
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index a5031e0..f435cb0 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include <fstream>
#include <stdint.h>
#include "builder.h"
@@ -21,6 +22,7 @@
#include "compilers.h"
#include "driver/compiler_driver.h"
#include "driver/dex_compilation_unit.h"
+#include "graph_visualizer.h"
#include "nodes.h"
#include "ssa_liveness_analysis.h"
#include "utils/arena_allocator.h"
@@ -50,6 +52,22 @@
DISALLOW_COPY_AND_ASSIGN(CodeVectorAllocator);
};
+/**
+ * If set to true, generates a file suitable for the c1visualizer tool and IRHydra.
+ */
+static bool kIsVisualizerEnabled = false;
+
+/**
+ * Filter to apply to the visualizer. Methods whose name contain that filter will
+ * be in the file.
+ */
+static const char* kStringFilter = "";
+
+OptimizingCompiler::OptimizingCompiler(CompilerDriver* driver) : QuickCompiler(driver) {
+ if (kIsVisualizerEnabled) {
+ visualizer_output_.reset(new std::ofstream("art.cfg"));
+ }
+}
CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_item,
uint32_t access_flags,
@@ -70,6 +88,7 @@
ArenaPool pool;
ArenaAllocator arena(&pool);
HGraphBuilder builder(&arena, &dex_compilation_unit, &dex_file);
+
HGraph* graph = builder.BuildGraph(*code_item);
if (graph == nullptr) {
if (shouldCompile) {
@@ -77,6 +96,8 @@
}
return nullptr;
}
+ HGraphVisualizer visualizer(visualizer_output_.get(), graph, kStringFilter, dex_compilation_unit);
+ visualizer.DumpGraph("builder");
InstructionSet instruction_set = GetCompilerDriver()->GetInstructionSet();
// The optimizing compiler currently does not have a Thumb2 assembler.
@@ -104,6 +125,8 @@
// Run these phases to get some test coverage.
graph->BuildDominatorTree();
graph->TransformToSSA();
+ visualizer.DumpGraph("ssa");
+
graph->FindNaturalLoops();
SsaLivenessAnalysis(*graph).Analyze();
diff --git a/compiler/optimizing/pretty_printer.h b/compiler/optimizing/pretty_printer.h
index dfeafe7..a7727c0 100644
--- a/compiler/optimizing/pretty_printer.h
+++ b/compiler/optimizing/pretty_printer.h
@@ -100,6 +100,47 @@
DISALLOW_COPY_AND_ASSIGN(HPrettyPrinter);
};
+class StringPrettyPrinter : public HPrettyPrinter {
+ public:
+ explicit StringPrettyPrinter(HGraph* graph)
+ : HPrettyPrinter(graph), str_(""), current_block_(nullptr) { }
+
+ virtual void PrintInt(int value) {
+ str_ += StringPrintf("%d", value);
+ }
+
+ virtual void PrintString(const char* value) {
+ str_ += value;
+ }
+
+ virtual void PrintNewLine() {
+ str_ += '\n';
+ }
+
+ void Clear() { str_.clear(); }
+
+ std::string str() const { return str_; }
+
+ virtual void VisitBasicBlock(HBasicBlock* block) {
+ current_block_ = block;
+ HPrettyPrinter::VisitBasicBlock(block);
+ }
+
+ virtual void VisitGoto(HGoto* gota) {
+ PrintString(" ");
+ PrintInt(gota->GetId());
+ PrintString(": Goto ");
+ PrintInt(current_block_->GetSuccessors().Get(0)->GetBlockId());
+ PrintNewLine();
+ }
+
+ private:
+ std::string str_;
+ HBasicBlock* current_block_;
+
+ DISALLOW_COPY_AND_ASSIGN(StringPrettyPrinter);
+};
+
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_PRETTY_PRINTER_H_
diff --git a/compiler/optimizing/pretty_printer_test.cc b/compiler/optimizing/pretty_printer_test.cc
index 006349c..7e604e9 100644
--- a/compiler/optimizing/pretty_printer_test.cc
+++ b/compiler/optimizing/pretty_printer_test.cc
@@ -27,47 +27,6 @@
namespace art {
-class StringPrettyPrinter : public HPrettyPrinter {
- public:
- explicit StringPrettyPrinter(HGraph* graph)
- : HPrettyPrinter(graph), str_(""), current_block_(nullptr) { }
-
- virtual void PrintInt(int value) {
- str_ += StringPrintf("%d", value);
- }
-
- virtual void PrintString(const char* value) {
- str_ += value;
- }
-
- virtual void PrintNewLine() {
- str_ += '\n';
- }
-
- void Clear() { str_.clear(); }
-
- std::string str() const { return str_; }
-
- virtual void VisitBasicBlock(HBasicBlock* block) {
- current_block_ = block;
- HPrettyPrinter::VisitBasicBlock(block);
- }
-
- virtual void VisitGoto(HGoto* gota) {
- PrintString(" ");
- PrintInt(gota->GetId());
- PrintString(": Goto ");
- PrintInt(current_block_->GetSuccessors().Get(0)->GetBlockId());
- PrintNewLine();
- }
-
- private:
- std::string str_;
- HBasicBlock* current_block_;
-
- DISALLOW_COPY_AND_ASSIGN(StringPrettyPrinter);
-};
-
static void TestCode(const uint16_t* data, const char* expected) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index 1fc041c..50e3254 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -30,7 +30,7 @@
// 2) Set inputs of loop phis.
for (size_t i = 0; i < loop_headers_.Size(); i++) {
HBasicBlock* block = loop_headers_.Get(i);
- for (HInstructionIterator it(*block->GetPhis()); !it.Done(); it.Advance()) {
+ for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
HPhi* phi = it.Current()->AsPhi();
for (size_t pred = 0; pred < block->GetPredecessors().Size(); pred++) {
phi->AddInput(ValueOfLocal(block->GetPredecessors().Get(pred), phi->GetRegNumber()));
@@ -40,7 +40,7 @@
// 3) Clear locals.
// TODO: Move this to a dead code eliminator phase.
- for (HInstructionIterator it(*GetGraph()->GetEntryBlock()->GetInstructions());
+ for (HInstructionIterator it(GetGraph()->GetEntryBlock()->GetInstructions());
!it.Done();
it.Advance()) {
HInstruction* current = it.Current();
@@ -106,7 +106,7 @@
// - HStoreLocal: update current value of the local and remove the instruction.
// - Instructions that require an environment: populate their environment
// with the current values of the locals.
- for (HInstructionIterator it(*block->GetInstructions()); !it.Done(); it.Advance()) {
+ for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
it.Current()->Accept(this);
}
}
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index 0ab77ca..85171aa 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -20,23 +20,102 @@
namespace art {
void SsaLivenessAnalysis::Analyze() {
+ LinearizeGraph();
NumberInstructions();
ComputeSets();
}
+static bool IsLoopExit(HLoopInformation* current, HLoopInformation* to) {
+ // `to` is either not part of a loop, or `current` is an inner loop of `to`.
+ return to == nullptr || (current != to && current->IsIn(*to));
+}
+
+static bool IsLoop(HLoopInformation* info) {
+ return info != nullptr;
+}
+
+static bool InSameLoop(HLoopInformation* first_loop, HLoopInformation* second_loop) {
+ return first_loop == second_loop;
+}
+
+static bool IsInnerLoop(HLoopInformation* outer, HLoopInformation* inner) {
+ return (inner != outer)
+ && (inner != nullptr)
+ && (outer != nullptr)
+ && inner->IsIn(*outer);
+}
+
+static void VisitBlockForLinearization(HBasicBlock* block,
+ GrowableArray<HBasicBlock*>* order,
+ ArenaBitVector* visited) {
+ if (visited->IsBitSet(block->GetBlockId())) {
+ return;
+ }
+ visited->SetBit(block->GetBlockId());
+ size_t number_of_successors = block->GetSuccessors().Size();
+ if (number_of_successors == 0) {
+ // Nothing to do.
+ } else if (number_of_successors == 1) {
+ VisitBlockForLinearization(block->GetSuccessors().Get(0), order, visited);
+ } else {
+ DCHECK_EQ(number_of_successors, 2u);
+ HBasicBlock* first_successor = block->GetSuccessors().Get(0);
+ HBasicBlock* second_successor = block->GetSuccessors().Get(1);
+ HLoopInformation* my_loop = block->GetLoopInformation();
+ HLoopInformation* first_loop = first_successor->GetLoopInformation();
+ HLoopInformation* second_loop = second_successor->GetLoopInformation();
+
+ if (!IsLoop(my_loop)) {
+ // Nothing to do. Current order is fine.
+ } else if (IsLoopExit(my_loop, second_loop) && InSameLoop(my_loop, first_loop)) {
+ // Visit the loop exit first in post order.
+ std::swap(first_successor, second_successor);
+ } else if (IsInnerLoop(my_loop, first_loop) && !IsInnerLoop(my_loop, second_loop)) {
+ // Visit the inner loop last in post order.
+ std::swap(first_successor, second_successor);
+ }
+ VisitBlockForLinearization(first_successor, order, visited);
+ VisitBlockForLinearization(second_successor, order, visited);
+ }
+ order->Add(block);
+}
+
+class HLinearOrderIterator : public ValueObject {
+ public:
+ explicit HLinearOrderIterator(const GrowableArray<HBasicBlock*>& post_order)
+ : post_order_(post_order), index_(post_order.Size()) {}
+
+ bool Done() const { return index_ == 0; }
+ HBasicBlock* Current() const { return post_order_.Get(index_ -1); }
+ void Advance() { --index_; DCHECK_GE(index_, 0U); }
+
+ private:
+ const GrowableArray<HBasicBlock*>& post_order_;
+ size_t index_;
+
+ DISALLOW_COPY_AND_ASSIGN(HLinearOrderIterator);
+};
+
+void SsaLivenessAnalysis::LinearizeGraph() {
+ // For simplicity of the implementation, we create post linear order. The order for
+ // computing live ranges is the reverse of that order.
+ ArenaBitVector visited(graph_.GetArena(), graph_.GetBlocks().Size(), false);
+ VisitBlockForLinearization(graph_.GetEntryBlock(), &linear_post_order_, &visited);
+}
+
void SsaLivenessAnalysis::NumberInstructions() {
int ssa_index = 0;
- for (HReversePostOrderIterator it(graph_); !it.Done(); it.Advance()) {
+ for (HLinearOrderIterator it(linear_post_order_); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
- for (HInstructionIterator it(*block->GetPhis()); !it.Done(); it.Advance()) {
+ for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
HInstruction* current = it.Current();
if (current->HasUses()) {
current->SetSsaIndex(ssa_index++);
}
}
- for (HInstructionIterator it(*block->GetInstructions()); !it.Done(); it.Advance()) {
+ for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* current = it.Current();
if (current->HasUses()) {
current->SetSsaIndex(ssa_index++);
@@ -47,7 +126,7 @@
}
void SsaLivenessAnalysis::ComputeSets() {
- for (HReversePostOrderIterator it(graph_); !it.Done(); it.Advance()) {
+ for (HLinearOrderIterator it(linear_post_order_); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
block_infos_.Put(
block->GetBlockId(),
@@ -73,7 +152,7 @@
BitVector* kill = GetKillSet(*block);
BitVector* live_in = GetLiveInSet(*block);
- for (HBackwardInstructionIterator it(*block->GetInstructions()); !it.Done(); it.Advance()) {
+ for (HBackwardInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* current = it.Current();
if (current->HasSsaIndex()) {
kill->SetBit(current->GetSsaIndex());
@@ -99,7 +178,7 @@
}
}
- for (HInstructionIterator it(*block->GetPhis()); !it.Done(); it.Advance()) {
+ for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
HInstruction* current = it.Current();
if (current->HasSsaIndex()) {
kill->SetBit(current->GetSsaIndex());
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 6a901d1..b8695ba 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -48,6 +48,7 @@
public:
explicit SsaLivenessAnalysis(const HGraph& graph)
: graph_(graph),
+ linear_post_order_(graph.GetArena(), graph.GetBlocks().Size()),
block_infos_(graph.GetArena(), graph.GetBlocks().Size()),
number_of_ssa_values_(0) {
block_infos_.SetSize(graph.GetBlocks().Size());
@@ -67,7 +68,17 @@
return &block_infos_.Get(block.GetBlockId())->kill_;
}
+ const GrowableArray<HBasicBlock*>& GetLinearPostOrder() const {
+ return linear_post_order_;
+ }
+
private:
+ // Linearize the graph so that:
+ // (1): a block is always after its dominator,
+ // (2): blocks of loops are contiguous.
+ // This creates a natural and efficient ordering when visualizing live ranges.
+ void LinearizeGraph();
+
// Give an SSA number to each instruction that defines a value used by another instruction.
void NumberInstructions();
@@ -90,6 +101,7 @@
bool UpdateLiveOut(const HBasicBlock& block);
const HGraph& graph_;
+ GrowableArray<HBasicBlock*> linear_post_order_;
GrowableArray<BlockInfo*> block_infos_;
size_t number_of_ssa_values_;
diff --git a/compiler/optimizing/ssa_test.cc b/compiler/optimizing/ssa_test.cc
index 9be2197..d104619 100644
--- a/compiler/optimizing/ssa_test.cc
+++ b/compiler/optimizing/ssa_test.cc
@@ -28,9 +28,9 @@
namespace art {
-class StringPrettyPrinter : public HPrettyPrinter {
+class SsaPrettyPrinter : public HPrettyPrinter {
public:
- explicit StringPrettyPrinter(HGraph* graph) : HPrettyPrinter(graph), str_("") {}
+ explicit SsaPrettyPrinter(HGraph* graph) : HPrettyPrinter(graph), str_("") {}
virtual void PrintInt(int value) {
str_ += StringPrintf("%d", value);
@@ -59,17 +59,17 @@
private:
std::string str_;
- DISALLOW_COPY_AND_ASSIGN(StringPrettyPrinter);
+ DISALLOW_COPY_AND_ASSIGN(SsaPrettyPrinter);
};
static void ReNumberInstructions(HGraph* graph) {
int id = 0;
for (size_t i = 0, e = graph->GetBlocks().Size(); i < e; ++i) {
HBasicBlock* block = graph->GetBlocks().Get(i);
- for (HInstructionIterator it(*block->GetPhis()); !it.Done(); it.Advance()) {
+ for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
it.Current()->SetId(id++);
}
- for (HInstructionIterator it(*block->GetInstructions()); !it.Done(); it.Advance()) {
+ for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
it.Current()->SetId(id++);
}
}
@@ -82,11 +82,12 @@
const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
HGraph* graph = builder.BuildGraph(*item);
ASSERT_NE(graph, nullptr);
+
graph->BuildDominatorTree();
graph->TransformToSSA();
ReNumberInstructions(graph);
- StringPrettyPrinter printer(graph);
+ SsaPrettyPrinter printer(graph);
printer.VisitInsertionOrder();
ASSERT_STREQ(expected, printer.str().c_str());
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index 5c839dd..64685c1 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -1752,53 +1752,53 @@
#endif
}
-void ArmAssembler::CreateSirtEntry(ManagedRegister mout_reg,
- FrameOffset sirt_offset,
+void ArmAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
+ FrameOffset handle_scope_offset,
ManagedRegister min_reg, bool null_allowed) {
ArmManagedRegister out_reg = mout_reg.AsArm();
ArmManagedRegister in_reg = min_reg.AsArm();
CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg;
CHECK(out_reg.IsCoreRegister()) << out_reg;
if (null_allowed) {
- // Null values get a SIRT entry value of 0. Otherwise, the SIRT entry is
- // the address in the SIRT holding the reference.
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
// e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
if (in_reg.IsNoRegister()) {
LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
- SP, sirt_offset.Int32Value());
+ SP, handle_scope_offset.Int32Value());
in_reg = out_reg;
}
cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
if (!out_reg.Equals(in_reg)) {
LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
}
- AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value(), NE);
+ AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), NE);
} else {
- AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value(), AL);
+ AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), AL);
}
}
-void ArmAssembler::CreateSirtEntry(FrameOffset out_off,
- FrameOffset sirt_offset,
+void ArmAssembler::CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handle_scope_offset,
ManagedRegister mscratch,
bool null_allowed) {
ArmManagedRegister scratch = mscratch.AsArm();
CHECK(scratch.IsCoreRegister()) << scratch;
if (null_allowed) {
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP,
- sirt_offset.Int32Value());
- // Null values get a SIRT entry value of 0. Otherwise, the sirt entry is
- // the address in the SIRT holding the reference.
- // e.g. scratch = (scratch == 0) ? 0 : (SP+sirt_offset)
+ handle_scope_offset.Int32Value());
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
+ // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
cmp(scratch.AsCoreRegister(), ShifterOperand(0));
- AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), NE);
+ AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), NE);
} else {
- AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), AL);
+ AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), AL);
}
StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, out_off.Int32Value());
}
-void ArmAssembler::LoadReferenceFromSirt(ManagedRegister mout_reg,
+void ArmAssembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
ManagedRegister min_reg) {
ArmManagedRegister out_reg = mout_reg.AsArm();
ArmManagedRegister in_reg = min_reg.AsArm();
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index f5be04a..396e603 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -521,20 +521,20 @@
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_reg to hold an Object** into the handle scope, or to be NULL if the
// value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the SIRT entry to see if the value is
+ // that can be used to avoid loading the handle scope entry to see if the value is
// NULL.
- void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset, ManagedRegister in_reg,
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset, ManagedRegister in_reg,
bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
// value is null and null_allowed.
- void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset, ManagedRegister scratch,
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister scratch,
bool null_allowed) OVERRIDE;
- // src holds a SIRT entry (Object**) load this into dst
- void LoadReferenceFromSirt(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+ // src holds a handle scope entry (Object**); load this into dst.
+ void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
// know that src may not be null.
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index f486b3c..27188b2 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -539,52 +539,52 @@
UNIMPLEMENTED(FATAL) << "Unimplemented Call() variant";
}
-void Arm64Assembler::CreateSirtEntry(ManagedRegister m_out_reg, FrameOffset sirt_offs,
+void Arm64Assembler::CreateHandleScopeEntry(ManagedRegister m_out_reg, FrameOffset handle_scope_offs,
ManagedRegister m_in_reg, bool null_allowed) {
Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
- // For now we only hold stale sirt entries in x registers.
+ // For now we only hold stale handle scope entries in x registers.
CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg;
CHECK(out_reg.IsCoreRegister()) << out_reg;
if (null_allowed) {
- // Null values get a SIRT entry value of 0. Otherwise, the SIRT entry is
- // the address in the SIRT holding the reference.
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
// e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
if (in_reg.IsNoRegister()) {
LoadWFromOffset(kLoadWord, out_reg.AsOverlappingCoreRegisterLow(), SP,
- sirt_offs.Int32Value());
+ handle_scope_offs.Int32Value());
in_reg = out_reg;
}
___ Cmp(reg_w(in_reg.AsOverlappingCoreRegisterLow()), 0);
if (!out_reg.Equals(in_reg)) {
LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
}
- AddConstant(out_reg.AsCoreRegister(), SP, sirt_offs.Int32Value(), NE);
+ AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offs.Int32Value(), NE);
} else {
- AddConstant(out_reg.AsCoreRegister(), SP, sirt_offs.Int32Value(), AL);
+ AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offs.Int32Value(), AL);
}
}
-void Arm64Assembler::CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset,
+void Arm64Assembler::CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handle_scope_offset,
ManagedRegister m_scratch, bool null_allowed) {
Arm64ManagedRegister scratch = m_scratch.AsArm64();
CHECK(scratch.IsCoreRegister()) << scratch;
if (null_allowed) {
LoadWFromOffset(kLoadWord, scratch.AsOverlappingCoreRegisterLow(), SP,
- sirt_offset.Int32Value());
- // Null values get a SIRT entry value of 0. Otherwise, the sirt entry is
- // the address in the SIRT holding the reference.
- // e.g. scratch = (scratch == 0) ? 0 : (SP+sirt_offset)
+ handle_scope_offset.Int32Value());
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
+ // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
___ Cmp(reg_w(scratch.AsOverlappingCoreRegisterLow()), 0);
// Move this logic in add constants with flags.
- AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), NE);
+ AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), NE);
} else {
- AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), AL);
+ AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), AL);
}
StoreToOffset(scratch.AsCoreRegister(), SP, out_off.Int32Value());
}
-void Arm64Assembler::LoadReferenceFromSirt(ManagedRegister m_out_reg,
+void Arm64Assembler::LoadReferenceFromHandleScope(ManagedRegister m_out_reg,
ManagedRegister m_in_reg) {
Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index 583150c..0f4a9a4 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -26,7 +26,7 @@
#include "utils/assembler.h"
#include "offsets.h"
#include "utils.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#include "a64/macro-assembler-a64.h"
#include "a64/disasm-a64.h"
@@ -161,20 +161,20 @@
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_reg to hold an Object** into the handle scope, or to be NULL if the
// value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the SIRT entry to see if the value is
+ // that can be used to avoid loading the handle scope entry to see if the value is
// NULL.
- void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset,
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
ManagedRegister in_reg, bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
// value is null and null_allowed.
- void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset,
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
ManagedRegister scratch, bool null_allowed) OVERRIDE;
- // src holds a SIRT entry (Object**) load this into dst.
- void LoadReferenceFromSirt(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+ // src holds a handle scope entry (Object**); load this into dst.
+ void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
// know that src may not be null.
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 219c87f..19239e1 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -453,20 +453,20 @@
virtual void GetCurrentThread(FrameOffset dest_offset,
ManagedRegister scratch) = 0;
- // Set up out_reg to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_reg to hold an Object** into the handle scope, or to be NULL if the
// value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the SIRT entry to see if the value is
+ // that can be used to avoid loading the handle scope entry to see if the value is
// NULL.
- virtual void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset,
+ virtual void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
ManagedRegister in_reg, bool null_allowed) = 0;
- // Set up out_off to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
// value is null and null_allowed.
- virtual void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset,
+ virtual void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
ManagedRegister scratch, bool null_allowed) = 0;
- // src holds a SIRT entry (Object**) load this into dst
- virtual void LoadReferenceFromSirt(ManagedRegister dst,
+ // src holds a handle scope entry (Object**); load this into dst.
+ virtual void LoadReferenceFromHandleScope(ManagedRegister dst,
ManagedRegister src) = 0;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index 9001f8a..8001dcd 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -827,8 +827,8 @@
UNIMPLEMENTED(FATAL) << "no mips implementation";
}
-void MipsAssembler::CreateSirtEntry(ManagedRegister mout_reg,
- FrameOffset sirt_offset,
+void MipsAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
+ FrameOffset handle_scope_offset,
ManagedRegister min_reg, bool null_allowed) {
MipsManagedRegister out_reg = mout_reg.AsMips();
MipsManagedRegister in_reg = min_reg.AsMips();
@@ -836,27 +836,27 @@
CHECK(out_reg.IsCoreRegister()) << out_reg;
if (null_allowed) {
Label null_arg;
- // Null values get a SIRT entry value of 0. Otherwise, the SIRT entry is
- // the address in the SIRT holding the reference.
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
// e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
if (in_reg.IsNoRegister()) {
LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
- SP, sirt_offset.Int32Value());
+ SP, handle_scope_offset.Int32Value());
in_reg = out_reg;
}
if (!out_reg.Equals(in_reg)) {
LoadImmediate(out_reg.AsCoreRegister(), 0);
}
EmitBranch(in_reg.AsCoreRegister(), ZERO, &null_arg, true);
- AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value());
+ AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
Bind(&null_arg, false);
} else {
- AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value());
+ AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
}
}
-void MipsAssembler::CreateSirtEntry(FrameOffset out_off,
- FrameOffset sirt_offset,
+void MipsAssembler::CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handle_scope_offset,
ManagedRegister mscratch,
bool null_allowed) {
MipsManagedRegister scratch = mscratch.AsMips();
@@ -864,21 +864,21 @@
if (null_allowed) {
Label null_arg;
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP,
- sirt_offset.Int32Value());
- // Null values get a SIRT entry value of 0. Otherwise, the sirt entry is
- // the address in the SIRT holding the reference.
- // e.g. scratch = (scratch == 0) ? 0 : (SP+sirt_offset)
+ handle_scope_offset.Int32Value());
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
+ // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
EmitBranch(scratch.AsCoreRegister(), ZERO, &null_arg, true);
- AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value());
+ AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
Bind(&null_arg, false);
} else {
- AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value());
+ AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
}
StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, out_off.Int32Value());
}
-// Given a SIRT entry, load the associated reference.
-void MipsAssembler::LoadReferenceFromSirt(ManagedRegister mout_reg,
+// Given a handle scope entry, load the associated reference.
+void MipsAssembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
ManagedRegister min_reg) {
MipsManagedRegister out_reg = mout_reg.AsMips();
MipsManagedRegister in_reg = min_reg.AsMips();
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 75ee8b9..216cb41 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -238,20 +238,20 @@
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_reg to hold an Object** into the handle scope, or to be NULL if the
// value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the SIRT entry to see if the value is
+ // that can be used to avoid loading the handle scope entry to see if the value is
// NULL.
- void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset, ManagedRegister in_reg,
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset, ManagedRegister in_reg,
bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
// value is null and null_allowed.
- void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset, ManagedRegister mscratch,
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister mscratch,
bool null_allowed) OVERRIDE;
- // src holds a SIRT entry (Object**) load this into dst
- void LoadReferenceFromSirt(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+ // src holds a handle scope entry (Object**); load this into dst.
+ void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
// know that src may not be null.
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 6a3efc5..0791c63 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -1727,8 +1727,8 @@
#endif
}
-void X86Assembler::CreateSirtEntry(ManagedRegister mout_reg,
- FrameOffset sirt_offset,
+void X86Assembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
+ FrameOffset handle_scope_offset,
ManagedRegister min_reg, bool null_allowed) {
X86ManagedRegister out_reg = mout_reg.AsX86();
X86ManagedRegister in_reg = min_reg.AsX86();
@@ -1742,34 +1742,34 @@
}
testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
j(kZero, &null_arg);
- leal(out_reg.AsCpuRegister(), Address(ESP, sirt_offset));
+ leal(out_reg.AsCpuRegister(), Address(ESP, handle_scope_offset));
Bind(&null_arg);
} else {
- leal(out_reg.AsCpuRegister(), Address(ESP, sirt_offset));
+ leal(out_reg.AsCpuRegister(), Address(ESP, handle_scope_offset));
}
}
-void X86Assembler::CreateSirtEntry(FrameOffset out_off,
- FrameOffset sirt_offset,
+void X86Assembler::CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handle_scope_offset,
ManagedRegister mscratch,
bool null_allowed) {
X86ManagedRegister scratch = mscratch.AsX86();
CHECK(scratch.IsCpuRegister());
if (null_allowed) {
Label null_arg;
- movl(scratch.AsCpuRegister(), Address(ESP, sirt_offset));
+ movl(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset));
testl(scratch.AsCpuRegister(), scratch.AsCpuRegister());
j(kZero, &null_arg);
- leal(scratch.AsCpuRegister(), Address(ESP, sirt_offset));
+ leal(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset));
Bind(&null_arg);
} else {
- leal(scratch.AsCpuRegister(), Address(ESP, sirt_offset));
+ leal(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset));
}
Store(out_off, scratch, 4);
}
-// Given a SIRT entry, load the associated reference.
-void X86Assembler::LoadReferenceFromSirt(ManagedRegister mout_reg,
+// Given a handle scope entry, load the associated reference.
+void X86Assembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
ManagedRegister min_reg) {
X86ManagedRegister out_reg = mout_reg.AsX86();
X86ManagedRegister in_reg = min_reg.AsX86();
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 057c80a..2fc6049 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -541,20 +541,20 @@
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_reg to hold an Object** into the handle scope, or to be NULL if the
// value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the SIRT entry to see if the value is
+ // that can be used to avoid loading the handle scope entry to see if the value is
// NULL.
- void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset, ManagedRegister in_reg,
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset, ManagedRegister in_reg,
bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
// value is null and null_allowed.
- void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset, ManagedRegister scratch,
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister scratch,
bool null_allowed) OVERRIDE;
- // src holds a SIRT entry (Object**) load this into dst
- void LoadReferenceFromSirt(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+ // src holds a handle scope entry (Object**); load this into dst.
+ void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
// know that src may not be null.
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 8eaeae1..0ede875 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -1989,8 +1989,8 @@
#endif
}
-void X86_64Assembler::CreateSirtEntry(ManagedRegister mout_reg,
- FrameOffset sirt_offset,
+void X86_64Assembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
+ FrameOffset handle_scope_offset,
ManagedRegister min_reg, bool null_allowed) {
X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
X86_64ManagedRegister in_reg = min_reg.AsX86_64();
@@ -1998,7 +1998,7 @@
// Use out_reg as indicator of NULL
in_reg = out_reg;
// TODO: movzwl
- movl(in_reg.AsCpuRegister(), Address(CpuRegister(RSP), sirt_offset));
+ movl(in_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
}
CHECK(in_reg.IsCpuRegister());
CHECK(out_reg.IsCpuRegister());
@@ -2010,34 +2010,34 @@
}
testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
j(kZero, &null_arg);
- leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), sirt_offset));
+ leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
Bind(&null_arg);
} else {
- leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), sirt_offset));
+ leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
}
}
-void X86_64Assembler::CreateSirtEntry(FrameOffset out_off,
- FrameOffset sirt_offset,
+void X86_64Assembler::CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handle_scope_offset,
ManagedRegister mscratch,
bool null_allowed) {
X86_64ManagedRegister scratch = mscratch.AsX86_64();
CHECK(scratch.IsCpuRegister());
if (null_allowed) {
Label null_arg;
- movl(scratch.AsCpuRegister(), Address(CpuRegister(RSP), sirt_offset));
+ movl(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
testl(scratch.AsCpuRegister(), scratch.AsCpuRegister());
j(kZero, &null_arg);
- leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), sirt_offset));
+ leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
Bind(&null_arg);
} else {
- leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), sirt_offset));
+ leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
}
Store(out_off, scratch, 8);
}
-// Given a SIRT entry, load the associated reference.
-void X86_64Assembler::LoadReferenceFromSirt(ManagedRegister mout_reg,
+// Given a handle scope entry, load the associated reference.
+void X86_64Assembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
ManagedRegister min_reg) {
X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
X86_64ManagedRegister in_reg = min_reg.AsX86_64();
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 87fb359..548d379 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -566,20 +566,20 @@
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_reg to hold an Object** into the handle scope, or to be NULL if the
// value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the SIRT entry to see if the value is
+ // that can be used to avoid loading the handle scope entry to see if the value is
// NULL.
- void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset, ManagedRegister in_reg,
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset, ManagedRegister in_reg,
bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
// value is null and null_allowed.
- void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset, ManagedRegister scratch,
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister scratch,
bool null_allowed) OVERRIDE;
- // src holds a SIRT entry (Object**) load this into dst
- virtual void LoadReferenceFromSirt(ManagedRegister dst,
+ // src holds a handle scope entry (Object**); load this into dst.
+ virtual void LoadReferenceFromHandleScope(ManagedRegister dst,
ManagedRegister src);
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
diff --git a/dalvikvm/Android.mk b/dalvikvm/Android.mk
index fd08263..e7ed9a7 100644
--- a/dalvikvm/Android.mk
+++ b/dalvikvm/Android.mk
@@ -16,7 +16,7 @@
LOCAL_PATH := $(call my-dir)
-dalvikvm_cflags := -Wall -Werror -Wextra
+dalvikvm_cflags := -Wall -Werror -Wextra -std=gnu++11
include $(CLEAR_VARS)
LOCAL_MODULE := dalvikvm
@@ -24,6 +24,7 @@
LOCAL_CPP_EXTENSION := cc
LOCAL_SRC_FILES := dalvikvm.cc
LOCAL_CFLAGS := $(dalvikvm_cflags)
+LOCAL_C_INCLUDES := art/runtime
LOCAL_SHARED_LIBRARIES := libdl libnativehelper
LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
LOCAL_MULTILIB := both
@@ -41,9 +42,11 @@
include $(CLEAR_VARS)
LOCAL_MODULE := dalvikvm
LOCAL_MODULE_TAGS := optional
+LOCAL_CLANG := true
LOCAL_CPP_EXTENSION := cc
LOCAL_SRC_FILES := dalvikvm.cc
LOCAL_CFLAGS := $(dalvikvm_cflags)
+LOCAL_C_INCLUDES := art/runtime
LOCAL_SHARED_LIBRARIES := libnativehelper
LOCAL_LDFLAGS := -ldl -lpthread
LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
diff --git a/dalvikvm/dalvikvm.cc b/dalvikvm/dalvikvm.cc
index 8d71a7c..582adc0 100644
--- a/dalvikvm/dalvikvm.cc
+++ b/dalvikvm/dalvikvm.cc
@@ -24,7 +24,7 @@
#include "JniInvocation.h"
#include "ScopedLocalRef.h"
#include "toStringArray.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
namespace art {
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index e90006e..ad796f8 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -236,7 +236,7 @@
method_inliner_map,
thread_count));
if (!dex2oat->CreateRuntime(runtime_options, instruction_set)) {
- *p_dex2oat = NULL;
+ *p_dex2oat = nullptr;
return false;
}
*p_dex2oat = dex2oat.release();
@@ -258,9 +258,9 @@
CompilerDriver::DescriptorSet* ReadImageClassesFromFile(const char* image_classes_filename) {
UniquePtr<std::ifstream> image_classes_file(new std::ifstream(image_classes_filename,
std::ifstream::in));
- if (image_classes_file.get() == NULL) {
+ if (image_classes_file.get() == nullptr) {
LOG(ERROR) << "Failed to open image classes file " << image_classes_filename;
- return NULL;
+ return nullptr;
}
UniquePtr<CompilerDriver::DescriptorSet> result(ReadImageClasses(*image_classes_file.get()));
image_classes_file->close();
@@ -286,21 +286,21 @@
const char* image_classes_filename,
std::string* error_msg) {
UniquePtr<ZipArchive> zip_archive(ZipArchive::Open(zip_filename, error_msg));
- if (zip_archive.get() == NULL) {
- return NULL;
+ if (zip_archive.get() == nullptr) {
+ return nullptr;
}
UniquePtr<ZipEntry> zip_entry(zip_archive->Find(image_classes_filename, error_msg));
- if (zip_entry.get() == NULL) {
+ if (zip_entry.get() == nullptr) {
*error_msg = StringPrintf("Failed to find '%s' within '%s': %s", image_classes_filename,
zip_filename, error_msg->c_str());
- return NULL;
+ return nullptr;
}
UniquePtr<MemMap> image_classes_file(zip_entry->ExtractToMemMap(image_classes_filename,
error_msg));
- if (image_classes_file.get() == NULL) {
+ if (image_classes_file.get() == nullptr) {
*error_msg = StringPrintf("Failed to extract '%s' from '%s': %s", image_classes_filename,
zip_filename, error_msg->c_str());
- return NULL;
+ return nullptr;
}
const std::string image_classes_string(reinterpret_cast<char*>(image_classes_file->Begin()),
image_classes_file->Size());
@@ -321,8 +321,8 @@
TimingLogger& timings,
CumulativeLogger& compiler_phases_timings,
std::string profile_file) {
- // SirtRef and ClassLoader creation needs to come after Runtime::Create
- jobject class_loader = NULL;
+ // Handle and ClassLoader creation needs to come after Runtime::Create
+ jobject class_loader = nullptr;
Thread* self = Thread::Current();
if (!boot_image_option.empty()) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
@@ -380,7 +380,7 @@
TimingLogger::ScopedSplit split("Writing ELF", &timings);
if (!driver->WriteElf(android_root, is_host, dex_files, &oat_writer, oat_file)) {
LOG(ERROR) << "Failed to write ELF file " << oat_file->GetPath();
- return NULL;
+ return nullptr;
}
return driver.release();
@@ -404,7 +404,7 @@
}
UniquePtr<File> oat_file(OS::OpenFileReadWrite(oat_filename.c_str()));
- if (oat_file.get() == NULL) {
+ if (oat_file.get() == nullptr) {
PLOG(ERROR) << "Failed to open ELF file: " << oat_filename;
return false;
}
@@ -444,10 +444,11 @@
return false;
}
Runtime* runtime = Runtime::Current();
+ runtime->SetInstructionSet(instruction_set);
for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
if (!runtime->HasCalleeSaveMethod(type)) {
- runtime->SetCalleeSaveMethod(runtime->CreateCalleeSaveMethod(instruction_set, type), type);
+ runtime->SetCalleeSaveMethod(runtime->CreateCalleeSaveMethod(type), type);
}
}
runtime->GetClassLinker()->FixupDexCaches(runtime->GetResolutionMethod());
@@ -469,7 +470,7 @@
}
std::string error_msg;
const DexFile* dex_file = DexFile::Open(parsed[i].c_str(), parsed[i].c_str(), &error_msg);
- if (dex_file == NULL) {
+ if (dex_file == nullptr) {
LOG(WARNING) << "Failed to open dex file '" << parsed[i] << "': " << error_msg;
} else {
dex_files.push_back(dex_file);
@@ -527,7 +528,7 @@
continue;
}
const DexFile* dex_file = DexFile::Open(dex_filename, dex_location, &error_msg);
- if (dex_file == NULL) {
+ if (dex_file == nullptr) {
LOG(WARNING) << "Failed to open .dex from file '" << dex_filename << "': " << error_msg;
++failure_count;
} else {
@@ -564,8 +565,8 @@
}
shutting_down_ = false;
const char* reason = "dex2oat watch dog thread startup";
- CHECK_WATCH_DOG_PTHREAD_CALL(pthread_mutex_init, (&mutex_, NULL), reason);
- CHECK_WATCH_DOG_PTHREAD_CALL(pthread_cond_init, (&cond_, NULL), reason);
+ CHECK_WATCH_DOG_PTHREAD_CALL(pthread_mutex_init, (&mutex_, nullptr), reason);
+ CHECK_WATCH_DOG_PTHREAD_CALL(pthread_cond_init, (&cond_, nullptr), reason);
CHECK_WATCH_DOG_PTHREAD_CALL(pthread_attr_init, (&attr_), reason);
CHECK_WATCH_DOG_PTHREAD_CALL(pthread_create, (&pthread_, &attr_, &CallBack, this), reason);
CHECK_WATCH_DOG_PTHREAD_CALL(pthread_attr_destroy, (&attr_), reason);
@@ -580,7 +581,7 @@
CHECK_WATCH_DOG_PTHREAD_CALL(pthread_cond_signal, (&cond_), reason);
CHECK_WATCH_DOG_PTHREAD_CALL(pthread_mutex_unlock, (&mutex_), reason);
- CHECK_WATCH_DOG_PTHREAD_CALL(pthread_join, (pthread_, NULL), reason);
+ CHECK_WATCH_DOG_PTHREAD_CALL(pthread_join, (pthread_, nullptr), reason);
CHECK_WATCH_DOG_PTHREAD_CALL(pthread_cond_destroy, (&cond_), reason);
CHECK_WATCH_DOG_PTHREAD_CALL(pthread_mutex_destroy, (&mutex_), reason);
@@ -591,7 +592,7 @@
WatchDog* self = reinterpret_cast<WatchDog*>(arg);
::art::SetThreadName("dex2oat watch dog");
self->Wait();
- return NULL;
+ return nullptr;
}
static void Message(char severity, const std::string& message) {
@@ -727,8 +728,8 @@
std::string oat_location;
int oat_fd = -1;
std::string bitcode_filename;
- const char* image_classes_zip_filename = NULL;
- const char* image_classes_filename = NULL;
+ const char* image_classes_zip_filename = nullptr;
+ const char* image_classes_filename = nullptr;
std::string image_filename;
std::string boot_image_filename;
uintptr_t image_base = 0;
@@ -738,7 +739,7 @@
Compiler::Kind compiler_kind = kUsePortableCompiler
? Compiler::kPortable
: Compiler::kQuick;
- const char* compiler_filter_string = NULL;
+ const char* compiler_filter_string = nullptr;
int huge_method_threshold = CompilerOptions::kDefaultHugeMethodThreshold;
int large_method_threshold = CompilerOptions::kDefaultLargeMethodThreshold;
int small_method_threshold = CompilerOptions::kDefaultSmallMethodThreshold;
@@ -948,7 +949,7 @@
if (android_root.empty()) {
const char* android_root_env_var = getenv("ANDROID_ROOT");
- if (android_root_env_var == NULL) {
+ if (android_root_env_var == nullptr) {
Usage("--android-root unspecified and ANDROID_ROOT not set");
}
android_root += android_root_env_var;
@@ -957,9 +958,7 @@
bool image = (!image_filename.empty());
if (!image && boot_image_filename.empty()) {
boot_image_filename += GetAndroidRoot();
- boot_image_filename += "/framework/boot-";
- boot_image_filename += GetInstructionSetString(instruction_set);
- boot_image_filename += ".art";
+ boot_image_filename += "/framework/boot.art";
}
std::string boot_image_option;
if (!boot_image_filename.empty()) {
@@ -967,15 +966,15 @@
boot_image_option += boot_image_filename;
}
- if (image_classes_filename != NULL && !image) {
+ if (image_classes_filename != nullptr && !image) {
Usage("--image-classes should only be used with --image");
}
- if (image_classes_filename != NULL && !boot_image_option.empty()) {
+ if (image_classes_filename != nullptr && !boot_image_option.empty()) {
Usage("--image-classes should not be used with --boot-image");
}
- if (image_classes_zip_filename != NULL && image_classes_filename == NULL) {
+ if (image_classes_zip_filename != nullptr && image_classes_filename == nullptr) {
Usage("--image-classes-zip should be used with --image-classes");
}
@@ -1017,7 +1016,7 @@
oat_unstripped += oat_filename;
}
- if (compiler_filter_string == NULL) {
+ if (compiler_filter_string == nullptr) {
if (instruction_set == kX86_64 || instruction_set == kMips) {
// TODO: implement/fix compilers for these architectures.
compiler_filter_string = "interpret-only";
@@ -1076,7 +1075,7 @@
oat_file.reset(new File(oat_fd, oat_location));
oat_file->DisableAutoClose();
}
- if (oat_file.get() == NULL) {
+ if (oat_file.get() == nullptr) {
PLOG(ERROR) << "Failed to create oat file: " << oat_location;
return EXIT_FAILURE;
}
@@ -1098,11 +1097,10 @@
}
runtime_options.push_back(std::make_pair("bootclasspath", &boot_class_path));
} else {
- runtime_options.push_back(std::make_pair(boot_image_option.c_str(),
- reinterpret_cast<void*>(NULL)));
+ runtime_options.push_back(std::make_pair(boot_image_option.c_str(), nullptr));
}
for (size_t i = 0; i < runtime_args.size(); i++) {
- runtime_options.push_back(std::make_pair(runtime_args[i], reinterpret_cast<void*>(NULL)));
+ runtime_options.push_back(std::make_pair(runtime_args[i], nullptr));
}
VerificationResults verification_results(&compiler_options);
@@ -1138,21 +1136,23 @@
WellKnownClasses::Init(self->GetJniEnv());
// If --image-classes was specified, calculate the full list of classes to include in the image
- UniquePtr<CompilerDriver::DescriptorSet> image_classes(NULL);
- if (image_classes_filename != NULL) {
+ UniquePtr<CompilerDriver::DescriptorSet> image_classes(nullptr);
+ if (image_classes_filename != nullptr) {
std::string error_msg;
- if (image_classes_zip_filename != NULL) {
+ if (image_classes_zip_filename != nullptr) {
image_classes.reset(dex2oat->ReadImageClassesFromZip(image_classes_zip_filename,
image_classes_filename,
&error_msg));
} else {
image_classes.reset(dex2oat->ReadImageClassesFromFile(image_classes_filename));
}
- if (image_classes.get() == NULL) {
+ if (image_classes.get() == nullptr) {
LOG(ERROR) << "Failed to create list of image classes from '" << image_classes_filename <<
"': " << error_msg;
return EXIT_FAILURE;
}
+ } else if (image) {
+ image_classes.reset(new CompilerDriver::DescriptorSet);
}
std::vector<const DexFile*> dex_files;
@@ -1164,13 +1164,13 @@
std::string error_msg;
UniquePtr<ZipArchive> zip_archive(ZipArchive::OpenFromFd(zip_fd, zip_location.c_str(),
&error_msg));
- if (zip_archive.get() == NULL) {
+ if (zip_archive.get() == nullptr) {
LOG(ERROR) << "Failed to open zip from file descriptor for '" << zip_location << "': "
<< error_msg;
return EXIT_FAILURE;
}
const DexFile* dex_file = DexFile::Open(*zip_archive.get(), zip_location, &error_msg);
- if (dex_file == NULL) {
+ if (dex_file == nullptr) {
LOG(ERROR) << "Failed to open dex from file descriptor for zip file '" << zip_location
<< "': " << error_msg;
return EXIT_FAILURE;
@@ -1216,7 +1216,7 @@
size_t num_methods = 0;
for (size_t i = 0; i != dex_files.size(); ++i) {
const DexFile* dex_file = dex_files[i];
- CHECK(dex_file != NULL);
+ CHECK(dex_file != nullptr);
num_methods += dex_file->NumMethodIds();
}
if (num_methods <= compiler_options.GetNumDexMethodsThreshold()) {
@@ -1239,7 +1239,7 @@
compiler_phases_timings,
profile_file));
- if (compiler.get() == NULL) {
+ if (compiler.get() == nullptr) {
LOG(ERROR) << "Failed to create oat file: " << oat_location;
return EXIT_FAILURE;
}
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index fc60c02..fef25e0 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -417,9 +417,10 @@
Runtime* runtime = Runtime::Current();
if (runtime != nullptr) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::DexCache> dex_cache(
- soa.Self(), runtime->GetClassLinker()->FindDexCache(dex_file));
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(), nullptr);
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache(
+ hs.NewHandle(runtime->GetClassLinker()->FindDexCache(dex_file)));
+ auto class_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
verifier::MethodVerifier verifier(&dex_file, &dex_cache, &class_loader, &class_def,
code_item, dex_method_idx, nullptr, method_access_flags,
true, true);
@@ -687,11 +688,12 @@
uint32_t method_access_flags) {
if ((method_access_flags & kAccNative) == 0) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::DexCache> dex_cache(
- soa.Self(), Runtime::Current()->GetClassLinker()->FindDexCache(*dex_file));
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(), nullptr);
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache(
+ hs.NewHandle(Runtime::Current()->GetClassLinker()->FindDexCache(*dex_file)));
+ auto class_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
verifier::MethodVerifier::VerifyMethodAndDump(os, dex_method_idx, dex_file, dex_cache,
- class_loader, &class_def, code_item, NULL,
+ class_loader, &class_def, code_item, nullptr,
method_access_flags);
}
}
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 72f1774..052d12e 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -270,8 +270,7 @@
runtime_linux.cc \
thread_linux.cc
-ifeq ($(HOST_ARCH),x86)
-ifneq ($(BUILD_HOST_64bit),)
+ifeq ($(HOST_ARCH),x86_64)
LIBART_HOST_SRC_FILES += \
arch/x86_64/context_x86_64.cc \
arch/x86_64/entrypoints_init_x86_64.cc \
@@ -282,6 +281,7 @@
arch/x86_64/fault_handler_x86_64.cc \
monitor_pool.cc
else
+ ifeq ($(HOST_ARCH),x86)
LIBART_HOST_SRC_FILES += \
arch/x86/context_x86.cc \
arch/x86/entrypoints_init_x86.cc \
@@ -290,10 +290,10 @@
arch/x86/quick_entrypoints_x86.S \
arch/x86/fault_handler_x86.cc \
arch/x86/thread_x86.cc
-endif
-else # HOST_ARCH != x86
+ else # HOST_ARCH != x86 && HOST_ARCH != x86_64
$(error unsupported HOST_ARCH=$(HOST_ARCH))
-endif # HOST_ARCH != x86
+ endif
+endif
LIBART_ENUM_OPERATOR_OUT_HEADER_FILES := \
diff --git a/runtime/UniquePtrCompat.h b/runtime/UniquePtrCompat.h
new file mode 100644
index 0000000..4a45616
--- /dev/null
+++ b/runtime/UniquePtrCompat.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_UNIQUEPTRCOMPAT_H_
+#define ART_RUNTIME_UNIQUEPTRCOMPAT_H_
+
+// Stlport doesn't declare std::unique_ptr. UniquePtr.h declares an incompatible std::swap
+// prototype with libc++. This compatibility header file resolves differences between the two, in
+// the future UniquePtr will become std::unique_ptr.
+
+#ifdef ART_WITH_STLPORT
+
+#include "UniquePtr.h"
+
+#else // ART_WITH_STLPORT
+
+#include <memory>
+
+template <typename T>
+using UniquePtr = typename std::unique_ptr<T>;
+
+#endif // ART_WITH_STLPORT
+
+#endif // ART_RUNTIME_UNIQUEPTRCOMPAT_H_
diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc
index c285088..45ff21f 100644
--- a/runtime/arch/arch_test.cc
+++ b/runtime/arch/arch_test.cc
@@ -17,7 +17,8 @@
#include <stdint.h>
#include "common_runtime_test.h"
-#include "mirror/art_method.h"
+#include "mirror/art_method-inl.h"
+#include "quick/quick_method_frame_info.h"
namespace art {
@@ -30,10 +31,13 @@
Thread* t = Thread::Current();
t->TransitionFromSuspendedToRunnable(); // So we can create callee-save methods.
- mirror::ArtMethod* save_method = r->CreateCalleeSaveMethod(isa, type);
- EXPECT_EQ(save_method->GetFrameSizeInBytes(), save_size) << "Expected and real size differs for "
- << type << " core spills=" << std::hex << save_method->GetCoreSpillMask() << " fp spills="
- << save_method->GetFpSpillMask() << std::dec;
+ r->SetInstructionSet(isa);
+ mirror::ArtMethod* save_method = r->CreateCalleeSaveMethod(type);
+ r->SetCalleeSaveMethod(save_method, type);
+ QuickMethodFrameInfo frame_info = save_method->GetQuickFrameInfo();
+ EXPECT_EQ(frame_info.FrameSizeInBytes(), save_size) << "Expected and real size differs for "
+ << type << " core spills=" << std::hex << frame_info.CoreSpillMask() << " fp spills="
+ << frame_info.FpSpillMask() << std::dec;
t->TransitionFromRunnableToSuspended(ThreadState::kNative); // So we can shut down.
}
diff --git a/runtime/arch/arm/context_arm.cc b/runtime/arch/arm/context_arm.cc
index 0e1b25e..6a337b3 100644
--- a/runtime/arch/arm/context_arm.cc
+++ b/runtime/arch/arm/context_arm.cc
@@ -16,8 +16,9 @@
#include "context_arm.h"
-#include "mirror/art_method.h"
+#include "mirror/art_method-inl.h"
#include "mirror/object-inl.h"
+#include "quick/quick_method_frame_info.h"
#include "stack.h"
#include "thread.h"
@@ -42,17 +43,15 @@
void ArmContext::FillCalleeSaves(const StackVisitor& fr) {
mirror::ArtMethod* method = fr.GetMethod();
- uint32_t core_spills = method->GetCoreSpillMask();
- uint32_t fp_core_spills = method->GetFpSpillMask();
- size_t spill_count = POPCOUNT(core_spills);
- size_t fp_spill_count = POPCOUNT(fp_core_spills);
- size_t frame_size = method->GetFrameSizeInBytes();
+ const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo();
+ size_t spill_count = POPCOUNT(frame_info.CoreSpillMask());
+ size_t fp_spill_count = POPCOUNT(frame_info.FpSpillMask());
if (spill_count > 0) {
// Lowest number spill is farthest away, walk registers and fill into context
int j = 1;
for (size_t i = 0; i < kNumberOfCoreRegisters; i++) {
- if (((core_spills >> i) & 1) != 0) {
- gprs_[i] = fr.CalleeSaveAddress(spill_count - j, frame_size);
+ if (((frame_info.CoreSpillMask() >> i) & 1) != 0) {
+ gprs_[i] = fr.CalleeSaveAddress(spill_count - j, frame_info.FrameSizeInBytes());
j++;
}
}
@@ -61,8 +60,9 @@
// Lowest number spill is farthest away, walk registers and fill into context
int j = 1;
for (size_t i = 0; i < kNumberOfSRegisters; i++) {
- if (((fp_core_spills >> i) & 1) != 0) {
- fprs_[i] = fr.CalleeSaveAddress(spill_count + fp_spill_count - j, frame_size);
+ if (((frame_info.FpSpillMask() >> i) & 1) != 0) {
+ fprs_[i] = fr.CalleeSaveAddress(spill_count + fp_spill_count - j,
+ frame_info.FrameSizeInBytes());
j++;
}
}
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 4886561..5212576 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -399,7 +399,7 @@
@ unlocked case - r2 holds thread id with count of 0
strex r3, r2, [r0, #LOCK_WORD_OFFSET]
cbnz r3, .Lstrex_fail @ store failed, retry
- dmb ish @ full (LoadLoad) memory barrier
+ dmb ish @ full (LoadLoad|LoadStore) memory barrier
bx lr
.Lstrex_fail:
b .Lretry_lock @ unlikely forward branch, need to reload and recheck r1/r2
@@ -442,7 +442,7 @@
cmp r1, #65536
bpl .Lrecursive_thin_unlock
@ transition to unlocked, r3 holds 0
- dmb ish @ full (StoreLoad) memory barrier
+ dmb ish @ full (LoadStore|StoreStore) memory barrier
str r3, [r0, #LOCK_WORD_OFFSET]
bx lr
.Lrecursive_thin_unlock:
diff --git a/runtime/arch/arm/quick_method_frame_info_arm.h b/runtime/arch/arm/quick_method_frame_info_arm.h
new file mode 100644
index 0000000..8d08190
--- /dev/null
+++ b/runtime/arch/arm/quick_method_frame_info_arm.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_ARM_QUICK_METHOD_FRAME_INFO_ARM_H_
+#define ART_RUNTIME_ARCH_ARM_QUICK_METHOD_FRAME_INFO_ARM_H_
+
+#include "quick/quick_method_frame_info.h"
+#include "registers_arm.h"
+#include "runtime.h" // for Runtime::CalleeSaveType.
+
+namespace art {
+namespace arm {
+
+static constexpr uint32_t kArmCalleeSaveRefSpills =
+ (1 << art::arm::R5) | (1 << art::arm::R6) | (1 << art::arm::R7) | (1 << art::arm::R8) |
+ (1 << art::arm::R10) | (1 << art::arm::R11);
+static constexpr uint32_t kArmCalleeSaveArgSpills =
+ (1 << art::arm::R1) | (1 << art::arm::R2) | (1 << art::arm::R3);
+static constexpr uint32_t kArmCalleeSaveAllSpills =
+ (1 << art::arm::R4) | (1 << art::arm::R9);
+static constexpr uint32_t kArmCalleeSaveFpAllSpills =
+ (1 << art::arm::S0) | (1 << art::arm::S1) | (1 << art::arm::S2) | (1 << art::arm::S3) |
+ (1 << art::arm::S4) | (1 << art::arm::S5) | (1 << art::arm::S6) | (1 << art::arm::S7) |
+ (1 << art::arm::S8) | (1 << art::arm::S9) | (1 << art::arm::S10) | (1 << art::arm::S11) |
+ (1 << art::arm::S12) | (1 << art::arm::S13) | (1 << art::arm::S14) | (1 << art::arm::S15) |
+ (1 << art::arm::S16) | (1 << art::arm::S17) | (1 << art::arm::S18) | (1 << art::arm::S19) |
+ (1 << art::arm::S20) | (1 << art::arm::S21) | (1 << art::arm::S22) | (1 << art::arm::S23) |
+ (1 << art::arm::S24) | (1 << art::arm::S25) | (1 << art::arm::S26) | (1 << art::arm::S27) |
+ (1 << art::arm::S28) | (1 << art::arm::S29) | (1 << art::arm::S30) | (1 << art::arm::S31);
+
+constexpr uint32_t ArmCalleeSaveCoreSpills(Runtime::CalleeSaveType type) {
+ return kArmCalleeSaveRefSpills | (type == Runtime::kRefsAndArgs ? kArmCalleeSaveArgSpills : 0) |
+ (type == Runtime::kSaveAll ? kArmCalleeSaveAllSpills : 0) | (1 << art::arm::LR);
+}
+
+constexpr uint32_t ArmCalleeSaveFpSpills(Runtime::CalleeSaveType type) {
+ return type == Runtime::kSaveAll ? kArmCalleeSaveFpAllSpills : 0;
+}
+
+constexpr uint32_t ArmCalleeSaveFrameSize(Runtime::CalleeSaveType type) {
+ return RoundUp((POPCOUNT(ArmCalleeSaveCoreSpills(type)) /* gprs */ +
+ POPCOUNT(ArmCalleeSaveFpSpills(type)) /* fprs */ +
+ 1 /* Method* */) * kArmPointerSize, kStackAlignment);
+}
+
+constexpr QuickMethodFrameInfo ArmCalleeSaveMethodFrameInfo(Runtime::CalleeSaveType type) {
+ return QuickMethodFrameInfo(ArmCalleeSaveFrameSize(type),
+ ArmCalleeSaveCoreSpills(type),
+ ArmCalleeSaveFpSpills(type));
+}
+
+} // namespace arm
+} // namespace art
+
+#endif // ART_RUNTIME_ARCH_ARM_QUICK_METHOD_FRAME_INFO_ARM_H_
diff --git a/runtime/arch/arm64/context_arm64.cc b/runtime/arch/arm64/context_arm64.cc
index 0890fa9..fae44af 100644
--- a/runtime/arch/arm64/context_arm64.cc
+++ b/runtime/arch/arm64/context_arm64.cc
@@ -18,8 +18,9 @@
#include "context_arm64.h"
-#include "mirror/art_method.h"
+#include "mirror/art_method-inl.h"
#include "mirror/object-inl.h"
+#include "quick/quick_method_frame_info.h"
#include "stack.h"
#include "thread.h"
@@ -45,18 +46,15 @@
void Arm64Context::FillCalleeSaves(const StackVisitor& fr) {
mirror::ArtMethod* method = fr.GetMethod();
- uint32_t core_spills = method->GetCoreSpillMask();
- uint32_t fp_core_spills = method->GetFpSpillMask();
- size_t spill_count = POPCOUNT(core_spills);
- size_t fp_spill_count = POPCOUNT(fp_core_spills);
- size_t frame_size = method->GetFrameSizeInBytes();
-
+ const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo();
+ size_t spill_count = POPCOUNT(frame_info.CoreSpillMask());
+ size_t fp_spill_count = POPCOUNT(frame_info.FpSpillMask());
if (spill_count > 0) {
// Lowest number spill is farthest away, walk registers and fill into context.
int j = 1;
for (size_t i = 0; i < kNumberOfCoreRegisters; i++) {
- if (((core_spills >> i) & 1) != 0) {
- gprs_[i] = fr.CalleeSaveAddress(spill_count - j, frame_size);
+ if (((frame_info.CoreSpillMask() >> i) & 1) != 0) {
+ gprs_[i] = fr.CalleeSaveAddress(spill_count - j, frame_info.FrameSizeInBytes());
j++;
}
}
@@ -66,8 +64,9 @@
// Lowest number spill is farthest away, walk registers and fill into context.
int j = 1;
for (size_t i = 0; i < kNumberOfDRegisters; i++) {
- if (((fp_core_spills >> i) & 1) != 0) {
- fprs_[i] = fr.CalleeSaveAddress(spill_count + fp_spill_count - j, frame_size);
+ if (((frame_info.FpSpillMask() >> i) & 1) != 0) {
+ fprs_[i] = fr.CalleeSaveAddress(spill_count + fp_spill_count - j,
+ frame_info.FrameSizeInBytes());
j++;
}
}
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index c056b2f..f2050b3 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1014,7 +1014,7 @@
cbnz w1, .Lnot_unlocked // already thin locked
stxr w3, w2, [x4]
cbnz w3, .Lstrex_fail // store failed, retry
- dmb ishld // full (LoadLoad) memory barrier, TODO: acquire-release
+ dmb ishld // full (LoadLoad|LoadStore) memory barrier
ret
.Lstrex_fail:
b .Lretry_lock // unlikely forward branch, need to reload and recheck r1/r2
@@ -1058,7 +1058,7 @@
cmp w1, #65536
bpl .Lrecursive_thin_unlock
// transition to unlocked, w3 holds 0
- dmb ish // full (StoreLoad) memory barrier
+ dmb ish // full (LoadStore|StoreStore) memory barrier
str w3, [x0, #LOCK_WORD_OFFSET]
ret
.Lrecursive_thin_unlock:
@@ -1444,7 +1444,7 @@
* | RDI/Method* | <- X0
* #-------------------#
* | local ref cookie | // 4B
- * | SIRT size | // 4B
+ * | handle scope size | // 4B
* #-------------------#
* | JNI Call Stack |
* #-------------------# <--- SP on native call
@@ -1471,7 +1471,7 @@
.cfi_def_cfa_register x28
// This looks the same, but is different: this will be updated to point to the bottom
- // of the frame when the SIRT is inserted.
+ // of the frame when the handle scope is inserted.
mov xFP, sp
mov x8, #5120
@@ -1486,7 +1486,7 @@
mov x1, xFP
bl artQuickGenericJniTrampoline // (Thread*, sp)
- // Get the updated pointer. This is the bottom of the frame _with_ SIRT.
+ // Get the updated pointer. This is the bottom of the frame _with_ handle scope.
ldr xFP, [sp]
add x9, sp, #8
diff --git a/runtime/arch/arm64/quick_method_frame_info_arm64.h b/runtime/arch/arm64/quick_method_frame_info_arm64.h
new file mode 100644
index 0000000..cb830ac
--- /dev/null
+++ b/runtime/arch/arm64/quick_method_frame_info_arm64.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_ARM64_QUICK_METHOD_FRAME_INFO_ARM64_H_
+#define ART_RUNTIME_ARCH_ARM64_QUICK_METHOD_FRAME_INFO_ARM64_H_
+
+#include "quick/quick_method_frame_info.h"
+#include "registers_arm64.h"
+#include "runtime.h" // for Runtime::CalleeSaveType.
+
+namespace art {
+namespace arm64 {
+
+// Callee saved registers
+static constexpr uint32_t kArm64CalleeSaveRefSpills =
+ (1 << art::arm64::X19) | (1 << art::arm64::X20) | (1 << art::arm64::X21) |
+ (1 << art::arm64::X22) | (1 << art::arm64::X23) | (1 << art::arm64::X24) |
+ (1 << art::arm64::X25) | (1 << art::arm64::X26) | (1 << art::arm64::X27) |
+ (1 << art::arm64::X28);
+// X0 is the method pointer. Not saved.
+static constexpr uint32_t kArm64CalleeSaveArgSpills =
+ (1 << art::arm64::X1) | (1 << art::arm64::X2) | (1 << art::arm64::X3) |
+ (1 << art::arm64::X4) | (1 << art::arm64::X5) | (1 << art::arm64::X6) |
+ (1 << art::arm64::X7);
+// TODO This is conservative. Only ALL should include the thread register.
+// The thread register is not preserved by the aapcs64.
+// LR is always saved.
+static constexpr uint32_t kArm64CalleeSaveAllSpills = 0; // (1 << art::arm64::LR);
+
+// Save callee-saved floating point registers. Rest are scratch/parameters.
+static constexpr uint32_t kArm64CalleeSaveFpArgSpills =
+ (1 << art::arm64::D0) | (1 << art::arm64::D1) | (1 << art::arm64::D2) |
+ (1 << art::arm64::D3) | (1 << art::arm64::D4) | (1 << art::arm64::D5) |
+ (1 << art::arm64::D6) | (1 << art::arm64::D7);
+static constexpr uint32_t kArm64CalleeSaveFpRefSpills =
+ (1 << art::arm64::D8) | (1 << art::arm64::D9) | (1 << art::arm64::D10) |
+ (1 << art::arm64::D11) | (1 << art::arm64::D12) | (1 << art::arm64::D13) |
+ (1 << art::arm64::D14) | (1 << art::arm64::D15);
+static constexpr uint32_t kArm64FpAllSpills =
+ kArm64CalleeSaveFpArgSpills |
+ (1 << art::arm64::D16) | (1 << art::arm64::D17) | (1 << art::arm64::D18) |
+ (1 << art::arm64::D19) | (1 << art::arm64::D20) | (1 << art::arm64::D21) |
+ (1 << art::arm64::D22) | (1 << art::arm64::D23) | (1 << art::arm64::D24) |
+ (1 << art::arm64::D25) | (1 << art::arm64::D26) | (1 << art::arm64::D27) |
+ (1 << art::arm64::D28) | (1 << art::arm64::D29) | (1 << art::arm64::D30) |
+ (1 << art::arm64::D31);
+
+constexpr uint32_t Arm64CalleeSaveCoreSpills(Runtime::CalleeSaveType type) {
+ return kArm64CalleeSaveRefSpills |
+ (type == Runtime::kRefsAndArgs ? kArm64CalleeSaveArgSpills : 0) |
+ (type == Runtime::kSaveAll ? kArm64CalleeSaveAllSpills : 0) | (1 << art::arm64::FP) |
+ (1 << art::arm64::X18) | (1 << art::arm64::LR);
+}
+
+constexpr uint32_t Arm64CalleeSaveFpSpills(Runtime::CalleeSaveType type) {
+ return kArm64CalleeSaveFpRefSpills |
+ (type == Runtime::kRefsAndArgs ? kArm64CalleeSaveFpArgSpills: 0) |
+ (type == Runtime::kSaveAll ? kArm64FpAllSpills : 0);
+}
+
+constexpr uint32_t Arm64CalleeSaveFrameSize(Runtime::CalleeSaveType type) {
+ return RoundUp((POPCOUNT(Arm64CalleeSaveCoreSpills(type)) /* gprs */ +
+ POPCOUNT(Arm64CalleeSaveFpSpills(type)) /* fprs */ +
+ 1 /* Method* */) * kArm64PointerSize, kStackAlignment);
+}
+
+constexpr QuickMethodFrameInfo Arm64CalleeSaveMethodFrameInfo(Runtime::CalleeSaveType type) {
+ return QuickMethodFrameInfo(Arm64CalleeSaveFrameSize(type),
+ Arm64CalleeSaveCoreSpills(type),
+ Arm64CalleeSaveFpSpills(type));
+}
+
+} // namespace arm64
+} // namespace art
+
+#endif // ART_RUNTIME_ARCH_ARM64_QUICK_METHOD_FRAME_INFO_ARM64_H_
diff --git a/runtime/arch/mips/context_mips.cc b/runtime/arch/mips/context_mips.cc
index 0950e71..ad28891 100644
--- a/runtime/arch/mips/context_mips.cc
+++ b/runtime/arch/mips/context_mips.cc
@@ -16,8 +16,9 @@
#include "context_mips.h"
-#include "mirror/art_method.h"
+#include "mirror/art_method-inl.h"
#include "mirror/object-inl.h"
+#include "quick/quick_method_frame_info.h"
#include "stack.h"
namespace art {
@@ -41,17 +42,15 @@
void MipsContext::FillCalleeSaves(const StackVisitor& fr) {
mirror::ArtMethod* method = fr.GetMethod();
- uint32_t core_spills = method->GetCoreSpillMask();
- uint32_t fp_core_spills = method->GetFpSpillMask();
- size_t spill_count = POPCOUNT(core_spills);
- size_t fp_spill_count = POPCOUNT(fp_core_spills);
- size_t frame_size = method->GetFrameSizeInBytes();
+ const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo();
+ size_t spill_count = POPCOUNT(frame_info.CoreSpillMask());
+ size_t fp_spill_count = POPCOUNT(frame_info.FpSpillMask());
if (spill_count > 0) {
// Lowest number spill is farthest away, walk registers and fill into context.
int j = 1;
for (size_t i = 0; i < kNumberOfCoreRegisters; i++) {
- if (((core_spills >> i) & 1) != 0) {
- gprs_[i] = fr.CalleeSaveAddress(spill_count - j, frame_size);
+ if (((frame_info.CoreSpillMask() >> i) & 1) != 0) {
+ gprs_[i] = fr.CalleeSaveAddress(spill_count - j, frame_info.FrameSizeInBytes());
j++;
}
}
@@ -60,8 +59,9 @@
// Lowest number spill is farthest away, walk registers and fill into context.
int j = 1;
for (size_t i = 0; i < kNumberOfFRegisters; i++) {
- if (((fp_core_spills >> i) & 1) != 0) {
- fprs_[i] = fr.CalleeSaveAddress(spill_count + fp_spill_count - j, frame_size);
+ if (((frame_info.FpSpillMask() >> i) & 1) != 0) {
+ fprs_[i] = fr.CalleeSaveAddress(spill_count + fp_spill_count - j,
+ frame_info.FrameSizeInBytes());
j++;
}
}
diff --git a/runtime/arch/mips/quick_method_frame_info_mips.h b/runtime/arch/mips/quick_method_frame_info_mips.h
new file mode 100644
index 0000000..2a8bcf0
--- /dev/null
+++ b/runtime/arch/mips/quick_method_frame_info_mips.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_MIPS_QUICK_METHOD_FRAME_INFO_MIPS_H_
+#define ART_RUNTIME_ARCH_MIPS_QUICK_METHOD_FRAME_INFO_MIPS_H_
+
+#include "quick/quick_method_frame_info.h"
+#include "registers_mips.h"
+#include "runtime.h" // for Runtime::CalleeSaveType.
+
+namespace art {
+namespace mips {
+
+static constexpr uint32_t kMipsCalleeSaveRefSpills =
+ (1 << art::mips::S2) | (1 << art::mips::S3) | (1 << art::mips::S4) | (1 << art::mips::S5) |
+ (1 << art::mips::S6) | (1 << art::mips::S7) | (1 << art::mips::GP) | (1 << art::mips::FP);
+static constexpr uint32_t kMipsCalleeSaveArgSpills =
+ (1 << art::mips::A1) | (1 << art::mips::A2) | (1 << art::mips::A3);
+static constexpr uint32_t kMipsCalleeSaveAllSpills =
+ (1 << art::mips::S0) | (1 << art::mips::S1);
+
+constexpr uint32_t MipsCalleeSaveCoreSpills(Runtime::CalleeSaveType type) {
+ return kMipsCalleeSaveRefSpills |
+ (type == Runtime::kRefsAndArgs ? kMipsCalleeSaveArgSpills : 0) |
+ (type == Runtime::kSaveAll ? kMipsCalleeSaveAllSpills : 0) | (1 << art::mips::RA);
+}
+
+constexpr uint32_t MipsCalleeSaveFrameSize(Runtime::CalleeSaveType type) {
+ return RoundUp((POPCOUNT(MipsCalleeSaveCoreSpills(type)) /* gprs */ +
+ (type == Runtime::kRefsAndArgs ? 0 : 3) + 1 /* Method* */) *
+ kMipsPointerSize, kStackAlignment);
+}
+
+constexpr QuickMethodFrameInfo MipsCalleeSaveMethodFrameInfo(Runtime::CalleeSaveType type) {
+ return QuickMethodFrameInfo(MipsCalleeSaveFrameSize(type),
+ MipsCalleeSaveCoreSpills(type),
+ 0u);
+}
+
+} // namespace mips
+} // namespace art
+
+#endif // ART_RUNTIME_ARCH_MIPS_QUICK_METHOD_FRAME_INFO_MIPS_H_
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 76d028d..d9bc105 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -33,10 +33,11 @@
{
// Create callee-save methods
ScopedObjectAccess soa(Thread::Current());
+ runtime_->SetInstructionSet(kRuntimeISA);
for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
if (!runtime_->HasCalleeSaveMethod(type)) {
- runtime_->SetCalleeSaveMethod(runtime_->CreateCalleeSaveMethod(kRuntimeISA, type), type);
+ runtime_->SetCalleeSaveMethod(runtime_->CreateCalleeSaveMethod(type), type);
}
}
}
@@ -370,13 +371,14 @@
ScopedObjectAccess soa(self);
// garbage is created during ClassLinker::Init
- SirtRef<mirror::String> obj(soa.Self(),
- mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!"));
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::String> obj(
+ hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));
LockWord lock = obj->GetLockWord(false);
LockWord::LockState old_state = lock.GetState();
EXPECT_EQ(LockWord::LockState::kUnlocked, old_state);
- Invoke3(reinterpret_cast<size_t>(obj.get()), 0U, 0U,
+ Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U,
reinterpret_cast<uintptr_t>(&art_quick_lock_object), self);
LockWord lock_after = obj->GetLockWord(false);
@@ -385,7 +387,7 @@
EXPECT_EQ(lock_after.ThinLockCount(), 0U); // Thin lock starts count at zero
for (size_t i = 1; i < kThinLockLoops; ++i) {
- Invoke3(reinterpret_cast<size_t>(obj.get()), 0U, 0U,
+ Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U,
reinterpret_cast<uintptr_t>(&art_quick_lock_object), self);
// Check we're at lock count i
@@ -397,12 +399,12 @@
}
// Force a fat lock by running identity hashcode to fill up lock word.
- SirtRef<mirror::Object> obj2(soa.Self(), mirror::String::AllocFromModifiedUtf8(soa.Self(),
- "hello, world!"));
+ Handle<mirror::String> obj2(hs.NewHandle(
+ mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));
obj2->IdentityHashCode();
- Invoke3(reinterpret_cast<size_t>(obj2.get()), 0U, 0U,
+ Invoke3(reinterpret_cast<size_t>(obj2.Get()), 0U, 0U,
reinterpret_cast<uintptr_t>(&art_quick_lock_object), self);
LockWord lock_after2 = obj2->GetLockWord(false);
@@ -446,16 +448,16 @@
// Create an object
ScopedObjectAccess soa(self);
// garbage is created during ClassLinker::Init
-
- SirtRef<mirror::String> obj(soa.Self(),
- mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!"));
+ static constexpr size_t kNumberOfLocks = 10; // Number of objects = lock
+ StackHandleScope<kNumberOfLocks + 1> hs(self);
+ Handle<mirror::String> obj(
+ hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));
LockWord lock = obj->GetLockWord(false);
LockWord::LockState old_state = lock.GetState();
EXPECT_EQ(LockWord::LockState::kUnlocked, old_state);
- test->Invoke3(reinterpret_cast<size_t>(obj.get()), 0U, 0U,
+ test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U,
reinterpret_cast<uintptr_t>(&art_quick_unlock_object), self);
-
// This should be an illegal monitor state.
EXPECT_TRUE(self->IsExceptionPending());
self->ClearException();
@@ -464,14 +466,14 @@
LockWord::LockState new_state = lock_after.GetState();
EXPECT_EQ(LockWord::LockState::kUnlocked, new_state);
- test->Invoke3(reinterpret_cast<size_t>(obj.get()), 0U, 0U,
+ test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U,
reinterpret_cast<uintptr_t>(&art_quick_lock_object), self);
LockWord lock_after2 = obj->GetLockWord(false);
LockWord::LockState new_state2 = lock_after2.GetState();
EXPECT_EQ(LockWord::LockState::kThinLocked, new_state2);
- test->Invoke3(reinterpret_cast<size_t>(obj.get()), 0U, 0U,
+ test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U,
reinterpret_cast<uintptr_t>(&art_quick_unlock_object), self);
LockWord lock_after3 = obj->GetLockWord(false);
@@ -484,20 +486,18 @@
RandGen r(0x1234);
- constexpr size_t kNumberOfLocks = 10; // Number of objects = lock
constexpr size_t kIterations = 10000; // Number of iterations
constexpr size_t kMoveToFat = 1000; // Chance of 1:kMoveFat to make a lock fat.
size_t counts[kNumberOfLocks];
bool fat[kNumberOfLocks]; // Whether a lock should be thin or fat.
- SirtRef<mirror::String>* objects[kNumberOfLocks];
+ Handle<mirror::String> objects[kNumberOfLocks];
// Initialize = allocate.
for (size_t i = 0; i < kNumberOfLocks; ++i) {
counts[i] = 0;
fat[i] = false;
- objects[i] = new SirtRef<mirror::String>(soa.Self(),
- mirror::String::AllocFromModifiedUtf8(soa.Self(), ""));
+ objects[i] = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), ""));
}
for (size_t i = 0; i < kIterations; ++i) {
@@ -507,9 +507,9 @@
// Make lock fat?
if (!fat[index] && (r.next() % kMoveToFat == 0)) {
fat[index] = true;
- objects[index]->get()->IdentityHashCode();
+ objects[index]->IdentityHashCode();
- LockWord lock_iter = objects[index]->get()->GetLockWord(false);
+ LockWord lock_iter = objects[index]->GetLockWord(false);
LockWord::LockState iter_state = lock_iter.GetState();
if (counts[index] == 0) {
EXPECT_EQ(LockWord::LockState::kHashCode, iter_state);
@@ -528,11 +528,11 @@
}
if (lock) {
- test-> Invoke3(reinterpret_cast<size_t>(objects[index]->get()), 0U, 0U,
+ test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U,
reinterpret_cast<uintptr_t>(&art_quick_lock_object), self);
counts[index]++;
} else {
- test->Invoke3(reinterpret_cast<size_t>(objects[index]->get()), 0U, 0U,
+ test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U,
reinterpret_cast<uintptr_t>(&art_quick_unlock_object), self);
counts[index]--;
}
@@ -540,12 +540,12 @@
EXPECT_FALSE(self->IsExceptionPending());
// Check the new state.
- LockWord lock_iter = objects[index]->get()->GetLockWord(true);
+ LockWord lock_iter = objects[index]->GetLockWord(true);
LockWord::LockState iter_state = lock_iter.GetState();
if (fat[index]) {
// Abuse MonitorInfo.
EXPECT_EQ(LockWord::LockState::kFatLocked, iter_state) << index;
- MonitorInfo info(objects[index]->get());
+ MonitorInfo info(objects[index].Get());
EXPECT_EQ(counts[index], info.entry_count_) << index;
} else {
if (counts[index] > 0) {
@@ -559,23 +559,20 @@
}
// Unlock the remaining count times and then check it's unlocked. Then deallocate.
- // Go reverse order to correctly handle SirtRefs.
+ // Go reverse order to correctly handle Handles.
for (size_t i = 0; i < kNumberOfLocks; ++i) {
size_t index = kNumberOfLocks - 1 - i;
size_t count = counts[index];
while (count > 0) {
- test->Invoke3(reinterpret_cast<size_t>(objects[index]->get()), 0U, 0U,
+ test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U,
reinterpret_cast<uintptr_t>(&art_quick_unlock_object), self);
-
count--;
}
- LockWord lock_after4 = objects[index]->get()->GetLockWord(false);
+ LockWord lock_after4 = objects[index]->GetLockWord(false);
LockWord::LockState new_state4 = lock_after4.GetState();
EXPECT_TRUE(LockWord::LockState::kUnlocked == new_state4
|| LockWord::LockState::kFatLocked == new_state4);
-
- delete objects[index];
}
// Test done.
@@ -601,31 +598,32 @@
ScopedObjectAccess soa(self);
// garbage is created during ClassLinker::Init
- SirtRef<mirror::Class> c(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
- "[Ljava/lang/Object;"));
- SirtRef<mirror::Class> c2(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
- "[Ljava/lang/String;"));
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::Class> c(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));
+ Handle<mirror::Class> c2(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/String;")));
EXPECT_FALSE(self->IsExceptionPending());
- Invoke3(reinterpret_cast<size_t>(c.get()), reinterpret_cast<size_t>(c.get()), 0U,
+ Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(c.Get()), 0U,
reinterpret_cast<uintptr_t>(&art_quick_check_cast), self);
EXPECT_FALSE(self->IsExceptionPending());
- Invoke3(reinterpret_cast<size_t>(c2.get()), reinterpret_cast<size_t>(c2.get()), 0U,
+ Invoke3(reinterpret_cast<size_t>(c2.Get()), reinterpret_cast<size_t>(c2.Get()), 0U,
reinterpret_cast<uintptr_t>(&art_quick_check_cast), self);
EXPECT_FALSE(self->IsExceptionPending());
- Invoke3(reinterpret_cast<size_t>(c.get()), reinterpret_cast<size_t>(c2.get()), 0U,
+ Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(c2.Get()), 0U,
reinterpret_cast<uintptr_t>(&art_quick_check_cast), self);
EXPECT_FALSE(self->IsExceptionPending());
// TODO: Make the following work. But that would require correct managed frames.
- Invoke3(reinterpret_cast<size_t>(c2.get()), reinterpret_cast<size_t>(c.get()), 0U,
+ Invoke3(reinterpret_cast<size_t>(c2.Get()), reinterpret_cast<size_t>(c.Get()), 0U,
reinterpret_cast<uintptr_t>(&art_quick_check_cast), self);
EXPECT_TRUE(self->IsExceptionPending());
@@ -653,23 +651,22 @@
ScopedObjectAccess soa(self);
// garbage is created during ClassLinker::Init
- SirtRef<mirror::Class> c(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
- "Ljava/lang/Object;"));
- SirtRef<mirror::Class> c2(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
- "Ljava/lang/String;"));
- SirtRef<mirror::Class> ca(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
- "[Ljava/lang/String;"));
+ StackHandleScope<5> hs(soa.Self());
+ Handle<mirror::Class> c(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));
+ Handle<mirror::Class> ca(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/String;")));
// Build a string array of size 1
- SirtRef<mirror::ObjectArray<mirror::Object> > array(soa.Self(),
- mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), ca.get(), 10));
+ Handle<mirror::ObjectArray<mirror::Object>> array(
+ hs.NewHandle(mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), ca.Get(), 10)));
// Build a string -> should be assignable
- SirtRef<mirror::Object> str_obj(soa.Self(),
- mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!"));
+ Handle<mirror::String> str_obj(
+ hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));
// Build a generic object -> should fail assigning
- SirtRef<mirror::Object> obj_obj(soa.Self(), c->AllocObject(soa.Self()));
+ Handle<mirror::Object> obj_obj(hs.NewHandle(c->AllocObject(soa.Self())));
// Play with it...
@@ -678,51 +675,51 @@
EXPECT_FALSE(self->IsExceptionPending());
- Invoke3(reinterpret_cast<size_t>(array.get()), 0U, reinterpret_cast<size_t>(str_obj.get()),
+ Invoke3(reinterpret_cast<size_t>(array.Get()), 0U, reinterpret_cast<size_t>(str_obj.Get()),
reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
EXPECT_FALSE(self->IsExceptionPending());
- EXPECT_EQ(str_obj.get(), array->Get(0));
+ EXPECT_EQ(str_obj.Get(), array->Get(0));
- Invoke3(reinterpret_cast<size_t>(array.get()), 1U, reinterpret_cast<size_t>(str_obj.get()),
+ Invoke3(reinterpret_cast<size_t>(array.Get()), 1U, reinterpret_cast<size_t>(str_obj.Get()),
reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
EXPECT_FALSE(self->IsExceptionPending());
- EXPECT_EQ(str_obj.get(), array->Get(1));
+ EXPECT_EQ(str_obj.Get(), array->Get(1));
- Invoke3(reinterpret_cast<size_t>(array.get()), 2U, reinterpret_cast<size_t>(str_obj.get()),
+ Invoke3(reinterpret_cast<size_t>(array.Get()), 2U, reinterpret_cast<size_t>(str_obj.Get()),
reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
EXPECT_FALSE(self->IsExceptionPending());
- EXPECT_EQ(str_obj.get(), array->Get(2));
+ EXPECT_EQ(str_obj.Get(), array->Get(2));
- Invoke3(reinterpret_cast<size_t>(array.get()), 3U, reinterpret_cast<size_t>(str_obj.get()),
+ Invoke3(reinterpret_cast<size_t>(array.Get()), 3U, reinterpret_cast<size_t>(str_obj.Get()),
reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
EXPECT_FALSE(self->IsExceptionPending());
- EXPECT_EQ(str_obj.get(), array->Get(3));
+ EXPECT_EQ(str_obj.Get(), array->Get(3));
// 1.2) Assign null to array[0..3]
- Invoke3(reinterpret_cast<size_t>(array.get()), 0U, reinterpret_cast<size_t>(nullptr),
+ Invoke3(reinterpret_cast<size_t>(array.Get()), 0U, reinterpret_cast<size_t>(nullptr),
reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
EXPECT_FALSE(self->IsExceptionPending());
EXPECT_EQ(nullptr, array->Get(0));
- Invoke3(reinterpret_cast<size_t>(array.get()), 1U, reinterpret_cast<size_t>(nullptr),
+ Invoke3(reinterpret_cast<size_t>(array.Get()), 1U, reinterpret_cast<size_t>(nullptr),
reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
EXPECT_FALSE(self->IsExceptionPending());
EXPECT_EQ(nullptr, array->Get(1));
- Invoke3(reinterpret_cast<size_t>(array.get()), 2U, reinterpret_cast<size_t>(nullptr),
+ Invoke3(reinterpret_cast<size_t>(array.Get()), 2U, reinterpret_cast<size_t>(nullptr),
reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
EXPECT_FALSE(self->IsExceptionPending());
EXPECT_EQ(nullptr, array->Get(2));
- Invoke3(reinterpret_cast<size_t>(array.get()), 3U, reinterpret_cast<size_t>(nullptr),
+ Invoke3(reinterpret_cast<size_t>(array.Get()), 3U, reinterpret_cast<size_t>(nullptr),
reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
EXPECT_FALSE(self->IsExceptionPending());
@@ -734,7 +731,7 @@
// 2.1) Array = null
// TODO: Throwing NPE needs actual DEX code
-// Invoke3(reinterpret_cast<size_t>(nullptr), 0U, reinterpret_cast<size_t>(str_obj.get()),
+// Invoke3(reinterpret_cast<size_t>(nullptr), 0U, reinterpret_cast<size_t>(str_obj.Get()),
// reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
//
// EXPECT_TRUE(self->IsExceptionPending());
@@ -742,8 +739,8 @@
// 2.2) Index < 0
- Invoke3(reinterpret_cast<size_t>(array.get()), static_cast<size_t>(-1),
- reinterpret_cast<size_t>(str_obj.get()),
+ Invoke3(reinterpret_cast<size_t>(array.Get()), static_cast<size_t>(-1),
+ reinterpret_cast<size_t>(str_obj.Get()),
reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
EXPECT_TRUE(self->IsExceptionPending());
@@ -751,7 +748,7 @@
// 2.3) Index > 0
- Invoke3(reinterpret_cast<size_t>(array.get()), 10U, reinterpret_cast<size_t>(str_obj.get()),
+ Invoke3(reinterpret_cast<size_t>(array.Get()), 10U, reinterpret_cast<size_t>(str_obj.Get()),
reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
EXPECT_TRUE(self->IsExceptionPending());
@@ -759,7 +756,7 @@
// 3) Failure cases (obj into str[])
- Invoke3(reinterpret_cast<size_t>(array.get()), 0U, reinterpret_cast<size_t>(obj_obj.get()),
+ Invoke3(reinterpret_cast<size_t>(array.Get()), 0U, reinterpret_cast<size_t>(obj_obj.Get()),
reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
EXPECT_TRUE(self->IsExceptionPending());
@@ -784,8 +781,9 @@
ScopedObjectAccess soa(self);
// garbage is created during ClassLinker::Init
- SirtRef<mirror::Class> c(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
- "Ljava/lang/Object;"));
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::Class> c(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));
// Play with it...
@@ -801,35 +799,35 @@
EXPECT_FALSE(self->IsExceptionPending());
EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
- EXPECT_EQ(c.get(), obj->GetClass());
+ EXPECT_EQ(c.Get(), obj->GetClass());
VerifyObject(obj);
}
{
// We can use nullptr in the second argument as we do not need a method here (not used in
// resolved/initialized cases)
- size_t result = Invoke3(reinterpret_cast<size_t>(c.get()), reinterpret_cast<size_t>(nullptr), 0U,
+ size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(nullptr), 0U,
reinterpret_cast<uintptr_t>(GetTlsPtr(self)->quick_entrypoints.pAllocObjectResolved),
self);
EXPECT_FALSE(self->IsExceptionPending());
EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
- EXPECT_EQ(c.get(), obj->GetClass());
+ EXPECT_EQ(c.Get(), obj->GetClass());
VerifyObject(obj);
}
{
// We can use nullptr in the second argument as we do not need a method here (not used in
// resolved/initialized cases)
- size_t result = Invoke3(reinterpret_cast<size_t>(c.get()), reinterpret_cast<size_t>(nullptr), 0U,
+ size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(nullptr), 0U,
reinterpret_cast<uintptr_t>(GetTlsPtr(self)->quick_entrypoints.pAllocObjectInitialized),
self);
EXPECT_FALSE(self->IsExceptionPending());
EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
- EXPECT_EQ(c.get(), obj->GetClass());
+ EXPECT_EQ(c.Get(), obj->GetClass());
VerifyObject(obj);
}
@@ -840,19 +838,21 @@
Runtime::Current()->GetHeap()->SetIdealFootprint(1 * GB);
// Array helps to fill memory faster.
- SirtRef<mirror::Class> ca(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
- "[Ljava/lang/Object;"));
- std::vector<SirtRef<mirror::Object>*> sirt_refs;
+ Handle<mirror::Class> ca(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));
+
+ // Use arbitrary large amount for now.
+ static const size_t kMaxHandles = 1000000;
+ UniquePtr<StackHandleScope<kMaxHandles> > hsp(new StackHandleScope<kMaxHandles>(self));
+
+ std::vector<Handle<mirror::Object>> handles;
// Start allocating with 128K
size_t length = 128 * KB / 4;
while (length > 10) {
- SirtRef<mirror::Object>* ref = new SirtRef<mirror::Object>(soa.Self(),
- mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(),
- ca.get(),
- length/4));
- if (self->IsExceptionPending() || ref->get() == nullptr) {
+ Handle<mirror::Object> h(hsp->NewHandle<mirror::Object>(
+ mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), ca.Get(), length / 4)));
+ if (self->IsExceptionPending() || h.Get() == nullptr) {
self->ClearException();
- delete ref;
// Try a smaller length
length = length / 8;
@@ -862,38 +862,26 @@
length = mem / 8;
}
} else {
- sirt_refs.push_back(ref);
+ handles.push_back(h);
}
}
- LOG(INFO) << "Used " << sirt_refs.size() << " arrays to fill space.";
+ LOG(INFO) << "Used " << handles.size() << " arrays to fill space.";
// Allocate simple objects till it fails.
while (!self->IsExceptionPending()) {
- SirtRef<mirror::Object>* ref = new SirtRef<mirror::Object>(soa.Self(),
- c->AllocObject(soa.Self()));
- if (!self->IsExceptionPending() && ref->get() != nullptr) {
- sirt_refs.push_back(ref);
- } else {
- delete ref;
+ Handle<mirror::Object> h = hsp->NewHandle(c->AllocObject(soa.Self()));
+ if (!self->IsExceptionPending() && h.Get() != nullptr) {
+ handles.push_back(h);
}
}
self->ClearException();
- size_t result = Invoke3(reinterpret_cast<size_t>(c.get()), reinterpret_cast<size_t>(nullptr), 0U,
+ size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(nullptr), 0U,
reinterpret_cast<uintptr_t>(GetTlsPtr(self)->quick_entrypoints.pAllocObjectInitialized),
self);
-
EXPECT_TRUE(self->IsExceptionPending());
self->ClearException();
EXPECT_EQ(reinterpret_cast<size_t>(nullptr), result);
-
- // Release all the allocated objects.
- // Need to go backward to release SirtRef in the right order.
- auto it = sirt_refs.rbegin();
- auto end = sirt_refs.rend();
- for (; it != end; ++it) {
- delete *it;
- }
}
// Tests done.
@@ -915,12 +903,13 @@
ScopedObjectAccess soa(self);
// garbage is created during ClassLinker::Init
- SirtRef<mirror::Class> c(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
- "[Ljava/lang/Object;"));
+ StackHandleScope<2> hs(self);
+ Handle<mirror::Class> c(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));
// Needed to have a linked method.
- SirtRef<mirror::Class> c_obj(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
- "Ljava/lang/Object;"));
+ Handle<mirror::Class> c_obj(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));
// Play with it...
@@ -940,7 +929,7 @@
EXPECT_FALSE(self->IsExceptionPending());
EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
mirror::Array* obj = reinterpret_cast<mirror::Array*>(result);
- EXPECT_EQ(c.get(), obj->GetClass());
+ EXPECT_EQ(c.Get(), obj->GetClass());
VerifyObject(obj);
EXPECT_EQ(obj->GetLength(), 10);
}
@@ -948,16 +937,15 @@
{
// We can use nullptr in the second argument as we do not need a method here (not used in
// resolved/initialized cases)
- size_t result = Invoke3(reinterpret_cast<size_t>(c.get()), reinterpret_cast<size_t>(nullptr), 10U,
+ size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(nullptr), 10U,
reinterpret_cast<uintptr_t>(GetTlsPtr(self)->quick_entrypoints.pAllocArrayResolved),
self);
-
EXPECT_FALSE(self->IsExceptionPending()) << PrettyTypeOf(self->GetException(nullptr));
EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
EXPECT_TRUE(obj->IsArrayInstance());
EXPECT_TRUE(obj->IsObjectArray());
- EXPECT_EQ(c.get(), obj->GetClass());
+ EXPECT_EQ(c.Get(), obj->GetClass());
VerifyObject(obj);
mirror::Array* array = reinterpret_cast<mirror::Array*>(result);
EXPECT_EQ(array->GetLength(), 10);
@@ -967,7 +955,7 @@
// Out-of-memory.
{
- size_t result = Invoke3(reinterpret_cast<size_t>(c.get()), reinterpret_cast<size_t>(nullptr),
+ size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(nullptr),
GB, // that should fail...
reinterpret_cast<uintptr_t>(GetTlsPtr(self)->quick_entrypoints.pAllocArrayResolved),
self);
@@ -1004,32 +992,31 @@
// Use array so we can index into it and use a matrix for expected results
// Setup: The first half is standard. The second half uses a non-zero offset.
// TODO: Shared backing arrays.
- constexpr size_t base_string_count = 7;
- const char* c[base_string_count] = { "", "", "a", "aa", "ab", "aac", "aac" , };
+ static constexpr size_t kBaseStringCount = 7;
+ const char* c[kBaseStringCount] = { "", "", "a", "aa", "ab", "aac", "aac" , };
- constexpr size_t string_count = 2 * base_string_count;
+ static constexpr size_t kStringCount = 2 * kBaseStringCount;
- SirtRef<mirror::String>* s[string_count];
+ StackHandleScope<kStringCount> hs(self);
+ Handle<mirror::String> s[kStringCount];
- for (size_t i = 0; i < base_string_count; ++i) {
- s[i] = new SirtRef<mirror::String>(soa.Self(), mirror::String::AllocFromModifiedUtf8(soa.Self(),
- c[i]));
+ for (size_t i = 0; i < kBaseStringCount; ++i) {
+ s[i] = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), c[i]));
}
RandGen r(0x1234);
- for (size_t i = base_string_count; i < string_count; ++i) {
- s[i] = new SirtRef<mirror::String>(soa.Self(), mirror::String::AllocFromModifiedUtf8(soa.Self(),
- c[i - base_string_count]));
- int32_t length = s[i]->get()->GetLength();
+ for (size_t i = kBaseStringCount; i < kStringCount; ++i) {
+ s[i] = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), c[i - kBaseStringCount]));
+ int32_t length = s[i]->GetLength();
if (length > 1) {
// Set a random offset and length.
int32_t new_offset = 1 + (r.next() % (length - 1));
int32_t rest = length - new_offset - 1;
int32_t new_length = 1 + (rest > 0 ? r.next() % rest : 0);
- s[i]->get()->SetField32<false>(mirror::String::CountOffset(), new_length);
- s[i]->get()->SetField32<false>(mirror::String::OffsetOffset(), new_offset);
+ s[i]->SetField32<false>(mirror::String::CountOffset(), new_length);
+ s[i]->SetField32<false>(mirror::String::OffsetOffset(), new_offset);
}
}
@@ -1038,20 +1025,20 @@
// Matrix of expectations. First component is first parameter. Note we only check against the
// sign, not the value. As we are testing random offsets, we need to compute this and need to
// rely on String::CompareTo being correct.
- int32_t expected[string_count][string_count];
- for (size_t x = 0; x < string_count; ++x) {
- for (size_t y = 0; y < string_count; ++y) {
- expected[x][y] = s[x]->get()->CompareTo(s[y]->get());
+ int32_t expected[kStringCount][kStringCount];
+ for (size_t x = 0; x < kStringCount; ++x) {
+ for (size_t y = 0; y < kStringCount; ++y) {
+ expected[x][y] = s[x]->CompareTo(s[y].Get());
}
}
// Play with it...
- for (size_t x = 0; x < string_count; ++x) {
- for (size_t y = 0; y < string_count; ++y) {
+ for (size_t x = 0; x < kStringCount; ++x) {
+ for (size_t y = 0; y < kStringCount; ++y) {
// Test string_compareto x y
- size_t result = Invoke3(reinterpret_cast<size_t>(s[x]->get()),
- reinterpret_cast<size_t>(s[y]->get()), 0U,
+ size_t result = Invoke3(reinterpret_cast<size_t>(s[x].Get()),
+ reinterpret_cast<size_t>(s[y].Get()), 0U,
reinterpret_cast<uintptr_t>(&art_quick_string_compareto), self);
EXPECT_FALSE(self->IsExceptionPending());
@@ -1089,7 +1076,7 @@
extern "C" void art_quick_get32_static(void);
#endif
-static void GetSet32Static(SirtRef<mirror::Object>* obj, SirtRef<mirror::ArtField>* f, Thread* self,
+static void GetSet32Static(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
@@ -1125,7 +1112,7 @@
extern "C" void art_quick_get32_instance(void);
#endif
-static void GetSet32Instance(SirtRef<mirror::Object>* obj, SirtRef<mirror::ArtField>* f,
+static void GetSet32Instance(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f,
Thread* self, mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
@@ -1134,20 +1121,20 @@
for (size_t i = 0; i < num_values; ++i) {
test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
- reinterpret_cast<size_t>(obj->get()),
+ reinterpret_cast<size_t>(obj->Get()),
static_cast<size_t>(values[i]),
reinterpret_cast<uintptr_t>(&art_quick_set32_instance),
self,
referrer);
- int32_t res = f->get()->GetInt(obj->get());
+ int32_t res = f->Get()->GetInt(obj->Get());
EXPECT_EQ(res, static_cast<int32_t>(values[i])) << "Iteration " << i;
res++;
- f->get()->SetInt<false>(obj->get(), res);
+ f->Get()->SetInt<false>(obj->Get(), res);
size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
- reinterpret_cast<size_t>(obj->get()),
+ reinterpret_cast<size_t>(obj->Get()),
0U,
reinterpret_cast<uintptr_t>(&art_quick_get32_instance),
self,
@@ -1186,7 +1173,7 @@
}
#endif
-static void GetSetObjStatic(SirtRef<mirror::Object>* obj, SirtRef<mirror::ArtField>* f, Thread* self,
+static void GetSetObjStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
@@ -1209,7 +1196,7 @@
extern "C" void art_quick_set_obj_instance(void);
extern "C" void art_quick_get_obj_instance(void);
-static void set_and_check_instance(SirtRef<mirror::ArtField>* f, mirror::Object* trg,
+static void set_and_check_instance(Handle<mirror::ArtField>* f, mirror::Object* trg,
mirror::Object* val, Thread* self, mirror::ArtMethod* referrer,
StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -1229,21 +1216,21 @@
EXPECT_EQ(res, reinterpret_cast<size_t>(val)) << "Value " << val;
- EXPECT_EQ(val, f->get()->GetObj(trg));
+ EXPECT_EQ(val, f->Get()->GetObj(trg));
}
#endif
-static void GetSetObjInstance(SirtRef<mirror::Object>* obj, SirtRef<mirror::ArtField>* f,
+static void GetSetObjInstance(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f,
Thread* self, mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
- set_and_check_instance(f, obj->get(), nullptr, self, referrer, test);
+ set_and_check_instance(f, obj->Get(), nullptr, self, referrer, test);
// Allocate a string object for simplicity.
mirror::String* str = mirror::String::AllocFromModifiedUtf8(self, "Test");
- set_and_check_instance(f, obj->get(), str, self, referrer, test);
+ set_and_check_instance(f, obj->Get(), str, self, referrer, test);
- set_and_check_instance(f, obj->get(), nullptr, self, referrer, test);
+ set_and_check_instance(f, obj->Get(), nullptr, self, referrer, test);
#else
LOG(INFO) << "Skipping setObjinstance as I don't know how to do that on " << kRuntimeISA;
// Force-print to std::cout so it's also outside the logcat.
@@ -1259,7 +1246,7 @@
extern "C" void art_quick_get64_static(void);
#endif
-static void GetSet64Static(SirtRef<mirror::Object>* obj, SirtRef<mirror::ArtField>* f, Thread* self,
+static void GetSet64Static(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__x86_64__) || defined(__aarch64__)
@@ -1294,7 +1281,7 @@
extern "C" void art_quick_get64_instance(void);
#endif
-static void GetSet64Instance(SirtRef<mirror::Object>* obj, SirtRef<mirror::ArtField>* f,
+static void GetSet64Instance(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f,
Thread* self, mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__x86_64__) || defined(__aarch64__)
@@ -1303,20 +1290,20 @@
for (size_t i = 0; i < num_values; ++i) {
test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
- reinterpret_cast<size_t>(obj->get()),
+ reinterpret_cast<size_t>(obj->Get()),
static_cast<size_t>(values[i]),
reinterpret_cast<uintptr_t>(&art_quick_set64_instance),
self,
referrer);
- int64_t res = f->get()->GetLong(obj->get());
+ int64_t res = f->Get()->GetLong(obj->Get());
EXPECT_EQ(res, static_cast<int64_t>(values[i])) << "Iteration " << i;
res++;
- f->get()->SetLong<false>(obj->get(), res);
+ f->Get()->SetLong<false>(obj->Get(), res);
size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
- reinterpret_cast<size_t>(obj->get()),
+ reinterpret_cast<size_t>(obj->Get()),
0U,
reinterpret_cast<uintptr_t>(&art_quick_get64_instance),
self,
@@ -1340,41 +1327,41 @@
CHECK(o != NULL);
ScopedObjectAccess soa(self);
- SirtRef<mirror::Object> obj(self, soa.Decode<mirror::Object*>(o));
-
- SirtRef<mirror::Class> c(self, obj->GetClass());
-
+ StackHandleScope<5> hs(self);
+ Handle<mirror::Object> obj(hs.NewHandle(soa.Decode<mirror::Object*>(o)));
+ Handle<mirror::Class> c(hs.NewHandle(obj->GetClass()));
// Need a method as a referrer
- SirtRef<mirror::ArtMethod> m(self, c->GetDirectMethod(0));
+ Handle<mirror::ArtMethod> m(hs.NewHandle(c->GetDirectMethod(0)));
// Play with it...
// Static fields.
{
- SirtRef<mirror::ObjectArray<mirror::ArtField>> fields(self, c.get()->GetSFields());
+ Handle<mirror::ObjectArray<mirror::ArtField>> fields(hs.NewHandle(c.Get()->GetSFields()));
int32_t num_fields = fields->GetLength();
for (int32_t i = 0; i < num_fields; ++i) {
- SirtRef<mirror::ArtField> f(self, fields->Get(i));
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ArtField> f(hs.NewHandle(fields->Get(i)));
- FieldHelper fh(f.get());
+ FieldHelper fh(f.Get());
Primitive::Type type = fh.GetTypeAsPrimitiveType();
switch (type) {
case Primitive::Type::kPrimInt:
if (test_type == type) {
- GetSet32Static(&obj, &f, self, m.get(), test);
+ GetSet32Static(&obj, &f, self, m.Get(), test);
}
break;
case Primitive::Type::kPrimLong:
if (test_type == type) {
- GetSet64Static(&obj, &f, self, m.get(), test);
+ GetSet64Static(&obj, &f, self, m.Get(), test);
}
break;
case Primitive::Type::kPrimNot:
// Don't try array.
if (test_type == type && fh.GetTypeDescriptor()[0] != '[') {
- GetSetObjStatic(&obj, &f, self, m.get(), test);
+ GetSetObjStatic(&obj, &f, self, m.Get(), test);
}
break;
@@ -1386,30 +1373,31 @@
// Instance fields.
{
- SirtRef<mirror::ObjectArray<mirror::ArtField>> fields(self, c.get()->GetIFields());
+ Handle<mirror::ObjectArray<mirror::ArtField>> fields(hs.NewHandle(c.Get()->GetIFields()));
int32_t num_fields = fields->GetLength();
for (int32_t i = 0; i < num_fields; ++i) {
- SirtRef<mirror::ArtField> f(self, fields->Get(i));
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ArtField> f(hs.NewHandle(fields->Get(i)));
- FieldHelper fh(f.get());
+ FieldHelper fh(f.Get());
Primitive::Type type = fh.GetTypeAsPrimitiveType();
switch (type) {
case Primitive::Type::kPrimInt:
if (test_type == type) {
- GetSet32Instance(&obj, &f, self, m.get(), test);
+ GetSet32Instance(&obj, &f, self, m.Get(), test);
}
break;
case Primitive::Type::kPrimLong:
if (test_type == type) {
- GetSet64Instance(&obj, &f, self, m.get(), test);
+ GetSet64Instance(&obj, &f, self, m.Get(), test);
}
break;
case Primitive::Type::kPrimNot:
// Don't try array.
if (test_type == type && fh.GetTypeDescriptor()[0] != '[') {
- GetSetObjInstance(&obj, &f, self, m.get(), test);
+ GetSetObjInstance(&obj, &f, self, m.Get(), test);
}
break;
diff --git a/runtime/arch/x86/context_x86.cc b/runtime/arch/x86/context_x86.cc
index c68d76a..8c98d91 100644
--- a/runtime/arch/x86/context_x86.cc
+++ b/runtime/arch/x86/context_x86.cc
@@ -16,8 +16,9 @@
#include "context_x86.h"
-#include "mirror/art_method.h"
+#include "mirror/art_method-inl.h"
#include "mirror/object-inl.h"
+#include "quick/quick_method_frame_info.h"
#include "stack.h"
namespace art {
@@ -37,16 +38,15 @@
void X86Context::FillCalleeSaves(const StackVisitor& fr) {
mirror::ArtMethod* method = fr.GetMethod();
- uint32_t core_spills = method->GetCoreSpillMask();
- size_t spill_count = POPCOUNT(core_spills);
- DCHECK_EQ(method->GetFpSpillMask(), 0u);
- size_t frame_size = method->GetFrameSizeInBytes();
+ const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo();
+ size_t spill_count = POPCOUNT(frame_info.CoreSpillMask());
+ DCHECK_EQ(frame_info.FpSpillMask(), 0u);
if (spill_count > 0) {
// Lowest number spill is farthest away, walk registers and fill into context.
int j = 2; // Offset j to skip return address spill.
for (int i = 0; i < kNumberOfCpuRegisters; i++) {
- if (((core_spills >> i) & 1) != 0) {
- gprs_[i] = fr.CalleeSaveAddress(spill_count - j, frame_size);
+ if (((frame_info.CoreSpillMask() >> i) & 1) != 0) {
+ gprs_[i] = fr.CalleeSaveAddress(spill_count - j, frame_info.FrameSizeInBytes());
j++;
}
}
diff --git a/runtime/arch/x86/quick_method_frame_info_x86.h b/runtime/arch/x86/quick_method_frame_info_x86.h
new file mode 100644
index 0000000..b9dc0d8
--- /dev/null
+++ b/runtime/arch/x86/quick_method_frame_info_x86.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_X86_QUICK_METHOD_FRAME_INFO_X86_H_
+#define ART_RUNTIME_ARCH_X86_QUICK_METHOD_FRAME_INFO_X86_H_
+
+#include "quick/quick_method_frame_info.h"
+#include "registers_x86.h"
+#include "runtime.h" // for Runtime::CalleeSaveType.
+
+namespace art {
+namespace x86 {
+
+static constexpr uint32_t kX86CalleeSaveRefSpills =
+ (1 << art::x86::EBP) | (1 << art::x86::ESI) | (1 << art::x86::EDI);
+static constexpr uint32_t kX86CalleeSaveArgSpills =
+ (1 << art::x86::ECX) | (1 << art::x86::EDX) | (1 << art::x86::EBX);
+
+constexpr uint32_t X86CalleeSaveCoreSpills(Runtime::CalleeSaveType type) {
+ return kX86CalleeSaveRefSpills | (type == Runtime::kRefsAndArgs ? kX86CalleeSaveArgSpills : 0) |
+ (1 << art::x86::kNumberOfCpuRegisters); // fake return address callee save
+}
+
+constexpr uint32_t X86CalleeSaveFrameSize(Runtime::CalleeSaveType type) {
+ return RoundUp((POPCOUNT(X86CalleeSaveCoreSpills(type)) /* gprs */ +
+ 1 /* Method* */) * kX86PointerSize, kStackAlignment);
+}
+
+constexpr QuickMethodFrameInfo X86CalleeSaveMethodFrameInfo(Runtime::CalleeSaveType type) {
+ return QuickMethodFrameInfo(X86CalleeSaveFrameSize(type),
+ X86CalleeSaveCoreSpills(type),
+ 0u);
+}
+
+} // namespace x86
+} // namespace art
+
+#endif // ART_RUNTIME_ARCH_X86_QUICK_METHOD_FRAME_INFO_X86_H_
diff --git a/runtime/arch/x86_64/context_x86_64.cc b/runtime/arch/x86_64/context_x86_64.cc
index 29a7065..810ef94 100644
--- a/runtime/arch/x86_64/context_x86_64.cc
+++ b/runtime/arch/x86_64/context_x86_64.cc
@@ -16,8 +16,9 @@
#include "context_x86_64.h"
-#include "mirror/art_method.h"
+#include "mirror/art_method-inl.h"
#include "mirror/object-inl.h"
+#include "quick/quick_method_frame_info.h"
#include "stack.h"
namespace art {
@@ -40,17 +41,15 @@
void X86_64Context::FillCalleeSaves(const StackVisitor& fr) {
mirror::ArtMethod* method = fr.GetMethod();
- uint32_t core_spills = method->GetCoreSpillMask();
- uint32_t fp_core_spills = method->GetFpSpillMask();
- size_t spill_count = POPCOUNT(core_spills);
- size_t fp_spill_count = POPCOUNT(fp_core_spills);
- size_t frame_size = method->GetFrameSizeInBytes();
+ const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo();
+ size_t spill_count = POPCOUNT(frame_info.CoreSpillMask());
+ size_t fp_spill_count = POPCOUNT(frame_info.FpSpillMask());
if (spill_count > 0) {
// Lowest number spill is farthest away, walk registers and fill into context.
size_t j = 2; // Offset j to skip return address spill.
for (size_t i = 0; i < kNumberOfCpuRegisters; ++i) {
- if (((core_spills >> i) & 1) != 0) {
- gprs_[i] = fr.CalleeSaveAddress(spill_count - j, frame_size);
+ if (((frame_info.CoreSpillMask() >> i) & 1) != 0) {
+ gprs_[i] = fr.CalleeSaveAddress(spill_count - j, frame_info.FrameSizeInBytes());
j++;
}
}
@@ -59,8 +58,9 @@
// Lowest number spill is farthest away, walk registers and fill into context.
size_t j = 2; // Offset j to skip return address spill.
for (size_t i = 0; i < kNumberOfFloatRegisters; ++i) {
- if (((fp_core_spills >> i) & 1) != 0) {
- fprs_[i] = fr.CalleeSaveAddress(spill_count + fp_spill_count - j, frame_size);
+ if (((frame_info.FpSpillMask() >> i) & 1) != 0) {
+ fprs_[i] = fr.CalleeSaveAddress(spill_count + fp_spill_count - j,
+ frame_info.FrameSizeInBytes());
j++;
}
}
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index b886fb0..9c86c75 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -476,7 +476,6 @@
MACRO3(NO_ARG_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name, 0)
- UNTESTED
SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
// Outgoing argument set up
movq %rsp, %rsi // pass SP
@@ -489,7 +488,6 @@
MACRO3(ONE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name, 0)
- UNTESTED
SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
// Outgoing argument set up
movq %rsp, %rdx // pass SP
@@ -995,7 +993,6 @@
jz 1f
jmp *%r10 // Tail call into method.
1:
- RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
DELIVER_PENDING_EXCEPTION
END_FUNCTION art_quick_resolution_trampoline
@@ -1053,12 +1050,12 @@
* | Return |
* | Callee-Save Data |
* #-------------------#
- * | SIRT |
+ * | handle scope |
* #-------------------#
* | Method* | <--- (1)
* #-------------------#
* | local ref cookie | // 4B
- * | SIRT size | // 4B TODO: roll into call stack alignment?
+ * | handle scope size | // 4B TODO: roll into call stack alignment?
* #-------------------#
* | JNI Call Stack |
* #-------------------# <--- SP on native call
@@ -1111,8 +1108,8 @@
//
// 4 local state ref
// 4 padding
- // 4196 4k scratch space, enough for 2x 256 8-byte parameters (TODO: SIRT overhead?)
- // 16 SIRT member fields ?
+ // 4196 4k scratch space, enough for 2x 256 8-byte parameters (TODO: handle scope overhead?)
+ // 16 handle scope member fields ?
// + 112 14x 8-byte stack-2-register space
// ------
// 4332
@@ -1217,7 +1214,7 @@
movq %rbx, %rsp
CFI_DEF_CFA_REGISTER(rsp)
.Lexception_in_native:
- // TODO: the SIRT contains the this pointer which is used by the debugger for exception
+ // TODO: the handle scope contains the this pointer which is used by the debugger for exception
// delivery.
movq %xmm0, 16(%rsp) // doesn't make sense!!!
movq 24(%rsp), %xmm1 // neither does this!!!
diff --git a/runtime/arch/x86_64/quick_method_frame_info_x86_64.h b/runtime/arch/x86_64/quick_method_frame_info_x86_64.h
new file mode 100644
index 0000000..6183909
--- /dev/null
+++ b/runtime/arch/x86_64/quick_method_frame_info_x86_64.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_X86_64_QUICK_METHOD_FRAME_INFO_X86_64_H_
+#define ART_RUNTIME_ARCH_X86_64_QUICK_METHOD_FRAME_INFO_X86_64_H_
+
+#include "quick/quick_method_frame_info.h"
+#include "registers_x86_64.h"
+#include "runtime.h" // for Runtime::CalleeSaveType.
+
+namespace art {
+namespace x86_64 {
+
+static constexpr uint32_t kX86_64CalleeSaveRefSpills =
+ (1 << art::x86_64::RBX) | (1 << art::x86_64::RBP) | (1 << art::x86_64::R12) |
+ (1 << art::x86_64::R13) | (1 << art::x86_64::R14) | (1 << art::x86_64::R15);
+static constexpr uint32_t kX86_64CalleeSaveArgSpills =
+ (1 << art::x86_64::RSI) | (1 << art::x86_64::RDX) | (1 << art::x86_64::RCX) |
+ (1 << art::x86_64::R8) | (1 << art::x86_64::R9);
+static constexpr uint32_t kX86_64CalleeSaveFpArgSpills =
+ (1 << art::x86_64::XMM0) | (1 << art::x86_64::XMM1) | (1 << art::x86_64::XMM2) |
+ (1 << art::x86_64::XMM3) | (1 << art::x86_64::XMM4) | (1 << art::x86_64::XMM5) |
+ (1 << art::x86_64::XMM6) | (1 << art::x86_64::XMM7);
+
+constexpr uint32_t X86_64CalleeSaveCoreSpills(Runtime::CalleeSaveType type) {
+ return kX86_64CalleeSaveRefSpills |
+ (type == Runtime::kRefsAndArgs ? kX86_64CalleeSaveArgSpills : 0) |
+ (1 << art::x86_64::kNumberOfCpuRegisters); // fake return address callee save
+}
+
+constexpr uint32_t X86_64CalleeSaveFpSpills(Runtime::CalleeSaveType type) {
+ return (type == Runtime::kRefsAndArgs ? kX86_64CalleeSaveFpArgSpills : 0);
+}
+
+constexpr uint32_t X86_64CalleeSaveFrameSize(Runtime::CalleeSaveType type) {
+ return RoundUp((POPCOUNT(X86_64CalleeSaveCoreSpills(type)) /* gprs */ +
+ POPCOUNT(X86_64CalleeSaveFpSpills(type)) /* fprs */ +
+ 1 /* Method* */) * kX86_64PointerSize, kStackAlignment);
+}
+
+constexpr QuickMethodFrameInfo X86_64CalleeSaveMethodFrameInfo(Runtime::CalleeSaveType type) {
+ return QuickMethodFrameInfo(X86_64CalleeSaveFrameSize(type),
+ X86_64CalleeSaveCoreSpills(type),
+ X86_64CalleeSaveFpSpills(type));
+}
+
+} // namespace x86_64
+} // namespace art
+
+#endif // ART_RUNTIME_ARCH_X86_64_QUICK_METHOD_FRAME_INFO_X86_64_H_
diff --git a/runtime/barrier.h b/runtime/barrier.h
index 0c7fd87..d3e6bae 100644
--- a/runtime/barrier.h
+++ b/runtime/barrier.h
@@ -18,7 +18,7 @@
#define ART_RUNTIME_BARRIER_H_
#include "base/mutex.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
namespace art {
diff --git a/runtime/barrier_test.cc b/runtime/barrier_test.cc
index 7d32338..a02c4c7 100644
--- a/runtime/barrier_test.cc
+++ b/runtime/barrier_test.cc
@@ -22,7 +22,7 @@
#include "common_runtime_test.h"
#include "mirror/object_array-inl.h"
#include "thread_pool.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
namespace art {
class CheckWaitTask : public Task {
diff --git a/runtime/base/bit_vector_test.cc b/runtime/base/bit_vector_test.cc
index 2ff55cb..990d1db 100644
--- a/runtime/base/bit_vector_test.cc
+++ b/runtime/base/bit_vector_test.cc
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#include "bit_vector.h"
#include "gtest/gtest.h"
diff --git a/runtime/base/histogram_test.cc b/runtime/base/histogram_test.cc
index 966b97f..d72ae47 100644
--- a/runtime/base/histogram_test.cc
+++ b/runtime/base/histogram_test.cc
@@ -16,7 +16,7 @@
#include "gtest/gtest.h"
#include "histogram-inl.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#include <sstream>
diff --git a/runtime/base/logging.cc b/runtime/base/logging.cc
index 730a2c2..b6c6b9b 100644
--- a/runtime/base/logging.cc
+++ b/runtime/base/logging.cc
@@ -19,7 +19,7 @@
#include "base/mutex.h"
#include "runtime.h"
#include "thread-inl.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#include "utils.h"
namespace art {
diff --git a/runtime/base/logging.h b/runtime/base/logging.h
index 6944278..7800cfe 100644
--- a/runtime/base/logging.h
+++ b/runtime/base/logging.h
@@ -25,7 +25,7 @@
#include <vector>
#include "base/macros.h"
#include "log_severity.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#define CHECK(x) \
if (UNLIKELY(!(x))) \
diff --git a/runtime/base/unix_file/random_access_file_test.h b/runtime/base/unix_file/random_access_file_test.h
index 8a6605e..67e8c22 100644
--- a/runtime/base/unix_file/random_access_file_test.h
+++ b/runtime/base/unix_file/random_access_file_test.h
@@ -22,7 +22,7 @@
#include <string>
#include "common_runtime_test.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
namespace unix_file {
diff --git a/runtime/catch_block_stack_visitor.cc b/runtime/catch_block_stack_visitor.cc
index 8d10a97..55b330a 100644
--- a/runtime/catch_block_stack_visitor.cc
+++ b/runtime/catch_block_stack_visitor.cc
@@ -19,7 +19,7 @@
#include "dex_instruction.h"
#include "mirror/art_method-inl.h"
#include "quick_exception_handler.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
#include "verifier/method_verifier.h"
namespace art {
@@ -50,7 +50,17 @@
}
if (dex_pc != DexFile::kDexNoIndex) {
bool clear_exception = false;
- uint32_t found_dex_pc = method->FindCatchBlock(to_find_, dex_pc, &clear_exception);
+ bool exc_changed = false;
+ StackHandleScope<1> hs(Thread::Current());
+ Handle<mirror::Class> to_find(hs.NewHandle((*exception_)->GetClass()));
+ uint32_t found_dex_pc = method->FindCatchBlock(to_find, dex_pc, &clear_exception,
+ &exc_changed);
+ if (UNLIKELY(exc_changed)) {
+ DCHECK_EQ(DexFile::kDexNoIndex, found_dex_pc);
+ exception_->Assign(self_->GetException(nullptr)); // TODO: Throw location?
+ // There is a new context installed, delete it.
+ delete self_->GetLongJumpContext();
+ }
exception_handler_->SetClearException(clear_exception);
if (found_dex_pc != DexFile::kDexNoIndex) {
exception_handler_->SetHandlerDexPc(found_dex_pc);
diff --git a/runtime/catch_block_stack_visitor.h b/runtime/catch_block_stack_visitor.h
index 6f0fe11..f45cf03 100644
--- a/runtime/catch_block_stack_visitor.h
+++ b/runtime/catch_block_stack_visitor.h
@@ -19,7 +19,7 @@
#include "mirror/object-inl.h"
#include "stack.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
namespace art {
@@ -34,10 +34,10 @@
// Finds catch handler or prepares deoptimization.
class CatchBlockStackVisitor FINAL : public StackVisitor {
public:
- CatchBlockStackVisitor(Thread* self, Context* context, SirtRef<mirror::Throwable>& exception,
+ CatchBlockStackVisitor(Thread* self, Context* context, Handle<mirror::Throwable>* exception,
QuickExceptionHandler* exception_handler)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(self, context), self_(self), to_find_(self, exception->GetClass()),
+ : StackVisitor(self, context), self_(self), exception_(exception),
exception_handler_(exception_handler) {
}
@@ -48,7 +48,7 @@
Thread* const self_;
// The type of the exception catch block to find.
- SirtRef<mirror::Class> to_find_;
+ Handle<mirror::Throwable>* exception_;
QuickExceptionHandler* const exception_handler_;
DISALLOW_COPY_AND_ASSIGN(CatchBlockStackVisitor);
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index 3df050e..cfd0c00 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -86,9 +86,9 @@
* ===========================================================================
*/
-static bool IsSirtLocalRef(JNIEnv* env, jobject localRef) {
- return GetIndirectRefKind(localRef) == kSirtOrInvalid &&
- reinterpret_cast<JNIEnvExt*>(env)->self->SirtContains(localRef);
+static bool IsHandleScopeLocalRef(JNIEnv* env, jobject localRef) {
+ return GetIndirectRefKind(localRef) == kHandleScopeOrInvalid &&
+ reinterpret_cast<JNIEnvExt*>(env)->self->HandleScopeContains(localRef);
}
// Flags passed into ScopedCheck.
@@ -179,7 +179,7 @@
// times, so using "java.lang.Thread" instead of "java/lang/Thread" might work in some
// circumstances, but this is incorrect.
void CheckClassName(const char* class_name) {
- if (!IsValidJniClassName(class_name)) {
+ if ((class_name == nullptr) || !IsValidJniClassName(class_name)) {
JniAbortF(function_name_,
"illegal class name '%s'\n"
" (should be of the form 'package/Class', [Lpackage/Class;' or '[[B')",
@@ -1243,7 +1243,7 @@
static void DeleteLocalRef(JNIEnv* env, jobject localRef) {
CHECK_JNI_ENTRY(kFlag_Default | kFlag_ExcepOkay, "EL", env, localRef);
- if (localRef != nullptr && GetIndirectRefKind(localRef) != kLocal && !IsSirtLocalRef(env, localRef)) {
+ if (localRef != nullptr && GetIndirectRefKind(localRef) != kLocal && !IsHandleScopeLocalRef(env, localRef)) {
JniAbortF(__FUNCTION__, "DeleteLocalRef on %s: %p",
ToStr<IndirectRefKind>(GetIndirectRefKind(localRef)).c_str(), localRef);
} else {
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index df88794..ce634e0 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -24,7 +24,7 @@
#include "mirror/iftable.h"
#include "mirror/object_array.h"
#include "object_utils.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
namespace art {
@@ -34,7 +34,8 @@
}
inline mirror::Class* ClassLinker::FindSystemClass(Thread* self, const char* descriptor) {
- SirtRef<mirror::ClassLoader> class_loader(self, nullptr);
+ StackHandleScope<1> hs(self);
+ auto class_loader = hs.NewHandle<mirror::ClassLoader>(nullptr);
return FindClass(self, descriptor, class_loader);
}
@@ -49,7 +50,8 @@
DCHECK(!element_class->IsPrimitiveVoid());
std::string descriptor("[");
descriptor += ClassHelper(element_class).GetDescriptor();
- SirtRef<mirror::ClassLoader> class_loader(self, element_class->GetClassLoader());
+ StackHandleScope<1> hs(Thread::Current());
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(element_class->GetClassLoader()));
mirror::Class* array_class = FindClass(self, descriptor.c_str(), class_loader);
// Benign races in storing array class and incrementing index.
size_t victim_index = find_array_class_cache_next_victim_;
@@ -63,7 +65,8 @@
mirror::String* resolved_string = referrer->GetDexCacheStrings()->Get(string_idx);
if (UNLIKELY(resolved_string == NULL)) {
mirror::Class* declaring_class = referrer->GetDeclaringClass();
- SirtRef<mirror::DexCache> dex_cache(Thread::Current(), declaring_class->GetDexCache());
+ StackHandleScope<1> hs(Thread::Current());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
const DexFile& dex_file = *dex_cache->GetDexFile();
resolved_string = ResolveString(dex_file, string_idx, dex_cache);
if (resolved_string != nullptr) {
@@ -76,11 +79,11 @@
inline mirror::Class* ClassLinker::ResolveType(uint16_t type_idx,
mirror::ArtMethod* referrer) {
mirror::Class* resolved_type = referrer->GetDexCacheResolvedTypes()->Get(type_idx);
- if (UNLIKELY(resolved_type == NULL)) {
+ if (UNLIKELY(resolved_type == nullptr)) {
mirror::Class* declaring_class = referrer->GetDeclaringClass();
- Thread* self = Thread::Current();
- SirtRef<mirror::DexCache> dex_cache(self, declaring_class->GetDexCache());
- SirtRef<mirror::ClassLoader> class_loader(self, declaring_class->GetClassLoader());
+ StackHandleScope<2> hs(Thread::Current());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
const DexFile& dex_file = *dex_cache->GetDexFile();
resolved_type = ResolveType(dex_file, type_idx, dex_cache, class_loader);
if (resolved_type != nullptr) {
@@ -95,9 +98,9 @@
mirror::DexCache* dex_cache_ptr = declaring_class->GetDexCache();
mirror::Class* resolved_type = dex_cache_ptr->GetResolvedType(type_idx);
if (UNLIKELY(resolved_type == NULL)) {
- Thread* self = Thread::Current();
- SirtRef<mirror::DexCache> dex_cache(self, dex_cache_ptr);
- SirtRef<mirror::ClassLoader> class_loader(self, declaring_class->GetClassLoader());
+ StackHandleScope<2> hs(Thread::Current());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(dex_cache_ptr));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
const DexFile& dex_file = *dex_cache->GetDexFile();
resolved_type = ResolveType(dex_file, type_idx, dex_cache, class_loader);
if (resolved_type != nullptr) {
@@ -114,9 +117,9 @@
referrer->GetDexCacheResolvedMethods()->Get(method_idx);
if (UNLIKELY(resolved_method == NULL || resolved_method->IsRuntimeMethod())) {
mirror::Class* declaring_class = referrer->GetDeclaringClass();
- Thread* self = Thread::Current();
- SirtRef<mirror::DexCache> dex_cache(self, declaring_class->GetDexCache());
- SirtRef<mirror::ClassLoader> class_loader(self, declaring_class->GetClassLoader());
+ StackHandleScope<2> hs(Thread::Current());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
const DexFile& dex_file = *dex_cache->GetDexFile();
resolved_method = ResolveMethod(dex_file, method_idx, dex_cache, class_loader, referrer, type);
if (resolved_method != nullptr) {
@@ -133,9 +136,9 @@
mirror::ArtField* resolved_field =
declaring_class->GetDexCache()->GetResolvedField(field_idx);
if (UNLIKELY(resolved_field == NULL)) {
- Thread* self = Thread::Current();
- SirtRef<mirror::DexCache> dex_cache(self, declaring_class->GetDexCache());
- SirtRef<mirror::ClassLoader> class_loader(self, declaring_class->GetClassLoader());
+ StackHandleScope<2> hs(Thread::Current());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
const DexFile& dex_file = *dex_cache->GetDexFile();
resolved_field = ResolveField(dex_file, field_idx, dex_cache, class_loader, is_static);
if (resolved_field != nullptr) {
@@ -174,9 +177,8 @@
inline mirror::ObjectArray<mirror::ArtField>* ClassLinker::AllocArtFieldArray(Thread* self,
size_t length) {
- return mirror::ObjectArray<mirror::ArtField>::Alloc(self,
- GetClassRoot(kJavaLangReflectArtFieldArrayClass),
- length);
+ return mirror::ObjectArray<mirror::ArtField>::Alloc(
+ self, GetClassRoot(kJavaLangReflectArtFieldArrayClass), length);
}
inline mirror::Class* ClassLinker::GetClassRoot(ClassRoot class_root)
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index e3c162b..c7f3a20 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -37,6 +37,7 @@
#include "gc/accounting/heap_bitmap.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
+#include "handle_scope.h"
#include "intern_table.h"
#include "interpreter/interpreter.h"
#include "leb128.h"
@@ -59,10 +60,9 @@
#include "entrypoints/entrypoint_utils.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
-#include "sirt_ref.h"
-#include "stack_indirect_reference_table.h"
+#include "handle_scope-inl.h"
#include "thread.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#include "utils.h"
#include "verifier/method_verifier.h"
#include "well_known_classes.h"
@@ -202,11 +202,12 @@
gc::Heap* heap = Runtime::Current()->GetHeap();
// The GC can't handle an object with a null class since we can't get the size of this object.
heap->IncrementDisableMovingGC(self);
- SirtRef<mirror::Class> java_lang_Class(self, down_cast<mirror::Class*>(
- heap->AllocNonMovableObject<true>(self, nullptr, sizeof(mirror::ClassClass), VoidFunctor())));
- CHECK(java_lang_Class.get() != NULL);
- mirror::Class::SetClassClass(java_lang_Class.get());
- java_lang_Class->SetClass(java_lang_Class.get());
+ StackHandleScope<64> hs(self); // 64 is picked arbitrarily.
+ Handle<mirror::Class> java_lang_Class(hs.NewHandle(down_cast<mirror::Class*>(
+ heap->AllocNonMovableObject<true>(self, nullptr, sizeof(mirror::ClassClass), VoidFunctor()))));
+ CHECK(java_lang_Class.Get() != NULL);
+ mirror::Class::SetClassClass(java_lang_Class.Get());
+ java_lang_Class->SetClass(java_lang_Class.Get());
if (kUseBakerOrBrooksReadBarrier) {
java_lang_Class->AssertReadBarrierPointer();
}
@@ -215,44 +216,47 @@
// AllocClass(mirror::Class*) can now be used
// Class[] is used for reflection support.
- SirtRef<mirror::Class> class_array_class(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::Class)));
- class_array_class->SetComponentType(java_lang_Class.get());
+ Handle<mirror::Class> class_array_class(
+ hs.NewHandle(AllocClass(self, java_lang_Class.Get(), sizeof(mirror::Class))));
+ class_array_class->SetComponentType(java_lang_Class.Get());
// java_lang_Object comes next so that object_array_class can be created.
- SirtRef<mirror::Class> java_lang_Object(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::Class)));
- CHECK(java_lang_Object.get() != NULL);
+ Handle<mirror::Class> java_lang_Object(
+ hs.NewHandle(AllocClass(self, java_lang_Class.Get(), sizeof(mirror::Class))));
+ CHECK(java_lang_Object.Get() != NULL);
// backfill Object as the super class of Class.
- java_lang_Class->SetSuperClass(java_lang_Object.get());
+ java_lang_Class->SetSuperClass(java_lang_Object.Get());
java_lang_Object->SetStatus(mirror::Class::kStatusLoaded, self);
// Object[] next to hold class roots.
- SirtRef<mirror::Class> object_array_class(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::Class)));
- object_array_class->SetComponentType(java_lang_Object.get());
+ Handle<mirror::Class> object_array_class(
+ hs.NewHandle(AllocClass(self, java_lang_Class.Get(), sizeof(mirror::Class))));
+ object_array_class->SetComponentType(java_lang_Object.Get());
// Setup the char class to be used for char[].
- SirtRef<mirror::Class> char_class(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::Class)));
+ Handle<mirror::Class> char_class(hs.NewHandle(AllocClass(self, java_lang_Class.Get(), sizeof(mirror::Class))));
// Setup the char[] class to be used for String.
- SirtRef<mirror::Class> char_array_class(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::Class)));
- char_array_class->SetComponentType(char_class.get());
- mirror::CharArray::SetArrayClass(char_array_class.get());
+ Handle<mirror::Class> char_array_class(hs.NewHandle(AllocClass(self, java_lang_Class.Get(), sizeof(mirror::Class))));
+ char_array_class->SetComponentType(char_class.Get());
+ mirror::CharArray::SetArrayClass(char_array_class.Get());
// Setup String.
- SirtRef<mirror::Class> java_lang_String(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::StringClass)));
- mirror::String::SetClass(java_lang_String.get());
+ Handle<mirror::Class> java_lang_String(hs.NewHandle(AllocClass(self, java_lang_Class.Get(), sizeof(mirror::StringClass))));
+ mirror::String::SetClass(java_lang_String.Get());
java_lang_String->SetObjectSize(sizeof(mirror::String));
java_lang_String->SetStatus(mirror::Class::kStatusResolved, self);
// Create storage for root classes, save away our work so far (requires descriptors).
- class_roots_ = mirror::ObjectArray<mirror::Class>::Alloc(self, object_array_class.get(),
+ class_roots_ = mirror::ObjectArray<mirror::Class>::Alloc(self, object_array_class.Get(),
kClassRootsMax);
CHECK(class_roots_ != NULL);
- SetClassRoot(kJavaLangClass, java_lang_Class.get());
- SetClassRoot(kJavaLangObject, java_lang_Object.get());
- SetClassRoot(kClassArrayClass, class_array_class.get());
- SetClassRoot(kObjectArrayClass, object_array_class.get());
- SetClassRoot(kCharArrayClass, char_array_class.get());
- SetClassRoot(kJavaLangString, java_lang_String.get());
+ SetClassRoot(kJavaLangClass, java_lang_Class.Get());
+ SetClassRoot(kJavaLangObject, java_lang_Object.Get());
+ SetClassRoot(kClassArrayClass, class_array_class.Get());
+ SetClassRoot(kObjectArrayClass, object_array_class.Get());
+ SetClassRoot(kCharArrayClass, char_array_class.Get());
+ SetClassRoot(kJavaLangString, java_lang_String.Get());
// Setup the primitive type classes.
SetClassRoot(kPrimitiveBoolean, CreatePrimitiveClass(self, Primitive::kPrimBoolean));
@@ -268,53 +272,54 @@
array_iftable_ = AllocIfTable(self, 2);
// Create int array type for AllocDexCache (done in AppendToBootClassPath).
- SirtRef<mirror::Class> int_array_class(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::Class)));
+ Handle<mirror::Class> int_array_class(
+ hs.NewHandle(AllocClass(self, java_lang_Class.Get(), sizeof(mirror::Class))));
int_array_class->SetComponentType(GetClassRoot(kPrimitiveInt));
- mirror::IntArray::SetArrayClass(int_array_class.get());
- SetClassRoot(kIntArrayClass, int_array_class.get());
+ mirror::IntArray::SetArrayClass(int_array_class.Get());
+ SetClassRoot(kIntArrayClass, int_array_class.Get());
// now that these are registered, we can use AllocClass() and AllocObjectArray
// Set up DexCache. This cannot be done later since AppendToBootClassPath calls AllocDexCache.
- SirtRef<mirror::Class>
- java_lang_DexCache(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::DexCacheClass)));
- SetClassRoot(kJavaLangDexCache, java_lang_DexCache.get());
+ Handle<mirror::Class> java_lang_DexCache(
+ hs.NewHandle(AllocClass(self, java_lang_Class.Get(), sizeof(mirror::DexCacheClass))));
+ SetClassRoot(kJavaLangDexCache, java_lang_DexCache.Get());
java_lang_DexCache->SetObjectSize(sizeof(mirror::DexCache));
java_lang_DexCache->SetStatus(mirror::Class::kStatusResolved, self);
// Constructor, Field, Method, and AbstractMethod are necessary so that FindClass can link members.
- SirtRef<mirror::Class> java_lang_reflect_ArtField(self, AllocClass(self, java_lang_Class.get(),
- sizeof(mirror::ArtFieldClass)));
- CHECK(java_lang_reflect_ArtField.get() != NULL);
+ Handle<mirror::Class> java_lang_reflect_ArtField(
+ hs.NewHandle(AllocClass(self, java_lang_Class.Get(), sizeof(mirror::ArtFieldClass))));
+ CHECK(java_lang_reflect_ArtField.Get() != NULL);
java_lang_reflect_ArtField->SetObjectSize(sizeof(mirror::ArtField));
- SetClassRoot(kJavaLangReflectArtField, java_lang_reflect_ArtField.get());
+ SetClassRoot(kJavaLangReflectArtField, java_lang_reflect_ArtField.Get());
java_lang_reflect_ArtField->SetStatus(mirror::Class::kStatusResolved, self);
- mirror::ArtField::SetClass(java_lang_reflect_ArtField.get());
+ mirror::ArtField::SetClass(java_lang_reflect_ArtField.Get());
- SirtRef<mirror::Class> java_lang_reflect_ArtMethod(self, AllocClass(self, java_lang_Class.get(),
- sizeof(mirror::ArtMethodClass)));
- CHECK(java_lang_reflect_ArtMethod.get() != NULL);
+ Handle<mirror::Class> java_lang_reflect_ArtMethod(
+ hs.NewHandle(AllocClass(self, java_lang_Class.Get(), sizeof(mirror::ArtMethodClass))));
+ CHECK(java_lang_reflect_ArtMethod.Get() != NULL);
java_lang_reflect_ArtMethod->SetObjectSize(sizeof(mirror::ArtMethod));
- SetClassRoot(kJavaLangReflectArtMethod, java_lang_reflect_ArtMethod.get());
+ SetClassRoot(kJavaLangReflectArtMethod, java_lang_reflect_ArtMethod.Get());
java_lang_reflect_ArtMethod->SetStatus(mirror::Class::kStatusResolved, self);
- mirror::ArtMethod::SetClass(java_lang_reflect_ArtMethod.get());
+ mirror::ArtMethod::SetClass(java_lang_reflect_ArtMethod.Get());
// Set up array classes for string, field, method
- SirtRef<mirror::Class> object_array_string(self, AllocClass(self, java_lang_Class.get(),
- sizeof(mirror::Class)));
- object_array_string->SetComponentType(java_lang_String.get());
- SetClassRoot(kJavaLangStringArrayClass, object_array_string.get());
+ Handle<mirror::Class> object_array_string(
+ hs.NewHandle(AllocClass(self, java_lang_Class.Get(), sizeof(mirror::Class))));
+ object_array_string->SetComponentType(java_lang_String.Get());
+ SetClassRoot(kJavaLangStringArrayClass, object_array_string.Get());
- SirtRef<mirror::Class> object_array_art_method(self, AllocClass(self, java_lang_Class.get(),
- sizeof(mirror::Class)));
- object_array_art_method->SetComponentType(java_lang_reflect_ArtMethod.get());
- SetClassRoot(kJavaLangReflectArtMethodArrayClass, object_array_art_method.get());
+ Handle<mirror::Class> object_array_art_method(
+ hs.NewHandle(AllocClass(self, java_lang_Class.Get(), sizeof(mirror::Class))));
+ object_array_art_method->SetComponentType(java_lang_reflect_ArtMethod.Get());
+ SetClassRoot(kJavaLangReflectArtMethodArrayClass, object_array_art_method.Get());
- SirtRef<mirror::Class> object_array_art_field(self, AllocClass(self, java_lang_Class.get(),
- sizeof(mirror::Class)));
- object_array_art_field->SetComponentType(java_lang_reflect_ArtField.get());
- SetClassRoot(kJavaLangReflectArtFieldArrayClass, object_array_art_field.get());
+ Handle<mirror::Class> object_array_art_field(
+ hs.NewHandle(AllocClass(self, java_lang_Class.Get(), sizeof(mirror::Class))));
+ object_array_art_field->SetComponentType(java_lang_reflect_ArtField.Get());
+ SetClassRoot(kJavaLangReflectArtFieldArrayClass, object_array_art_field.Get());
// Setup boot_class_path_ and register class_path now that we can use AllocObjectArray to create
// DexCache instances. Needs to be after String, Field, Method arrays since AllocDexCache uses
@@ -329,8 +334,8 @@
// now we can use FindSystemClass
// run char class through InitializePrimitiveClass to finish init
- InitializePrimitiveClass(char_class.get(), Primitive::kPrimChar);
- SetClassRoot(kPrimitiveChar, char_class.get()); // needs descriptor
+ InitializePrimitiveClass(char_class.Get(), Primitive::kPrimChar);
+ SetClassRoot(kPrimitiveChar, char_class.Get()); // needs descriptor
// Create runtime resolution and imt conflict methods. Also setup the default imt.
Runtime* runtime = Runtime::Current();
@@ -341,16 +346,16 @@
// Object, String and DexCache need to be rerun through FindSystemClass to finish init
java_lang_Object->SetStatus(mirror::Class::kStatusNotReady, self);
mirror::Class* Object_class = FindSystemClass(self, "Ljava/lang/Object;");
- CHECK_EQ(java_lang_Object.get(), Object_class);
+ CHECK_EQ(java_lang_Object.Get(), Object_class);
CHECK_EQ(java_lang_Object->GetObjectSize(), sizeof(mirror::Object));
java_lang_String->SetStatus(mirror::Class::kStatusNotReady, self);
mirror::Class* String_class = FindSystemClass(self, "Ljava/lang/String;");
- CHECK_EQ(java_lang_String.get(), String_class);
+ CHECK_EQ(java_lang_String.Get(), String_class);
CHECK_EQ(java_lang_String->GetObjectSize(), sizeof(mirror::String));
java_lang_DexCache->SetStatus(mirror::Class::kStatusNotReady, self);
mirror::Class* DexCache_class = FindSystemClass(self, "Ljava/lang/DexCache;");
- CHECK_EQ(java_lang_String.get(), String_class);
- CHECK_EQ(java_lang_DexCache.get(), DexCache_class);
+ CHECK_EQ(java_lang_String.Get(), String_class);
+ CHECK_EQ(java_lang_DexCache.Get(), DexCache_class);
CHECK_EQ(java_lang_DexCache->GetObjectSize(), sizeof(mirror::DexCache));
// Setup the primitive array type classes - can't be done until Object has a vtable.
@@ -361,13 +366,13 @@
mirror::ByteArray::SetArrayClass(GetClassRoot(kByteArrayClass));
mirror::Class* found_char_array_class = FindSystemClass(self, "[C");
- CHECK_EQ(char_array_class.get(), found_char_array_class);
+ CHECK_EQ(char_array_class.Get(), found_char_array_class);
SetClassRoot(kShortArrayClass, FindSystemClass(self, "[S"));
mirror::ShortArray::SetArrayClass(GetClassRoot(kShortArrayClass));
mirror::Class* found_int_array_class = FindSystemClass(self, "[I");
- CHECK_EQ(int_array_class.get(), found_int_array_class);
+ CHECK_EQ(int_array_class.Get(), found_int_array_class);
SetClassRoot(kLongArrayClass, FindSystemClass(self, "[J"));
mirror::LongArray::SetArrayClass(GetClassRoot(kLongArrayClass));
@@ -379,10 +384,10 @@
mirror::DoubleArray::SetArrayClass(GetClassRoot(kDoubleArrayClass));
mirror::Class* found_class_array_class = FindSystemClass(self, "[Ljava/lang/Class;");
- CHECK_EQ(class_array_class.get(), found_class_array_class);
+ CHECK_EQ(class_array_class.Get(), found_class_array_class);
mirror::Class* found_object_array_class = FindSystemClass(self, "[Ljava/lang/Object;");
- CHECK_EQ(object_array_class.get(), found_object_array_class);
+ CHECK_EQ(object_array_class.Get(), found_object_array_class);
// Setup the single, global copy of "iftable".
mirror::Class* java_lang_Cloneable = FindSystemClass(self, "Ljava/lang/Cloneable;");
@@ -395,35 +400,35 @@
array_iftable_->SetInterface(1, java_io_Serializable);
// Sanity check Class[] and Object[]'s interfaces.
- ClassHelper kh(class_array_class.get());
+ ClassHelper kh(class_array_class.Get());
CHECK_EQ(java_lang_Cloneable, kh.GetDirectInterface(0));
CHECK_EQ(java_io_Serializable, kh.GetDirectInterface(1));
- kh.ChangeClass(object_array_class.get());
+ kh.ChangeClass(object_array_class.Get());
CHECK_EQ(java_lang_Cloneable, kh.GetDirectInterface(0));
CHECK_EQ(java_io_Serializable, kh.GetDirectInterface(1));
// Run Class, ArtField, and ArtMethod through FindSystemClass. This initializes their
// dex_cache_ fields and register them in class_table_.
mirror::Class* Class_class = FindSystemClass(self, "Ljava/lang/Class;");
- CHECK_EQ(java_lang_Class.get(), Class_class);
+ CHECK_EQ(java_lang_Class.Get(), Class_class);
java_lang_reflect_ArtMethod->SetStatus(mirror::Class::kStatusNotReady, self);
mirror::Class* Art_method_class = FindSystemClass(self, "Ljava/lang/reflect/ArtMethod;");
- CHECK_EQ(java_lang_reflect_ArtMethod.get(), Art_method_class);
+ CHECK_EQ(java_lang_reflect_ArtMethod.Get(), Art_method_class);
java_lang_reflect_ArtField->SetStatus(mirror::Class::kStatusNotReady, self);
mirror::Class* Art_field_class = FindSystemClass(self, "Ljava/lang/reflect/ArtField;");
- CHECK_EQ(java_lang_reflect_ArtField.get(), Art_field_class);
+ CHECK_EQ(java_lang_reflect_ArtField.Get(), Art_field_class);
mirror::Class* String_array_class = FindSystemClass(self, class_roots_descriptors_[kJavaLangStringArrayClass]);
- CHECK_EQ(object_array_string.get(), String_array_class);
+ CHECK_EQ(object_array_string.Get(), String_array_class);
mirror::Class* Art_method_array_class =
FindSystemClass(self, class_roots_descriptors_[kJavaLangReflectArtMethodArrayClass]);
- CHECK_EQ(object_array_art_method.get(), Art_method_array_class);
+ CHECK_EQ(object_array_art_method.Get(), Art_method_array_class);
mirror::Class* Art_field_array_class =
FindSystemClass(self, class_roots_descriptors_[kJavaLangReflectArtFieldArrayClass]);
- CHECK_EQ(object_array_art_field.get(), Art_field_array_class);
+ CHECK_EQ(object_array_art_field.Get(), Art_field_array_class);
// End of special init trickery, subsequent classes may be loaded via FindSystemClass.
@@ -529,8 +534,9 @@
for (size_t i = 0; i < ClassLinker::kClassRootsMax; ++i) {
mirror::Class* c = GetClassRoot(ClassRoot(i));
if (!c->IsArrayClass() && !c->IsPrimitive()) {
- SirtRef<mirror::Class> sirt_class(self, GetClassRoot(ClassRoot(i)));
- EnsureInitialized(sirt_class, true, true);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> h_class(hs.NewHandle(GetClassRoot(ClassRoot(i))));
+ EnsureInitialized(h_class, true, true);
self->AssertNoPendingException();
}
}
@@ -1027,10 +1033,11 @@
mirror::ObjectArray<mirror::DexCache>* dex_caches =
dex_caches_object->AsObjectArray<mirror::DexCache>();
- SirtRef<mirror::ObjectArray<mirror::Class> > class_roots(
- self,
- space->GetImageHeader().GetImageRoot(ImageHeader::kClassRoots)->AsObjectArray<mirror::Class>());
- class_roots_ = class_roots.get();
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ObjectArray<mirror::Class>> class_roots(hs.NewHandle(
+ space->GetImageHeader().GetImageRoot(ImageHeader::kClassRoots)->
+ AsObjectArray<mirror::Class>()));
+ class_roots_ = class_roots.Get();
// Special case of setting up the String class early so that we can test arbitrary objects
// as being Strings or not
@@ -1039,7 +1046,8 @@
CHECK_EQ(oat_file.GetOatHeader().GetDexFileCount(),
static_cast<uint32_t>(dex_caches->GetLength()));
for (int32_t i = 0; i < dex_caches->GetLength(); i++) {
- SirtRef<mirror::DexCache> dex_cache(self, dex_caches->Get(i));
+ StackHandleScope<1> hs(self);
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(dex_caches->Get(i)));
const std::string& dex_file_location(dex_cache->GetLocation()->ToModifiedUtf8());
const OatFile::OatDexFile* oat_dex_file = oat_file.GetOatDexFile(dex_file_location.c_str(),
nullptr);
@@ -1069,7 +1077,7 @@
// reinit class_roots_
mirror::Class::SetClassClass(class_roots->Get(kJavaLangClass));
- class_roots_ = class_roots.get();
+ class_roots_ = class_roots.Get();
// reinit array_iftable_ from any array class instance, they should be ==
array_iftable_ = GetClassRoot(kObjectArrayClass)->GetIfTable();
@@ -1212,42 +1220,43 @@
mirror::DexCache* ClassLinker::AllocDexCache(Thread* self, const DexFile& dex_file) {
gc::Heap* heap = Runtime::Current()->GetHeap();
- SirtRef<mirror::Class> dex_cache_class(self, GetClassRoot(kJavaLangDexCache));
- SirtRef<mirror::DexCache> dex_cache(
- self, down_cast<mirror::DexCache*>(
- heap->AllocObject<true>(self, dex_cache_class.get(), dex_cache_class->GetObjectSize(),
- VoidFunctor())));
- if (dex_cache.get() == NULL) {
+ StackHandleScope<16> hs(self);
+ Handle<mirror::Class> dex_cache_class(hs.NewHandle(GetClassRoot(kJavaLangDexCache)));
+ Handle<mirror::DexCache> dex_cache(
+ hs.NewHandle(down_cast<mirror::DexCache*>(
+ heap->AllocObject<true>(self, dex_cache_class.Get(), dex_cache_class->GetObjectSize(),
+ VoidFunctor()))));
+ if (dex_cache.Get() == NULL) {
return NULL;
}
- SirtRef<mirror::String>
- location(self, intern_table_->InternStrong(dex_file.GetLocation().c_str()));
- if (location.get() == NULL) {
+ Handle<mirror::String>
+ location(hs.NewHandle(intern_table_->InternStrong(dex_file.GetLocation().c_str())));
+ if (location.Get() == NULL) {
return NULL;
}
- SirtRef<mirror::ObjectArray<mirror::String> >
- strings(self, AllocStringArray(self, dex_file.NumStringIds()));
- if (strings.get() == NULL) {
+ Handle<mirror::ObjectArray<mirror::String> >
+ strings(hs.NewHandle(AllocStringArray(self, dex_file.NumStringIds())));
+ if (strings.Get() == NULL) {
return NULL;
}
- SirtRef<mirror::ObjectArray<mirror::Class> >
- types(self, AllocClassArray(self, dex_file.NumTypeIds()));
- if (types.get() == NULL) {
+ Handle<mirror::ObjectArray<mirror::Class> >
+ types(hs.NewHandle(AllocClassArray(self, dex_file.NumTypeIds())));
+ if (types.Get() == NULL) {
return NULL;
}
- SirtRef<mirror::ObjectArray<mirror::ArtMethod> >
- methods(self, AllocArtMethodArray(self, dex_file.NumMethodIds()));
- if (methods.get() == NULL) {
+ Handle<mirror::ObjectArray<mirror::ArtMethod> >
+ methods(hs.NewHandle(AllocArtMethodArray(self, dex_file.NumMethodIds())));
+ if (methods.Get() == NULL) {
return NULL;
}
- SirtRef<mirror::ObjectArray<mirror::ArtField> >
- fields(self, AllocArtFieldArray(self, dex_file.NumFieldIds()));
- if (fields.get() == NULL) {
+ Handle<mirror::ObjectArray<mirror::ArtField> >
+ fields(hs.NewHandle(AllocArtFieldArray(self, dex_file.NumFieldIds())));
+ if (fields.Get() == NULL) {
return NULL;
}
- dex_cache->Init(&dex_file, location.get(), strings.get(), types.get(), methods.get(),
- fields.get());
- return dex_cache.get();
+ dex_cache->Init(&dex_file, location.Get(), strings.Get(), types.Get(), methods.Get(),
+ fields.Get());
+ return dex_cache.Get();
}
// Used to initialize a class in the allocation code path to ensure it is guarded by a StoreStore
@@ -1315,19 +1324,20 @@
DCHECK(klass != NULL);
// Wait for the class if it has not already been linked.
if (!klass->IsResolved() && !klass->IsErroneous()) {
- SirtRef<mirror::Class> sirt_class(self, klass);
- ObjectLock<mirror::Class> lock(self, &sirt_class);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> h_class(hs.NewHandle(klass));
+ ObjectLock<mirror::Class> lock(self, &h_class);
// Check for circular dependencies between classes.
- if (!sirt_class->IsResolved() && sirt_class->GetClinitThreadId() == self->GetTid()) {
- ThrowClassCircularityError(sirt_class.get());
- sirt_class->SetStatus(mirror::Class::kStatusError, self);
+ if (!h_class->IsResolved() && h_class->GetClinitThreadId() == self->GetTid()) {
+ ThrowClassCircularityError(h_class.Get());
+ h_class->SetStatus(mirror::Class::kStatusError, self);
return nullptr;
}
// Wait for the pending initialization to complete.
- while (!sirt_class->IsResolved() && !sirt_class->IsErroneous()) {
+ while (!h_class->IsResolved() && !h_class->IsErroneous()) {
lock.WaitIgnoringInterrupts();
}
- klass = sirt_class.get();
+ klass = h_class.Get();
}
if (klass->IsErroneous()) {
ThrowEarlierClassFailure(klass);
@@ -1340,7 +1350,7 @@
}
mirror::Class* ClassLinker::FindClass(Thread* self, const char* descriptor,
- const SirtRef<mirror::ClassLoader>& class_loader) {
+ const Handle<mirror::ClassLoader>& class_loader) {
DCHECK_NE(*descriptor, '\0') << "descriptor is empty string";
DCHECK(self != nullptr);
self->AssertNoPendingException();
@@ -1350,17 +1360,18 @@
return FindPrimitiveClass(descriptor[0]);
}
// Find the class in the loaded classes table.
- mirror::Class* klass = LookupClass(descriptor, class_loader.get());
+ mirror::Class* klass = LookupClass(descriptor, class_loader.Get());
if (klass != NULL) {
return EnsureResolved(self, klass);
}
// Class is not yet loaded.
if (descriptor[0] == '[') {
return CreateArrayClass(self, descriptor, class_loader);
- } else if (class_loader.get() == nullptr) {
+ } else if (class_loader.Get() == nullptr) {
DexFile::ClassPathEntry pair = DexFile::FindInClassPath(descriptor, boot_class_path_);
if (pair.second != NULL) {
- SirtRef<mirror::ClassLoader> class_loader(self, nullptr);
+ StackHandleScope<1> hs(self);
+ auto class_loader = hs.NewHandle<mirror::ClassLoader>(nullptr);
return DefineClass(descriptor, class_loader, *pair.first, *pair.second);
}
} else if (Runtime::Current()->UseCompileTimeClassPath()) {
@@ -1376,7 +1387,7 @@
{
ScopedObjectAccessUnchecked soa(self);
ScopedLocalRef<jobject> jclass_loader(soa.Env(),
- soa.AddLocalReference<jobject>(class_loader.get()));
+ soa.AddLocalReference<jobject>(class_loader.Get()));
class_path = &Runtime::Current()->GetCompileTimeClassPath(jclass_loader.get());
}
@@ -1388,7 +1399,7 @@
} else {
ScopedObjectAccessUnchecked soa(self);
ScopedLocalRef<jobject> class_loader_object(soa.Env(),
- soa.AddLocalReference<jobject>(class_loader.get()));
+ soa.AddLocalReference<jobject>(class_loader.Get()));
std::string class_name_string(DescriptorToDot(descriptor));
ScopedLocalRef<jobject> result(soa.Env(), NULL);
{
@@ -1422,38 +1433,39 @@
}
mirror::Class* ClassLinker::DefineClass(const char* descriptor,
- const SirtRef<mirror::ClassLoader>& class_loader,
+ const Handle<mirror::ClassLoader>& class_loader,
const DexFile& dex_file,
const DexFile::ClassDef& dex_class_def) {
Thread* self = Thread::Current();
- SirtRef<mirror::Class> klass(self, NULL);
+ StackHandleScope<2> hs(self);
+ auto klass = hs.NewHandle<mirror::Class>(nullptr);
// Load the class from the dex file.
if (UNLIKELY(!init_done_)) {
// finish up init of hand crafted class_roots_
if (strcmp(descriptor, "Ljava/lang/Object;") == 0) {
- klass.reset(GetClassRoot(kJavaLangObject));
+ klass.Assign(GetClassRoot(kJavaLangObject));
} else if (strcmp(descriptor, "Ljava/lang/Class;") == 0) {
- klass.reset(GetClassRoot(kJavaLangClass));
+ klass.Assign(GetClassRoot(kJavaLangClass));
} else if (strcmp(descriptor, "Ljava/lang/String;") == 0) {
- klass.reset(GetClassRoot(kJavaLangString));
+ klass.Assign(GetClassRoot(kJavaLangString));
} else if (strcmp(descriptor, "Ljava/lang/DexCache;") == 0) {
- klass.reset(GetClassRoot(kJavaLangDexCache));
+ klass.Assign(GetClassRoot(kJavaLangDexCache));
} else if (strcmp(descriptor, "Ljava/lang/reflect/ArtField;") == 0) {
- klass.reset(GetClassRoot(kJavaLangReflectArtField));
+ klass.Assign(GetClassRoot(kJavaLangReflectArtField));
} else if (strcmp(descriptor, "Ljava/lang/reflect/ArtMethod;") == 0) {
- klass.reset(GetClassRoot(kJavaLangReflectArtMethod));
+ klass.Assign(GetClassRoot(kJavaLangReflectArtMethod));
} else {
- klass.reset(AllocClass(self, SizeOfClass(dex_file, dex_class_def)));
+ klass.Assign(AllocClass(self, SizeOfClass(dex_file, dex_class_def)));
}
} else {
- klass.reset(AllocClass(self, SizeOfClass(dex_file, dex_class_def)));
+ klass.Assign(AllocClass(self, SizeOfClass(dex_file, dex_class_def)));
}
- if (UNLIKELY(klass.get() == NULL)) {
+ if (UNLIKELY(klass.Get() == NULL)) {
CHECK(self->IsExceptionPending()); // Expect an OOME.
return NULL;
}
klass->SetDexCache(FindDexCache(dex_file));
- LoadClass(dex_file, dex_class_def, klass, class_loader.get());
+ LoadClass(dex_file, dex_class_def, klass, class_loader.Get());
// Check for a pending exception during load
if (self->IsExceptionPending()) {
klass->SetStatus(mirror::Class::kStatusError, self);
@@ -1462,7 +1474,7 @@
ObjectLock<mirror::Class> lock(self, &klass);
klass->SetClinitThreadId(self->GetTid());
// Add the newly loaded class to the loaded classes table.
- mirror::Class* existing = InsertClass(descriptor, klass.get(), Hash(descriptor));
+ mirror::Class* existing = InsertClass(descriptor, klass.Get(), Hash(descriptor));
if (existing != NULL) {
// We failed to insert because we raced with another thread. Calling EnsureResolved may cause
// this thread to block.
@@ -1479,7 +1491,7 @@
// Link the class (if necessary)
CHECK(!klass->IsResolved());
// TODO: Use fast jobjects?
- SirtRef<mirror::ObjectArray<mirror::Class> > interfaces(self, nullptr);
+ auto interfaces = hs.NewHandle<mirror::ObjectArray<mirror::Class>>(nullptr);
if (!LinkClass(self, klass, interfaces)) {
// Linking failed.
klass->SetStatus(mirror::Class::kStatusError, self);
@@ -1498,9 +1510,9 @@
* The class has been prepared and resolved but possibly not yet verified
* at this point.
*/
- Dbg::PostClassPrepare(klass.get());
+ Dbg::PostClassPrepare(klass.Get());
- return klass.get();
+ return klass.Get();
}
// Precomputes size that will be needed for Class, matching LinkStaticFields
@@ -1765,7 +1777,7 @@
// Ignore virtual methods on the iterator.
}
-static void LinkCode(const SirtRef<mirror::ArtMethod>& method, const OatFile::OatClass* oat_class,
+static void LinkCode(const Handle<mirror::ArtMethod>& method, const OatFile::OatClass* oat_class,
const DexFile& dex_file, uint32_t dex_method_index, uint32_t method_index)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Method shouldn't have already been linked.
@@ -1774,11 +1786,11 @@
// Every kind of method should at least get an invoke stub from the oat_method.
// non-abstract methods also get their code pointers.
const OatFile::OatMethod oat_method = oat_class->GetOatMethod(method_index);
- oat_method.LinkMethod(method.get());
+ oat_method.LinkMethod(method.Get());
// Install entry point from interpreter.
Runtime* runtime = Runtime::Current();
- bool enter_interpreter = NeedsInterpreter(method.get(),
+ bool enter_interpreter = NeedsInterpreter(method.Get(),
method->GetEntryPointFromQuickCompiledCode(),
method->GetEntryPointFromPortableCompiledCode());
if (enter_interpreter && !method->IsNative()) {
@@ -1828,32 +1840,11 @@
DCHECK(method->GetEntryPointFromQuickCompiledCode() ==
GetQuickResolutionTrampoline(runtime->GetClassLinker())
|| method->GetEntryPointFromQuickCompiledCode() == GetQuickGenericJniTrampoline());
-
- DCHECK_EQ(method->GetFrameSizeInBytes<false>(), 0U);
-
- // Fix up method metadata if necessary.
- uint32_t s_len;
- const char* shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(dex_method_index), &s_len);
- uint32_t refs = 1; // Native method always has "this" or class.
- for (uint32_t i = 1; i < s_len; ++i) {
- if (shorty[i] == 'L') {
- refs++;
- }
- }
- size_t sirt_size = StackIndirectReferenceTable::GetAlignedSirtSize(refs);
-
- // Get the generic spill masks and base frame size.
- mirror::ArtMethod* callee_save_method =
- Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
-
- method->SetFrameSizeInBytes(callee_save_method->GetFrameSizeInBytes() + sirt_size);
- method->SetCoreSpillMask(callee_save_method->GetCoreSpillMask());
- method->SetFpSpillMask(callee_save_method->GetFpSpillMask());
}
}
// Allow instrumentation its chance to hijack code.
- runtime->GetInstrumentation()->UpdateMethodsCode(method.get(),
+ runtime->GetInstrumentation()->UpdateMethodsCode(method.Get(),
method->GetEntryPointFromQuickCompiledCode(),
method->GetEntryPointFromPortableCompiledCode(),
have_portable_code);
@@ -1861,9 +1852,9 @@
void ClassLinker::LoadClass(const DexFile& dex_file,
const DexFile::ClassDef& dex_class_def,
- const SirtRef<mirror::Class>& klass,
+ const Handle<mirror::Class>& klass,
mirror::ClassLoader* class_loader) {
- CHECK(klass.get() != NULL);
+ CHECK(klass.Get() != NULL);
CHECK(klass->GetDexCache() != NULL);
CHECK_EQ(mirror::Class::kStatusNotReady, klass->GetStatus());
const char* descriptor = dex_file.GetClassDescriptor(dex_class_def);
@@ -1899,7 +1890,7 @@
void ClassLinker::LoadClassMembers(const DexFile& dex_file,
const byte* class_data,
- const SirtRef<mirror::Class>& klass,
+ const Handle<mirror::Class>& klass,
mirror::ClassLoader* class_loader,
const OatFile::OatClass* oat_class) {
// Load fields.
@@ -1923,21 +1914,23 @@
klass->SetIFields(fields);
}
for (size_t i = 0; it.HasNextStaticField(); i++, it.Next()) {
- SirtRef<mirror::ArtField> sfield(self, AllocArtField(self));
- if (UNLIKELY(sfield.get() == NULL)) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ArtField> sfield(hs.NewHandle(AllocArtField(self)));
+ if (UNLIKELY(sfield.Get() == NULL)) {
CHECK(self->IsExceptionPending()); // OOME.
return;
}
- klass->SetStaticField(i, sfield.get());
+ klass->SetStaticField(i, sfield.Get());
LoadField(dex_file, it, klass, sfield);
}
for (size_t i = 0; it.HasNextInstanceField(); i++, it.Next()) {
- SirtRef<mirror::ArtField> ifield(self, AllocArtField(self));
- if (UNLIKELY(ifield.get() == NULL)) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ArtField> ifield(hs.NewHandle(AllocArtField(self)));
+ if (UNLIKELY(ifield.Get() == NULL)) {
CHECK(self->IsExceptionPending()); // OOME.
return;
}
- klass->SetInstanceField(i, ifield.get());
+ klass->SetInstanceField(i, ifield.Get());
LoadField(dex_file, it, klass, ifield);
}
@@ -1964,12 +1957,13 @@
}
size_t class_def_method_index = 0;
for (size_t i = 0; it.HasNextDirectMethod(); i++, it.Next()) {
- SirtRef<mirror::ArtMethod> method(self, LoadMethod(self, dex_file, it, klass));
- if (UNLIKELY(method.get() == NULL)) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ArtMethod> method(hs.NewHandle(LoadMethod(self, dex_file, it, klass)));
+ if (UNLIKELY(method.Get() == NULL)) {
CHECK(self->IsExceptionPending()); // OOME.
return;
}
- klass->SetDirectMethod(i, method.get());
+ klass->SetDirectMethod(i, method.Get());
if (oat_class != nullptr) {
LinkCode(method, oat_class, dex_file, it.GetMemberIndex(), class_def_method_index);
}
@@ -1977,12 +1971,13 @@
class_def_method_index++;
}
for (size_t i = 0; it.HasNextVirtualMethod(); i++, it.Next()) {
- SirtRef<mirror::ArtMethod> method(self, LoadMethod(self, dex_file, it, klass));
- if (UNLIKELY(method.get() == NULL)) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ArtMethod> method(hs.NewHandle(LoadMethod(self, dex_file, it, klass)));
+ if (UNLIKELY(method.Get() == NULL)) {
CHECK(self->IsExceptionPending()); // OOME.
return;
}
- klass->SetVirtualMethod(i, method.get());
+ klass->SetVirtualMethod(i, method.Get());
DCHECK_EQ(class_def_method_index, it.NumDirectMethods() + i);
if (oat_class != nullptr) {
LinkCode(method, oat_class, dex_file, it.GetMemberIndex(), class_def_method_index);
@@ -1993,17 +1988,17 @@
}
void ClassLinker::LoadField(const DexFile& /*dex_file*/, const ClassDataItemIterator& it,
- const SirtRef<mirror::Class>& klass,
- const SirtRef<mirror::ArtField>& dst) {
+ const Handle<mirror::Class>& klass,
+ const Handle<mirror::ArtField>& dst) {
uint32_t field_idx = it.GetMemberIndex();
dst->SetDexFieldIndex(field_idx);
- dst->SetDeclaringClass(klass.get());
+ dst->SetDeclaringClass(klass.Get());
dst->SetAccessFlags(it.GetMemberAccessFlags());
}
mirror::ArtMethod* ClassLinker::LoadMethod(Thread* self, const DexFile& dex_file,
const ClassDataItemIterator& it,
- const SirtRef<mirror::Class>& klass) {
+ const Handle<mirror::Class>& klass) {
uint32_t dex_method_idx = it.GetMemberIndex();
const DexFile::MethodId& method_id = dex_file.GetMethodId(dex_method_idx);
const char* method_name = dex_file.StringDataByIdx(method_id.name_idx_);
@@ -2017,7 +2012,7 @@
const char* old_cause = self->StartAssertNoThreadSuspension("LoadMethod");
dst->SetDexMethodIndex(dex_method_idx);
- dst->SetDeclaringClass(klass.get());
+ dst->SetDeclaringClass(klass.Get());
dst->SetCodeItemOffset(it.GetMethodCodeItemOffset());
dst->SetDexCacheStrings(klass->GetDexCache()->GetStrings());
@@ -2033,7 +2028,7 @@
if (klass->GetClassLoader() != NULL) { // All non-boot finalizer methods are flagged
klass->SetFinalizable();
} else {
- ClassHelper kh(klass.get());
+ ClassHelper kh(klass.Get());
const char* klass_descriptor = kh.GetDescriptor();
// The Enum class declares a "final" finalize() method to prevent subclasses from
// introducing a finalizer. We don't want to set the finalizable flag for Enum or its
@@ -2055,7 +2050,7 @@
} else {
if (UNLIKELY((access_flags & kAccConstructor) == 0)) {
LOG(WARNING) << method_name << " didn't have expected constructor access flag in class "
- << PrettyDescriptor(klass.get()) << " in dex file " << dex_file.GetLocation();
+ << PrettyDescriptor(klass.Get()) << " in dex file " << dex_file.GetLocation();
access_flags |= kAccConstructor;
}
}
@@ -2068,14 +2063,15 @@
void ClassLinker::AppendToBootClassPath(const DexFile& dex_file) {
Thread* self = Thread::Current();
- SirtRef<mirror::DexCache> dex_cache(self, AllocDexCache(self, dex_file));
- CHECK(dex_cache.get() != NULL) << "Failed to allocate dex cache for " << dex_file.GetLocation();
+ StackHandleScope<1> hs(self);
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(AllocDexCache(self, dex_file)));
+ CHECK(dex_cache.Get() != NULL) << "Failed to allocate dex cache for " << dex_file.GetLocation();
AppendToBootClassPath(dex_file, dex_cache);
}
void ClassLinker::AppendToBootClassPath(const DexFile& dex_file,
- const SirtRef<mirror::DexCache>& dex_cache) {
- CHECK(dex_cache.get() != NULL) << dex_file.GetLocation();
+ const Handle<mirror::DexCache>& dex_cache) {
+ CHECK(dex_cache.Get() != NULL) << dex_file.GetLocation();
boot_class_path_.push_back(&dex_file);
RegisterDexFile(dex_file, dex_cache);
}
@@ -2096,12 +2092,12 @@
}
void ClassLinker::RegisterDexFileLocked(const DexFile& dex_file,
- const SirtRef<mirror::DexCache>& dex_cache) {
+ const Handle<mirror::DexCache>& dex_cache) {
dex_lock_.AssertExclusiveHeld(Thread::Current());
- CHECK(dex_cache.get() != NULL) << dex_file.GetLocation();
+ CHECK(dex_cache.Get() != NULL) << dex_file.GetLocation();
CHECK(dex_cache->GetLocation()->Equals(dex_file.GetLocation()))
<< dex_cache->GetLocation()->ToModifiedUtf8() << " " << dex_file.GetLocation();
- dex_caches_.push_back(dex_cache.get());
+ dex_caches_.push_back(dex_cache.Get());
dex_cache->SetDexFile(&dex_file);
if (log_new_dex_caches_roots_) {
// TODO: This is not safe if we can remove dex caches.
@@ -2120,8 +2116,9 @@
// Don't alloc while holding the lock, since allocation may need to
// suspend all threads and another thread may need the dex_lock_ to
// get to a suspend point.
- SirtRef<mirror::DexCache> dex_cache(self, AllocDexCache(self, dex_file));
- CHECK(dex_cache.get() != NULL) << "Failed to allocate dex cache for " << dex_file.GetLocation();
+ StackHandleScope<1> hs(self);
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(AllocDexCache(self, dex_file)));
+ CHECK(dex_cache.Get() != NULL) << "Failed to allocate dex cache for " << dex_file.GetLocation();
{
WriterMutexLock mu(self, dex_lock_);
if (IsDexFileRegisteredLocked(dex_file)) {
@@ -2132,7 +2129,7 @@
}
void ClassLinker::RegisterDexFile(const DexFile& dex_file,
- const SirtRef<mirror::DexCache>& dex_cache) {
+ const Handle<mirror::DexCache>& dex_cache) {
WriterMutexLock mu(Thread::Current(), dex_lock_);
RegisterDexFileLocked(dex_file, dex_cache);
}
@@ -2183,8 +2180,9 @@
CHECK(primitive_class != NULL);
// Must hold lock on object when initializing.
Thread* self = Thread::Current();
- SirtRef<mirror::Class> sirt_class(self, primitive_class);
- ObjectLock<mirror::Class> lock(self, &sirt_class);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> h_class(hs.NewHandle(primitive_class));
+ ObjectLock<mirror::Class> lock(self, &h_class);
primitive_class->SetAccessFlags(kAccPublic | kAccFinal | kAccAbstract);
primitive_class->SetPrimitiveType(type);
primitive_class->SetStatus(mirror::Class::kStatusInitialized, self);
@@ -2208,15 +2206,19 @@
//
// Returns NULL with an exception raised on failure.
mirror::Class* ClassLinker::CreateArrayClass(Thread* self, const char* descriptor,
- const SirtRef<mirror::ClassLoader>& class_loader) {
+ const Handle<mirror::ClassLoader>& class_loader) {
// Identify the underlying component type
CHECK_EQ('[', descriptor[0]);
- SirtRef<mirror::Class> component_type(self, FindClass(self, descriptor + 1, class_loader));
- if (component_type.get() == nullptr) {
+ StackHandleScope<2> hs(self);
+ Handle<mirror::Class> component_type(hs.NewHandle(FindClass(self, descriptor + 1, class_loader)));
+ if (component_type.Get() == nullptr) {
DCHECK(self->IsExceptionPending());
return nullptr;
}
-
+ if (UNLIKELY(component_type->IsPrimitiveVoid())) {
+ ThrowNoClassDefFoundError("Attempt to create array of void primitive type");
+ return nullptr;
+ }
// See if the component type is already loaded. Array classes are
// always associated with the class loader of their underlying
// element type -- an array of Strings goes with the loader for
@@ -2234,7 +2236,7 @@
// because we effectively do this lookup again when we add the new
// class to the hash table --- necessary because of possible races with
// other threads.)
- if (class_loader.get() != component_type->GetClassLoader()) {
+ if (class_loader.Get() != component_type->GetClassLoader()) {
mirror::Class* new_class = LookupClass(descriptor, component_type->GetClassLoader());
if (new_class != NULL) {
return new_class;
@@ -2249,33 +2251,33 @@
//
// Array classes are simple enough that we don't need to do a full
// link step.
- SirtRef<mirror::Class> new_class(self, NULL);
+ auto new_class = hs.NewHandle<mirror::Class>(nullptr);
if (UNLIKELY(!init_done_)) {
// Classes that were hand created, ie not by FindSystemClass
if (strcmp(descriptor, "[Ljava/lang/Class;") == 0) {
- new_class.reset(GetClassRoot(kClassArrayClass));
+ new_class.Assign(GetClassRoot(kClassArrayClass));
} else if (strcmp(descriptor, "[Ljava/lang/Object;") == 0) {
- new_class.reset(GetClassRoot(kObjectArrayClass));
+ new_class.Assign(GetClassRoot(kObjectArrayClass));
} else if (strcmp(descriptor, class_roots_descriptors_[kJavaLangStringArrayClass]) == 0) {
- new_class.reset(GetClassRoot(kJavaLangStringArrayClass));
+ new_class.Assign(GetClassRoot(kJavaLangStringArrayClass));
} else if (strcmp(descriptor,
class_roots_descriptors_[kJavaLangReflectArtMethodArrayClass]) == 0) {
- new_class.reset(GetClassRoot(kJavaLangReflectArtMethodArrayClass));
+ new_class.Assign(GetClassRoot(kJavaLangReflectArtMethodArrayClass));
} else if (strcmp(descriptor,
class_roots_descriptors_[kJavaLangReflectArtFieldArrayClass]) == 0) {
- new_class.reset(GetClassRoot(kJavaLangReflectArtFieldArrayClass));
+ new_class.Assign(GetClassRoot(kJavaLangReflectArtFieldArrayClass));
} else if (strcmp(descriptor, "[C") == 0) {
- new_class.reset(GetClassRoot(kCharArrayClass));
+ new_class.Assign(GetClassRoot(kCharArrayClass));
} else if (strcmp(descriptor, "[I") == 0) {
- new_class.reset(GetClassRoot(kIntArrayClass));
+ new_class.Assign(GetClassRoot(kIntArrayClass));
}
}
- if (new_class.get() == nullptr) {
- new_class.reset(AllocClass(self, sizeof(mirror::Class)));
- if (new_class.get() == nullptr) {
+ if (new_class.Get() == nullptr) {
+ new_class.Assign(AllocClass(self, sizeof(mirror::Class)));
+ if (new_class.Get() == nullptr) {
return nullptr;
}
- new_class->SetComponentType(component_type.get());
+ new_class->SetComponentType(component_type.Get());
}
ObjectLock<mirror::Class> lock(self, &new_class); // Must hold lock on object when initializing.
DCHECK(new_class->GetComponentType() != NULL);
@@ -2315,9 +2317,9 @@
new_class->SetAccessFlags(access_flags);
- mirror::Class* existing = InsertClass(descriptor, new_class.get(), Hash(descriptor));
+ mirror::Class* existing = InsertClass(descriptor, new_class.Get(), Hash(descriptor));
if (existing == nullptr) {
- return new_class.get();
+ return new_class.Get();
}
// Another thread must have loaded the class after we
// started but before we finished. Abandon what we've
@@ -2549,7 +2551,7 @@
}
}
-void ClassLinker::VerifyClass(const SirtRef<mirror::Class>& klass) {
+void ClassLinker::VerifyClass(const Handle<mirror::Class>& klass) {
// TODO: assert that the monitor on the Class is held
Thread* self = Thread::Current();
ObjectLock<mirror::Class> lock(self, &klass);
@@ -2563,7 +2565,7 @@
// The class might already be erroneous, for example at compile time if we attempted to verify
// this class as a parent to another.
if (klass->IsErroneous()) {
- ThrowEarlierClassFailure(klass.get());
+ ThrowEarlierClassFailure(klass.Get());
return;
}
@@ -2571,7 +2573,7 @@
klass->SetStatus(mirror::Class::kStatusVerifying, self);
} else {
CHECK_EQ(klass->GetStatus(), mirror::Class::kStatusRetryVerificationAtRuntime)
- << PrettyClass(klass.get());
+ << PrettyClass(klass.Get());
CHECK(!Runtime::Current()->IsCompiler());
klass->SetStatus(mirror::Class::kStatusVerifyingAtRuntime, self);
}
@@ -2583,8 +2585,9 @@
}
// Verify super class.
- SirtRef<mirror::Class> super(self, klass->GetSuperClass());
- if (super.get() != NULL) {
+ StackHandleScope<2> hs(self);
+ Handle<mirror::Class> super(hs.NewHandle(klass->GetSuperClass()));
+ if (super.Get() != NULL) {
// Acquire lock to prevent races on verifying the super class.
ObjectLock<mirror::Class> lock(self, &super);
@@ -2593,16 +2596,16 @@
}
if (!super->IsCompileTimeVerified()) {
std::string error_msg(StringPrintf("Rejecting class %s that attempts to sub-class erroneous class %s",
- PrettyDescriptor(klass.get()).c_str(),
- PrettyDescriptor(super.get()).c_str()));
+ PrettyDescriptor(klass.Get()).c_str(),
+ PrettyDescriptor(super.Get()).c_str()));
LOG(ERROR) << error_msg << " in " << klass->GetDexCache()->GetLocation()->ToModifiedUtf8();
- SirtRef<mirror::Throwable> cause(self, self->GetException(NULL));
- if (cause.get() != nullptr) {
+ Handle<mirror::Throwable> cause(hs.NewHandle(self->GetException(nullptr)));
+ if (cause.Get() != nullptr) {
self->ClearException();
}
- ThrowVerifyError(klass.get(), "%s", error_msg.c_str());
- if (cause.get() != nullptr) {
- self->GetException(nullptr)->SetCause(cause.get());
+ ThrowVerifyError(klass.Get(), "%s", error_msg.c_str());
+ if (cause.Get() != nullptr) {
+ self->GetException(nullptr)->SetCause(cause.Get());
}
ClassReference ref(klass->GetDexCache()->GetDexFile(), klass->GetDexClassDefIndex());
if (Runtime::Current()->IsCompiler()) {
@@ -2616,26 +2619,26 @@
// Try to use verification information from the oat file, otherwise do runtime verification.
const DexFile& dex_file = *klass->GetDexCache()->GetDexFile();
mirror::Class::Status oat_file_class_status(mirror::Class::kStatusNotReady);
- bool preverified = VerifyClassUsingOatFile(dex_file, klass.get(), oat_file_class_status);
+ bool preverified = VerifyClassUsingOatFile(dex_file, klass.Get(), oat_file_class_status);
if (oat_file_class_status == mirror::Class::kStatusError) {
VLOG(class_linker) << "Skipping runtime verification of erroneous class "
- << PrettyDescriptor(klass.get()) << " in "
+ << PrettyDescriptor(klass.Get()) << " in "
<< klass->GetDexCache()->GetLocation()->ToModifiedUtf8();
- ThrowVerifyError(klass.get(), "Rejecting class %s because it failed compile-time verification",
- PrettyDescriptor(klass.get()).c_str());
+ ThrowVerifyError(klass.Get(), "Rejecting class %s because it failed compile-time verification",
+ PrettyDescriptor(klass.Get()).c_str());
klass->SetStatus(mirror::Class::kStatusError, self);
return;
}
verifier::MethodVerifier::FailureKind verifier_failure = verifier::MethodVerifier::kNoFailure;
std::string error_msg;
if (!preverified) {
- verifier_failure = verifier::MethodVerifier::VerifyClass(klass.get(),
+ verifier_failure = verifier::MethodVerifier::VerifyClass(klass.Get(),
Runtime::Current()->IsCompiler(),
&error_msg);
}
if (preverified || verifier_failure != verifier::MethodVerifier::kHardFailure) {
if (!preverified && verifier_failure != verifier::MethodVerifier::kNoFailure) {
- VLOG(class_linker) << "Soft verification failure in class " << PrettyDescriptor(klass.get())
+ VLOG(class_linker) << "Soft verification failure in class " << PrettyDescriptor(klass.Get())
<< " in " << klass->GetDexCache()->GetLocation()->ToModifiedUtf8()
<< " because: " << error_msg;
}
@@ -2645,7 +2648,7 @@
if (verifier_failure == verifier::MethodVerifier::kNoFailure) {
// Even though there were no verifier failures we need to respect whether the super-class
// was verified or requiring runtime reverification.
- if (super.get() == NULL || super->IsVerified()) {
+ if (super.Get() == NULL || super->IsVerified()) {
klass->SetStatus(mirror::Class::kStatusVerified, self);
} else {
CHECK_EQ(super->GetStatus(), mirror::Class::kStatusRetryVerificationAtRuntime);
@@ -2665,11 +2668,11 @@
}
}
} else {
- LOG(ERROR) << "Verification failed on class " << PrettyDescriptor(klass.get())
+ LOG(ERROR) << "Verification failed on class " << PrettyDescriptor(klass.Get())
<< " in " << klass->GetDexCache()->GetLocation()->ToModifiedUtf8()
<< " because: " << error_msg;
self->AssertNoPendingException();
- ThrowVerifyError(klass.get(), "%s", error_msg.c_str());
+ ThrowVerifyError(klass.Get(), "%s", error_msg.c_str());
klass->SetStatus(mirror::Class::kStatusError, self);
}
if (preverified || verifier_failure == verifier::MethodVerifier::kNoFailure) {
@@ -2760,7 +2763,7 @@
}
void ClassLinker::ResolveClassExceptionHandlerTypes(const DexFile& dex_file,
- const SirtRef<mirror::Class>& klass) {
+ const Handle<mirror::Class>& klass) {
for (size_t i = 0; i < klass->NumDirectMethods(); i++) {
ResolveMethodExceptionHandlerTypes(dex_file, klass->GetDirectMethod(i));
}
@@ -2801,15 +2804,16 @@
static void CheckProxyConstructor(mirror::ArtMethod* constructor);
static void CheckProxyMethod(mirror::ArtMethod* method,
- SirtRef<mirror::ArtMethod>& prototype);
+ Handle<mirror::ArtMethod>& prototype);
mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccess& soa, jstring name,
jobjectArray interfaces, jobject loader,
jobjectArray methods, jobjectArray throws) {
Thread* self = soa.Self();
- SirtRef<mirror::Class> klass(self, AllocClass(self, GetClassRoot(kJavaLangClass),
- sizeof(mirror::SynthesizedProxyClass)));
- if (klass.get() == NULL) {
+ StackHandleScope<8> hs(self);
+ Handle<mirror::Class> klass(hs.NewHandle(AllocClass(self, GetClassRoot(kJavaLangClass),
+ sizeof(mirror::SynthesizedProxyClass))));
+ if (klass.Get() == NULL) {
CHECK(self->IsExceptionPending()); // OOME.
return NULL;
}
@@ -2834,38 +2838,38 @@
}
// 1. Create a static field 'interfaces' that holds the _declared_ interfaces implemented by
// our proxy, so Class.getInterfaces doesn't return the flattened set.
- SirtRef<mirror::ArtField> interfaces_sfield(self, AllocArtField(self));
- if (UNLIKELY(interfaces_sfield.get() == NULL)) {
+ Handle<mirror::ArtField> interfaces_sfield(hs.NewHandle(AllocArtField(self)));
+ if (UNLIKELY(interfaces_sfield.Get() == nullptr)) {
CHECK(self->IsExceptionPending()); // OOME.
- return NULL;
+ return nullptr;
}
- klass->SetStaticField(0, interfaces_sfield.get());
+ klass->SetStaticField(0, interfaces_sfield.Get());
interfaces_sfield->SetDexFieldIndex(0);
- interfaces_sfield->SetDeclaringClass(klass.get());
+ interfaces_sfield->SetDeclaringClass(klass.Get());
interfaces_sfield->SetAccessFlags(kAccStatic | kAccPublic | kAccFinal);
// 2. Create a static field 'throws' that holds exceptions thrown by our methods.
- SirtRef<mirror::ArtField> throws_sfield(self, AllocArtField(self));
- if (UNLIKELY(throws_sfield.get() == NULL)) {
+ Handle<mirror::ArtField> throws_sfield(hs.NewHandle(AllocArtField(self)));
+ if (UNLIKELY(throws_sfield.Get() == nullptr)) {
CHECK(self->IsExceptionPending()); // OOME.
- return NULL;
+ return nullptr;
}
- klass->SetStaticField(1, throws_sfield.get());
+ klass->SetStaticField(1, throws_sfield.Get());
throws_sfield->SetDexFieldIndex(1);
- throws_sfield->SetDeclaringClass(klass.get());
+ throws_sfield->SetDeclaringClass(klass.Get());
throws_sfield->SetAccessFlags(kAccStatic | kAccPublic | kAccFinal);
// Proxies have 1 direct method, the constructor
{
mirror::ObjectArray<mirror::ArtMethod>* directs = AllocArtMethodArray(self, 1);
- if (UNLIKELY(directs == NULL)) {
+ if (UNLIKELY(directs == nullptr)) {
CHECK(self->IsExceptionPending()); // OOME.
- return NULL;
+ return nullptr;
}
klass->SetDirectMethods(directs);
mirror::ArtMethod* constructor = CreateProxyConstructor(self, klass, proxy_class);
- if (UNLIKELY(constructor == NULL)) {
+ if (UNLIKELY(constructor == nullptr)) {
CHECK(self->IsExceptionPending()); // OOME.
- return NULL;
+ return nullptr;
}
klass->SetDirectMethod(0, constructor);
}
@@ -2882,13 +2886,14 @@
klass->SetVirtualMethods(virtuals);
}
for (size_t i = 0; i < num_virtual_methods; ++i) {
+ StackHandleScope<1> hs(self);
mirror::ObjectArray<mirror::ArtMethod>* decoded_methods =
soa.Decode<mirror::ObjectArray<mirror::ArtMethod>*>(methods);
- SirtRef<mirror::ArtMethod> prototype(self, decoded_methods->Get(i));
+ Handle<mirror::ArtMethod> prototype(hs.NewHandle(decoded_methods->Get(i)));
mirror::ArtMethod* clone = CreateProxyMethod(self, klass, prototype);
- if (UNLIKELY(clone == NULL)) {
+ if (UNLIKELY(clone == nullptr)) {
CHECK(self->IsExceptionPending()); // OOME.
- return NULL;
+ return nullptr;
}
klass->SetVirtualMethod(i, clone);
}
@@ -2900,26 +2905,27 @@
{
ObjectLock<mirror::Class> lock(self, &klass); // Must hold lock on object when resolved.
// Link the fields and virtual methods, creating vtable and iftables
- SirtRef<mirror::ObjectArray<mirror::Class> > sirt_interfaces(
- self, soa.Decode<mirror::ObjectArray<mirror::Class>*>(interfaces));
- if (!LinkClass(self, klass, sirt_interfaces)) {
+ Handle<mirror::ObjectArray<mirror::Class> > h_interfaces(
+ hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Class>*>(interfaces)));
+ if (!LinkClass(self, klass, h_interfaces)) {
klass->SetStatus(mirror::Class::kStatusError, self);
return nullptr;
}
- interfaces_sfield->SetObject<false>(klass.get(), soa.Decode<mirror::ObjectArray<mirror::Class>*>(interfaces));
- throws_sfield->SetObject<false>(klass.get(), soa.Decode<mirror::ObjectArray<mirror::ObjectArray<mirror::Class> >*>(throws));
+ interfaces_sfield->SetObject<false>(klass.Get(), soa.Decode<mirror::ObjectArray<mirror::Class>*>(interfaces));
+ throws_sfield->SetObject<false>(klass.Get(), soa.Decode<mirror::ObjectArray<mirror::ObjectArray<mirror::Class> >*>(throws));
klass->SetStatus(mirror::Class::kStatusInitialized, self);
}
// sanity checks
if (kIsDebugBuild) {
- CHECK(klass->GetIFields() == NULL);
+ CHECK(klass->GetIFields() == nullptr);
CheckProxyConstructor(klass->GetDirectMethod(0));
for (size_t i = 0; i < num_virtual_methods; ++i) {
+ StackHandleScope<1> hs(self);
mirror::ObjectArray<mirror::ArtMethod>* decoded_methods =
soa.Decode<mirror::ObjectArray<mirror::ArtMethod>*>(methods);
- SirtRef<mirror::ArtMethod> prototype(self, decoded_methods->Get(i));
+ Handle<mirror::ArtMethod> prototype(hs.NewHandle(decoded_methods->Get(i)));
CheckProxyMethod(klass->GetVirtualMethod(i), prototype);
}
@@ -2933,14 +2939,14 @@
CHECK_EQ(PrettyField(klass->GetStaticField(1)), throws_field_name);
mirror::SynthesizedProxyClass* synth_proxy_class =
- down_cast<mirror::SynthesizedProxyClass*>(klass.get());
+ down_cast<mirror::SynthesizedProxyClass*>(klass.Get());
CHECK_EQ(synth_proxy_class->GetInterfaces(), soa.Decode<mirror::ObjectArray<mirror::Class>*>(interfaces));
CHECK_EQ(synth_proxy_class->GetThrows(), soa.Decode<mirror::ObjectArray<mirror::ObjectArray<mirror::Class> >*>(throws));
}
- std::string descriptor(GetDescriptorForProxy(klass.get()));
- mirror::Class* existing = InsertClass(descriptor.c_str(), klass.get(), Hash(descriptor.c_str()));
+ std::string descriptor(GetDescriptorForProxy(klass.Get()));
+ mirror::Class* existing = InsertClass(descriptor.c_str(), klass.Get(), Hash(descriptor.c_str()));
CHECK(existing == nullptr);
- return klass.get();
+ return klass.Get();
}
std::string ClassLinker::GetDescriptorForProxy(mirror::Class* proxy_class) {
@@ -2975,7 +2981,7 @@
mirror::ArtMethod* ClassLinker::CreateProxyConstructor(Thread* self,
- const SirtRef<mirror::Class>& klass,
+ const Handle<mirror::Class>& klass,
mirror::Class* proxy_class) {
// Create constructor for Proxy that must initialize h
mirror::ObjectArray<mirror::ArtMethod>* proxy_direct_methods =
@@ -2992,7 +2998,7 @@
}
// Make this constructor public and fix the class to be our Proxy version
constructor->SetAccessFlags((constructor->GetAccessFlags() & ~kAccProtected) | kAccPublic);
- constructor->SetDeclaringClass(klass.get());
+ constructor->SetDeclaringClass(klass.Get());
return constructor;
}
@@ -3006,12 +3012,12 @@
}
mirror::ArtMethod* ClassLinker::CreateProxyMethod(Thread* self,
- const SirtRef<mirror::Class>& klass,
- const SirtRef<mirror::ArtMethod>& prototype) {
+ const Handle<mirror::Class>& klass,
+ const Handle<mirror::ArtMethod>& prototype) {
// Ensure prototype is in dex cache so that we can use the dex cache to look up the overridden
// prototype method
prototype->GetDeclaringClass()->GetDexCache()->SetResolvedMethod(prototype->GetDexMethodIndex(),
- prototype.get());
+ prototype.Get());
// We steal everything from the prototype (such as DexCache, invoke stub, etc.) then specialize
// as necessary
mirror::ArtMethod* method = down_cast<mirror::ArtMethod*>(prototype->Clone(self));
@@ -3022,16 +3028,11 @@
// Set class to be the concrete proxy class and clear the abstract flag, modify exceptions to
// the intersection of throw exceptions as defined in Proxy
- method->SetDeclaringClass(klass.get());
+ method->SetDeclaringClass(klass.Get());
method->SetAccessFlags((method->GetAccessFlags() & ~kAccAbstract) | kAccFinal);
// At runtime the method looks like a reference and argument saving method, clone the code
// related parameters from this method.
- mirror::ArtMethod* refs_and_args =
- Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
- method->SetCoreSpillMask(refs_and_args->GetCoreSpillMask());
- method->SetFpSpillMask(refs_and_args->GetFpSpillMask());
- method->SetFrameSizeInBytes(refs_and_args->GetFrameSizeInBytes());
method->SetEntryPointFromQuickCompiledCode(GetQuickProxyInvokeHandler());
method->SetEntryPointFromPortableCompiledCode(GetPortableProxyInvokeHandler());
method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge);
@@ -3040,7 +3041,7 @@
}
static void CheckProxyMethod(mirror::ArtMethod* method,
- SirtRef<mirror::ArtMethod>& prototype)
+ Handle<mirror::ArtMethod>& prototype)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Basic sanity
CHECK(!prototype->IsFinal());
@@ -3055,7 +3056,7 @@
CHECK_EQ(prototype->GetDexMethodIndex(), method->GetDexMethodIndex());
MethodHelper mh(method);
- MethodHelper mh2(prototype.get());
+ MethodHelper mh2(prototype.Get());
CHECK_STREQ(mh.GetName(), mh2.GetName());
CHECK_STREQ(mh.GetShorty(), mh2.GetShorty());
// More complex sanity - via dex cache
@@ -3101,7 +3102,7 @@
return init_done_;
}
-bool ClassLinker::InitializeClass(const SirtRef<mirror::Class>& klass, bool can_init_statics,
+bool ClassLinker::InitializeClass(const Handle<mirror::Class>& klass, bool can_init_statics,
bool can_init_parents) {
// see JLS 3rd edition, 12.4.2 "Detailed Initialization Procedure" for the locking protocol
@@ -3113,7 +3114,7 @@
}
// Fast fail if initialization requires a full runtime. Not part of the JLS.
- if (!CanWeInitializeClass(klass.get(), can_init_statics, can_init_parents)) {
+ if (!CanWeInitializeClass(klass.Get(), can_init_statics, can_init_parents)) {
return false;
}
@@ -3129,11 +3130,11 @@
// Was the class already found to be erroneous? Done under the lock to match the JLS.
if (klass->IsErroneous()) {
- ThrowEarlierClassFailure(klass.get());
+ ThrowEarlierClassFailure(klass.Get());
return false;
}
- CHECK(klass->IsResolved()) << PrettyClass(klass.get()) << ": state=" << klass->GetStatus();
+ CHECK(klass->IsResolved()) << PrettyClass(klass.Get()) << ": state=" << klass->GetStatus();
if (!klass->IsVerified()) {
VerifyClass(klass);
@@ -3170,7 +3171,7 @@
return false;
}
- CHECK_EQ(klass->GetStatus(), mirror::Class::kStatusVerified) << PrettyClass(klass.get());
+ CHECK_EQ(klass->GetStatus(), mirror::Class::kStatusVerified) << PrettyClass(klass.Get());
// From here out other threads may observe that we're initializing and so changes of state
// require the a notification.
@@ -3186,14 +3187,15 @@
if (!super_class->IsInitialized()) {
CHECK(!super_class->IsInterface());
CHECK(can_init_parents);
- SirtRef<mirror::Class> sirt_super(self, super_class);
- bool super_initialized = InitializeClass(sirt_super, can_init_statics, true);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> handle_scope_super(hs.NewHandle(super_class));
+ bool super_initialized = InitializeClass(handle_scope_super, can_init_statics, true);
if (!super_initialized) {
// The super class was verified ahead of entering initializing, we should only be here if
// the super class became erroneous due to initialization.
- CHECK(sirt_super->IsErroneous() && self->IsExceptionPending())
- << "Super class initialization failed for " << PrettyDescriptor(sirt_super.get())
- << " that has unexpected status " << sirt_super->GetStatus()
+ CHECK(handle_scope_super->IsErroneous() && self->IsExceptionPending())
+ << "Super class initialization failed for " << PrettyDescriptor(handle_scope_super.Get())
+ << " that has unexpected status " << handle_scope_super->GetStatus()
<< "\nPending exception:\n"
<< (self->GetException(NULL) != NULL ? self->GetException(NULL)->Dump() : "");
ObjectLock<mirror::Class> lock(self, &klass);
@@ -3205,19 +3207,20 @@
}
if (klass->NumStaticFields() > 0) {
- ClassHelper kh(klass.get());
+ ClassHelper kh(klass.Get());
const DexFile::ClassDef* dex_class_def = kh.GetClassDef();
CHECK(dex_class_def != NULL);
const DexFile& dex_file = kh.GetDexFile();
- SirtRef<mirror::ClassLoader> class_loader(self, klass->GetClassLoader());
- SirtRef<mirror::DexCache> dex_cache(self, kh.GetDexCache());
+ StackHandleScope<2> hs(self);
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(klass->GetClassLoader()));
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(kh.GetDexCache()));
EncodedStaticFieldValueIterator it(dex_file, &dex_cache, &class_loader,
this, *dex_class_def);
if (it.HasNext()) {
CHECK(can_init_statics);
// We reordered the fields, so we need to be able to map the field indexes to the right fields.
SafeMap<uint32_t, mirror::ArtField*> field_map;
- ConstructFieldMap(dex_file, *dex_class_def, klass.get(), field_map);
+ ConstructFieldMap(dex_file, *dex_class_def, klass.Get(), field_map);
for (size_t i = 0; it.HasNext(); i++, it.Next()) {
if (Runtime::Current()->IsActiveTransaction()) {
it.ReadValueToField<true>(field_map.Get(i));
@@ -3255,17 +3258,17 @@
// Set the class as initialized except if failed to initialize static fields.
klass->SetStatus(mirror::Class::kStatusInitialized, self);
if (VLOG_IS_ON(class_linker)) {
- ClassHelper kh(klass.get());
+ ClassHelper kh(klass.Get());
LOG(INFO) << "Initialized class " << kh.GetDescriptor() << " from " << kh.GetLocation();
}
// Opportunistically set static method trampolines to their destination.
- FixupStaticTrampolines(klass.get());
+ FixupStaticTrampolines(klass.Get());
}
}
return success;
}
-bool ClassLinker::WaitForInitializeClass(const SirtRef<mirror::Class>& klass, Thread* self,
+bool ClassLinker::WaitForInitializeClass(const Handle<mirror::Class>& klass, Thread* self,
ObjectLock<mirror::Class>& lock)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
while (true) {
@@ -3293,19 +3296,19 @@
// The caller wants an exception, but it was thrown in a
// different thread. Synthesize one here.
ThrowNoClassDefFoundError("<clinit> failed for class %s; see exception in other thread",
- PrettyDescriptor(klass.get()).c_str());
+ PrettyDescriptor(klass.Get()).c_str());
return false;
}
if (klass->IsInitialized()) {
return true;
}
- LOG(FATAL) << "Unexpected class status. " << PrettyClass(klass.get()) << " is "
+ LOG(FATAL) << "Unexpected class status. " << PrettyClass(klass.Get()) << " is "
<< klass->GetStatus();
}
- LOG(FATAL) << "Not Reached" << PrettyClass(klass.get());
+ LOG(FATAL) << "Not Reached" << PrettyClass(klass.Get());
}
-bool ClassLinker::ValidateSuperClassDescriptors(const SirtRef<mirror::Class>& klass) {
+bool ClassLinker::ValidateSuperClassDescriptors(const Handle<mirror::Class>& klass) {
if (klass->IsInterface()) {
return true;
}
@@ -3319,8 +3322,8 @@
super_mh.ChangeMethod(klass->GetSuperClass()->GetVTable()->GetWithoutChecks(i));
bool is_override = mh.GetMethod() != super_mh.GetMethod();
if (is_override && !mh.HasSameSignatureWithDifferentClassLoaders(&super_mh)) {
- ThrowLinkageError(klass.get(), "Class %s method %s resolves differently in superclass %s",
- PrettyDescriptor(klass.get()).c_str(),
+ ThrowLinkageError(klass.Get(), "Class %s method %s resolves differently in superclass %s",
+ PrettyDescriptor(klass.Get()).c_str(),
PrettyMethod(mh.GetMethod()).c_str(),
PrettyDescriptor(klass->GetSuperClass()).c_str());
return false;
@@ -3335,8 +3338,8 @@
super_mh.ChangeMethod(klass->GetIfTable()->GetInterface(i)->GetVirtualMethod(j));
bool is_override = mh.GetMethod() != super_mh.GetMethod();
if (is_override && !mh.HasSameSignatureWithDifferentClassLoaders(&super_mh)) {
- ThrowLinkageError(klass.get(), "Class %s method %s resolves differently in interface %s",
- PrettyDescriptor(klass.get()).c_str(),
+ ThrowLinkageError(klass.Get(), "Class %s method %s resolves differently in interface %s",
+ PrettyDescriptor(klass.Get()).c_str(),
PrettyMethod(mh.GetMethod()).c_str(),
PrettyDescriptor(klass->GetIfTable()->GetInterface(i)).c_str());
return false;
@@ -3347,9 +3350,9 @@
return true;
}
-bool ClassLinker::EnsureInitialized(const SirtRef<mirror::Class>& c, bool can_init_fields,
+bool ClassLinker::EnsureInitialized(const Handle<mirror::Class>& c, bool can_init_fields,
bool can_init_parents) {
- DCHECK(c.get() != NULL);
+ DCHECK(c.Get() != NULL);
if (c->IsInitialized()) {
return true;
}
@@ -3357,7 +3360,7 @@
bool success = InitializeClass(c, can_init_fields, can_init_parents);
if (!success) {
if (can_init_fields && can_init_parents) {
- CHECK(Thread::Current()->IsExceptionPending()) << PrettyClass(c.get());
+ CHECK(Thread::Current()->IsExceptionPending()) << PrettyClass(c.Get());
}
}
return success;
@@ -3368,17 +3371,17 @@
SafeMap<uint32_t, mirror::ArtField*>& field_map) {
const byte* class_data = dex_file.GetClassData(dex_class_def);
ClassDataItemIterator it(dex_file, class_data);
- Thread* self = Thread::Current();
- SirtRef<mirror::DexCache> dex_cache(self, c->GetDexCache());
- SirtRef<mirror::ClassLoader> class_loader(self, c->GetClassLoader());
+ StackHandleScope<2> hs(Thread::Current());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(c->GetDexCache()));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(c->GetClassLoader()));
CHECK(!kMovingFields);
for (size_t i = 0; it.HasNextStaticField(); i++, it.Next()) {
field_map.Put(i, ResolveField(dex_file, it.GetMemberIndex(), dex_cache, class_loader, true));
}
}
-bool ClassLinker::LinkClass(Thread* self, const SirtRef<mirror::Class>& klass,
- const SirtRef<mirror::ObjectArray<mirror::Class> >& interfaces) {
+bool ClassLinker::LinkClass(Thread* self, const Handle<mirror::Class>& klass,
+ const Handle<mirror::ObjectArray<mirror::Class> >& interfaces) {
CHECK_EQ(mirror::Class::kStatusLoaded, klass->GetStatus());
if (!LinkSuperClass(klass)) {
return false;
@@ -3399,22 +3402,22 @@
return true;
}
-bool ClassLinker::LoadSuperAndInterfaces(const SirtRef<mirror::Class>& klass,
+bool ClassLinker::LoadSuperAndInterfaces(const Handle<mirror::Class>& klass,
const DexFile& dex_file) {
CHECK_EQ(mirror::Class::kStatusIdx, klass->GetStatus());
const DexFile::ClassDef& class_def = dex_file.GetClassDef(klass->GetDexClassDefIndex());
uint16_t super_class_idx = class_def.superclass_idx_;
if (super_class_idx != DexFile::kDexNoIndex16) {
- mirror::Class* super_class = ResolveType(dex_file, super_class_idx, klass.get());
+ mirror::Class* super_class = ResolveType(dex_file, super_class_idx, klass.Get());
if (super_class == NULL) {
DCHECK(Thread::Current()->IsExceptionPending());
return false;
}
// Verify
if (!klass->CanAccess(super_class)) {
- ThrowIllegalAccessError(klass.get(), "Class %s extended by class %s is inaccessible",
+ ThrowIllegalAccessError(klass.Get(), "Class %s extended by class %s is inaccessible",
PrettyDescriptor(super_class).c_str(),
- PrettyDescriptor(klass.get()).c_str());
+ PrettyDescriptor(klass.Get()).c_str());
return false;
}
klass->SetSuperClass(super_class);
@@ -3423,7 +3426,7 @@
if (interfaces != NULL) {
for (size_t i = 0; i < interfaces->Size(); i++) {
uint16_t idx = interfaces->GetTypeItem(i).type_idx_;
- mirror::Class* interface = ResolveType(dex_file, idx, klass.get());
+ mirror::Class* interface = ResolveType(dex_file, idx, klass.Get());
if (interface == NULL) {
DCHECK(Thread::Current()->IsExceptionPending());
return false;
@@ -3431,9 +3434,9 @@
// Verify
if (!klass->CanAccess(interface)) {
// TODO: the RI seemed to ignore this in my testing.
- ThrowIllegalAccessError(klass.get(), "Interface %s implemented by class %s is inaccessible",
+ ThrowIllegalAccessError(klass.Get(), "Interface %s implemented by class %s is inaccessible",
PrettyDescriptor(interface).c_str(),
- PrettyDescriptor(klass.get()).c_str());
+ PrettyDescriptor(klass.Get()).c_str());
return false;
}
}
@@ -3443,33 +3446,33 @@
return true;
}
-bool ClassLinker::LinkSuperClass(const SirtRef<mirror::Class>& klass) {
+bool ClassLinker::LinkSuperClass(const Handle<mirror::Class>& klass) {
CHECK(!klass->IsPrimitive());
mirror::Class* super = klass->GetSuperClass();
- if (klass.get() == GetClassRoot(kJavaLangObject)) {
+ if (klass.Get() == GetClassRoot(kJavaLangObject)) {
if (super != NULL) {
- ThrowClassFormatError(klass.get(), "java.lang.Object must not have a superclass");
+ ThrowClassFormatError(klass.Get(), "java.lang.Object must not have a superclass");
return false;
}
return true;
}
if (super == NULL) {
- ThrowLinkageError(klass.get(), "No superclass defined for class %s",
- PrettyDescriptor(klass.get()).c_str());
+ ThrowLinkageError(klass.Get(), "No superclass defined for class %s",
+ PrettyDescriptor(klass.Get()).c_str());
return false;
}
// Verify
if (super->IsFinal() || super->IsInterface()) {
- ThrowIncompatibleClassChangeError(klass.get(), "Superclass %s of %s is %s",
+ ThrowIncompatibleClassChangeError(klass.Get(), "Superclass %s of %s is %s",
PrettyDescriptor(super).c_str(),
- PrettyDescriptor(klass.get()).c_str(),
+ PrettyDescriptor(klass.Get()).c_str(),
super->IsFinal() ? "declared final" : "an interface");
return false;
}
if (!klass->CanAccess(super)) {
- ThrowIllegalAccessError(klass.get(), "Superclass %s is inaccessible to class %s",
+ ThrowIllegalAccessError(klass.Get(), "Superclass %s is inaccessible to class %s",
PrettyDescriptor(super).c_str(),
- PrettyDescriptor(klass.get()).c_str());
+ PrettyDescriptor(klass.Get()).c_str());
return false;
}
@@ -3485,9 +3488,9 @@
}
// Disallow custom direct subclasses of java.lang.ref.Reference.
if (init_done_ && super == GetClassRoot(kJavaLangRefReference)) {
- ThrowLinkageError(klass.get(),
+ ThrowLinkageError(klass.Get(),
"Class %s attempts to subclass java.lang.ref.Reference, which is not allowed",
- PrettyDescriptor(klass.get()).c_str());
+ PrettyDescriptor(klass.Get()).c_str());
return false;
}
@@ -3502,13 +3505,13 @@
}
// Populate the class vtable and itable. Compute return type indices.
-bool ClassLinker::LinkMethods(const SirtRef<mirror::Class>& klass,
- const SirtRef<mirror::ObjectArray<mirror::Class> >& interfaces) {
+bool ClassLinker::LinkMethods(const Handle<mirror::Class>& klass,
+ const Handle<mirror::ObjectArray<mirror::Class> >& interfaces) {
if (klass->IsInterface()) {
// No vtable.
size_t count = klass->NumVirtualMethods();
if (!IsUint(16, count)) {
- ThrowClassFormatError(klass.get(), "Too many methods on interface: %zd", count);
+ ThrowClassFormatError(klass.Get(), "Too many methods on interface: %zd", count);
return false;
}
for (size_t i = 0; i < count; ++i) {
@@ -3523,16 +3526,17 @@
return true;
}
-bool ClassLinker::LinkVirtualMethods(const SirtRef<mirror::Class>& klass) {
+bool ClassLinker::LinkVirtualMethods(const Handle<mirror::Class>& klass) {
Thread* self = Thread::Current();
if (klass->HasSuperClass()) {
uint32_t max_count = klass->NumVirtualMethods() + klass->GetSuperClass()->GetVTable()->GetLength();
size_t actual_count = klass->GetSuperClass()->GetVTable()->GetLength();
CHECK_LE(actual_count, max_count);
// TODO: do not assign to the vtable field until it is fully constructed.
- SirtRef<mirror::ObjectArray<mirror::ArtMethod> >
- vtable(self, klass->GetSuperClass()->GetVTable()->CopyOf(self, max_count));
- if (UNLIKELY(vtable.get() == NULL)) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ObjectArray<mirror::ArtMethod>> vtable(
+ hs.NewHandle(klass->GetSuperClass()->GetVTable()->CopyOf(self, max_count)));
+ if (UNLIKELY(vtable.Get() == NULL)) {
CHECK(self->IsExceptionPending()); // OOME.
return false;
}
@@ -3547,7 +3551,7 @@
if (local_mh.HasSameNameAndSignature(&super_mh)) {
if (klass->CanAccessMember(super_method->GetDeclaringClass(), super_method->GetAccessFlags())) {
if (super_method->IsFinal()) {
- ThrowLinkageError(klass.get(), "Method %s overrides final method in class %s",
+ ThrowLinkageError(klass.Get(), "Method %s overrides final method in class %s",
PrettyMethod(local_method).c_str(),
super_mh.GetDeclaringClassDescriptor());
return false;
@@ -3570,29 +3574,30 @@
}
}
if (!IsUint(16, actual_count)) {
- ThrowClassFormatError(klass.get(), "Too many methods defined on class: %zd", actual_count);
+ ThrowClassFormatError(klass.Get(), "Too many methods defined on class: %zd", actual_count);
return false;
}
// Shrink vtable if possible
CHECK_LE(actual_count, max_count);
if (actual_count < max_count) {
- vtable.reset(vtable->CopyOf(self, actual_count));
- if (UNLIKELY(vtable.get() == NULL)) {
+ vtable.Assign(vtable->CopyOf(self, actual_count));
+ if (UNLIKELY(vtable.Get() == NULL)) {
CHECK(self->IsExceptionPending()); // OOME.
return false;
}
}
- klass->SetVTable(vtable.get());
+ klass->SetVTable(vtable.Get());
} else {
- CHECK(klass.get() == GetClassRoot(kJavaLangObject));
+ CHECK(klass.Get() == GetClassRoot(kJavaLangObject));
uint32_t num_virtual_methods = klass->NumVirtualMethods();
if (!IsUint(16, num_virtual_methods)) {
- ThrowClassFormatError(klass.get(), "Too many methods: %d", num_virtual_methods);
+ ThrowClassFormatError(klass.Get(), "Too many methods: %d", num_virtual_methods);
return false;
}
- SirtRef<mirror::ObjectArray<mirror::ArtMethod> >
- vtable(self, AllocArtMethodArray(self, num_virtual_methods));
- if (UNLIKELY(vtable.get() == NULL)) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ObjectArray<mirror::ArtMethod> >
+ vtable(hs.NewHandle(AllocArtMethodArray(self, num_virtual_methods)));
+ if (UNLIKELY(vtable.Get() == NULL)) {
CHECK(self->IsExceptionPending()); // OOME.
return false;
}
@@ -3601,13 +3606,13 @@
vtable->Set<false>(i, virtual_method);
virtual_method->SetMethodIndex(i & 0xFFFF);
}
- klass->SetVTable(vtable.get());
+ klass->SetVTable(vtable.Get());
}
return true;
}
-bool ClassLinker::LinkInterfaceMethods(const SirtRef<mirror::Class>& klass,
- const SirtRef<mirror::ObjectArray<mirror::Class> >& interfaces) {
+bool ClassLinker::LinkInterfaceMethods(const Handle<mirror::Class>& klass,
+ const Handle<mirror::ObjectArray<mirror::Class> >& interfaces) {
// Set the imt table to be all conflicts by default.
klass->SetImTable(Runtime::Current()->GetDefaultImt());
size_t super_ifcount;
@@ -3619,13 +3624,13 @@
size_t ifcount = super_ifcount;
uint32_t num_interfaces;
{
- ClassHelper kh(klass.get());
+ ClassHelper kh(klass.Get());
num_interfaces =
- interfaces.get() == nullptr ? kh.NumDirectInterfaces() : interfaces->GetLength();
+ interfaces.Get() == nullptr ? kh.NumDirectInterfaces() : interfaces->GetLength();
ifcount += num_interfaces;
for (size_t i = 0; i < num_interfaces; i++) {
mirror::Class* interface =
- interfaces.get() == nullptr ? kh.GetDirectInterface(i) : interfaces->Get(i);
+ interfaces.Get() == nullptr ? kh.GetDirectInterface(i) : interfaces->Get(i);
ifcount += interface->GetIfTableCount();
}
}
@@ -3652,8 +3657,9 @@
}
}
Thread* self = Thread::Current();
- SirtRef<mirror::IfTable> iftable(self, AllocIfTable(self, ifcount));
- if (UNLIKELY(iftable.get() == NULL)) {
+ StackHandleScope<2> hs(self);
+ Handle<mirror::IfTable> iftable(hs.NewHandle(AllocIfTable(self, ifcount)));
+ if (UNLIKELY(iftable.Get() == NULL)) {
CHECK(self->IsExceptionPending()); // OOME.
return false;
}
@@ -3667,14 +3673,14 @@
// Flatten the interface inheritance hierarchy.
size_t idx = super_ifcount;
for (size_t i = 0; i < num_interfaces; i++) {
- ClassHelper kh(klass.get());
+ ClassHelper kh(klass.Get());
mirror::Class* interface =
- interfaces.get() == nullptr ? kh.GetDirectInterface(i) : interfaces->Get(i);
+ interfaces.Get() == nullptr ? kh.GetDirectInterface(i) : interfaces->Get(i);
DCHECK(interface != NULL);
if (!interface->IsInterface()) {
ClassHelper ih(interface);
- ThrowIncompatibleClassChangeError(klass.get(), "Class %s implements non-interface class %s",
- PrettyDescriptor(klass.get()).c_str(),
+ ThrowIncompatibleClassChangeError(klass.Get(), "Class %s implements non-interface class %s",
+ PrettyDescriptor(klass.Get()).c_str(),
PrettyDescriptor(ih.GetDescriptor()).c_str());
return false;
}
@@ -3709,8 +3715,8 @@
}
// Shrink iftable in case duplicates were found
if (idx < ifcount) {
- iftable.reset(down_cast<mirror::IfTable*>(iftable->CopyOf(self, idx * mirror::IfTable::kMax)));
- if (UNLIKELY(iftable.get() == NULL)) {
+ iftable.Assign(down_cast<mirror::IfTable*>(iftable->CopyOf(self, idx * mirror::IfTable::kMax)));
+ if (UNLIKELY(iftable.Get() == NULL)) {
CHECK(self->IsExceptionPending()); // OOME.
return false;
}
@@ -3718,7 +3724,7 @@
} else {
CHECK_EQ(idx, ifcount);
}
- klass->SetIfTable(iftable.get());
+ klass->SetIfTable(iftable.Get());
// If we're an interface, we don't need the vtable pointers, so we're done.
if (klass->IsInterface()) {
@@ -3726,8 +3732,9 @@
}
// Allocate imtable
bool imtable_changed = false;
- SirtRef<mirror::ObjectArray<mirror::ArtMethod> > imtable(self, AllocArtMethodArray(self, kImtSize));
- if (UNLIKELY(imtable.get() == NULL)) {
+ Handle<mirror::ObjectArray<mirror::ArtMethod> > imtable(
+ hs.NewHandle(AllocArtMethodArray(self, kImtSize)));
+ if (UNLIKELY(imtable.Get() == NULL)) {
CHECK(self->IsExceptionPending()); // OOME.
return false;
}
@@ -3735,15 +3742,16 @@
for (size_t i = 0; i < ifcount; ++i) {
size_t num_methods = iftable->GetInterface(i)->NumVirtualMethods();
if (num_methods > 0) {
- SirtRef<mirror::ObjectArray<mirror::ArtMethod> >
- method_array(self, AllocArtMethodArray(self, num_methods));
- if (UNLIKELY(method_array.get() == nullptr)) {
+ StackHandleScope<2> hs(self);
+ Handle<mirror::ObjectArray<mirror::ArtMethod> >
+ method_array(hs.NewHandle(AllocArtMethodArray(self, num_methods)));
+ if (UNLIKELY(method_array.Get() == nullptr)) {
CHECK(self->IsExceptionPending()); // OOME.
return false;
}
- iftable->SetMethodArray(i, method_array.get());
- SirtRef<mirror::ObjectArray<mirror::ArtMethod> > vtable(self,
- klass->GetVTableDuringLinking());
+ iftable->SetMethodArray(i, method_array.Get());
+ Handle<mirror::ObjectArray<mirror::ArtMethod> > vtable(
+ hs.NewHandle(klass->GetVTableDuringLinking()));
for (size_t j = 0; j < num_methods; ++j) {
mirror::ArtMethod* interface_method = iftable->GetInterface(i)->GetVirtualMethod(j);
MethodHelper interface_mh(interface_method);
@@ -3761,7 +3769,7 @@
MethodHelper vtable_mh(vtable_method);
if (interface_mh.HasSameNameAndSignature(&vtable_mh)) {
if (!vtable_method->IsAbstract() && !vtable_method->IsPublic()) {
- ThrowIllegalAccessError(klass.get(),
+ ThrowIllegalAccessError(klass.Get(),
"Method '%s' implementing interface method '%s' is not public",
PrettyMethod(vtable_method).c_str(),
PrettyMethod(interface_method).c_str());
@@ -3780,26 +3788,27 @@
}
}
if (k < 0) {
- SirtRef<mirror::ArtMethod> miranda_method(self, NULL);
+ StackHandleScope<1> hs(self);
+ auto miranda_method = hs.NewHandle<mirror::ArtMethod>(nullptr);
for (size_t mir = 0; mir < miranda_list.size(); mir++) {
mirror::ArtMethod* mir_method = miranda_list[mir];
MethodHelper vtable_mh(mir_method);
if (interface_mh.HasSameNameAndSignature(&vtable_mh)) {
- miranda_method.reset(miranda_list[mir]);
+ miranda_method.Assign(miranda_list[mir]);
break;
}
}
- if (miranda_method.get() == NULL) {
+ if (miranda_method.Get() == NULL) {
// Point the interface table at a phantom slot.
- miranda_method.reset(down_cast<mirror::ArtMethod*>(interface_method->Clone(self)));
- if (UNLIKELY(miranda_method.get() == NULL)) {
+ miranda_method.Assign(down_cast<mirror::ArtMethod*>(interface_method->Clone(self)));
+ if (UNLIKELY(miranda_method.Get() == NULL)) {
CHECK(self->IsExceptionPending()); // OOME.
return false;
}
// TODO: If a methods move then the miranda_list may hold stale references.
- miranda_list.push_back(miranda_method.get());
+ miranda_list.push_back(miranda_method.Get());
}
- method_array->Set<false>(j, miranda_method.get());
+ method_array->Set<false>(j, miranda_method.Get());
}
}
}
@@ -3812,7 +3821,7 @@
imtable->Set<false>(i, imt_conflict_method);
}
}
- klass->SetImTable(imtable.get());
+ klass->SetImTable(imtable.Get());
}
if (!miranda_list.empty()) {
int old_method_count = klass->NumVirtualMethods();
@@ -3829,13 +3838,14 @@
}
klass->SetVirtualMethods(virtuals);
- SirtRef<mirror::ObjectArray<mirror::ArtMethod> >
- vtable(self, klass->GetVTableDuringLinking());
- CHECK(vtable.get() != NULL);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ObjectArray<mirror::ArtMethod> > vtable(
+ hs.NewHandle(klass->GetVTableDuringLinking()));
+ CHECK(vtable.Get() != NULL);
int old_vtable_count = vtable->GetLength();
int new_vtable_count = old_vtable_count + miranda_list.size();
- vtable.reset(vtable->CopyOf(self, new_vtable_count));
- if (UNLIKELY(vtable.get() == NULL)) {
+ vtable.Assign(vtable->CopyOf(self, new_vtable_count));
+ if (UNLIKELY(vtable.Get() == NULL)) {
CHECK(self->IsExceptionPending()); // OOME.
return false;
}
@@ -3848,7 +3858,7 @@
vtable->Set<false>(old_vtable_count + i, method);
}
// TODO: do not assign to the vtable field until it is fully constructed.
- klass->SetVTable(vtable.get());
+ klass->SetVTable(vtable.Get());
}
mirror::ObjectArray<mirror::ArtMethod>* vtable = klass->GetVTableDuringLinking();
@@ -3861,13 +3871,13 @@
return true;
}
-bool ClassLinker::LinkInstanceFields(const SirtRef<mirror::Class>& klass) {
- CHECK(klass.get() != NULL);
+bool ClassLinker::LinkInstanceFields(const Handle<mirror::Class>& klass) {
+ CHECK(klass.Get() != NULL);
return LinkFields(klass, false);
}
-bool ClassLinker::LinkStaticFields(const SirtRef<mirror::Class>& klass) {
- CHECK(klass.get() != NULL);
+bool ClassLinker::LinkStaticFields(const Handle<mirror::Class>& klass) {
+ CHECK(klass.Get() != NULL);
size_t allocated_class_size = klass->GetClassSize();
bool success = LinkFields(klass, true);
CHECK_EQ(allocated_class_size, klass->GetClassSize());
@@ -3903,7 +3913,7 @@
}
};
-bool ClassLinker::LinkFields(const SirtRef<mirror::Class>& klass, bool is_static) {
+bool ClassLinker::LinkFields(const Handle<mirror::Class>& klass, bool is_static) {
size_t num_fields =
is_static ? klass->NumStaticFields() : klass->NumInstanceFields();
@@ -3998,7 +4008,7 @@
// We lie to the GC about the java.lang.ref.Reference.referent field, so it doesn't scan it.
if (!is_static &&
- (strcmp("Ljava/lang/ref/Reference;", ClassHelper(klass.get()).GetDescriptor()) == 0)) {
+ (strcmp("Ljava/lang/ref/Reference;", ClassHelper(klass.Get()).GetDescriptor()) == 0)) {
// We know there are no non-reference fields in the Reference classes, and we know
// that 'referent' is alphabetically last, so this is easy...
CHECK_EQ(num_reference_fields, num_fields);
@@ -4015,7 +4025,7 @@
mirror::ArtField* field = fields->Get(i);
if (false) { // enable to debug field layout
LOG(INFO) << "LinkFields: " << (is_static ? "static" : "instance")
- << " class=" << PrettyClass(klass.get())
+ << " class=" << PrettyClass(klass.Get())
<< " field=" << PrettyField(field)
<< " offset="
<< field->GetField32(MemberOffset(mirror::ArtField::OffsetOffset()));
@@ -4023,7 +4033,7 @@
FieldHelper fh(field);
Primitive::Type type = fh.GetTypeAsPrimitiveType();
bool is_primitive = type != Primitive::kPrimNot;
- if ((strcmp("Ljava/lang/ref/Reference;", ClassHelper(klass.get()).GetDescriptor()) == 0)
+ if ((strcmp("Ljava/lang/ref/Reference;", ClassHelper(klass.Get()).GetDescriptor()) == 0)
&& (strcmp("referent", fh.GetName()) == 0)) {
is_primitive = true; // We lied above, so we have to expect a lie here.
}
@@ -4048,7 +4058,7 @@
} else {
klass->SetNumReferenceInstanceFields(num_reference_fields);
if (!klass->IsVariableSize()) {
- DCHECK_GE(size, sizeof(mirror::Object)) << ClassHelper(klass.get()).GetDescriptor();
+ DCHECK_GE(size, sizeof(mirror::Object)) << ClassHelper(klass.Get()).GetDescriptor();
size_t previous_size = klass->GetObjectSize();
if (previous_size != 0) {
// Make sure that we didn't originally have an incorrect size.
@@ -4062,7 +4072,7 @@
// Set the bitmap of reference offsets, refOffsets, from the ifields
// list.
-void ClassLinker::CreateReferenceInstanceOffsets(const SirtRef<mirror::Class>& klass) {
+void ClassLinker::CreateReferenceInstanceOffsets(const Handle<mirror::Class>& klass) {
uint32_t reference_offsets = 0;
mirror::Class* super_class = klass->GetSuperClass();
if (super_class != NULL) {
@@ -4076,11 +4086,11 @@
CreateReferenceOffsets(klass, false, reference_offsets);
}
-void ClassLinker::CreateReferenceStaticOffsets(const SirtRef<mirror::Class>& klass) {
+void ClassLinker::CreateReferenceStaticOffsets(const Handle<mirror::Class>& klass) {
CreateReferenceOffsets(klass, true, 0);
}
-void ClassLinker::CreateReferenceOffsets(const SirtRef<mirror::Class>& klass, bool is_static,
+void ClassLinker::CreateReferenceOffsets(const Handle<mirror::Class>& klass, bool is_static,
uint32_t reference_offsets) {
size_t num_reference_fields =
is_static ? klass->NumReferenceStaticFieldsDuringLinking()
@@ -4113,8 +4123,8 @@
}
mirror::String* ClassLinker::ResolveString(const DexFile& dex_file, uint32_t string_idx,
- const SirtRef<mirror::DexCache>& dex_cache) {
- DCHECK(dex_cache.get() != nullptr);
+ const Handle<mirror::DexCache>& dex_cache) {
+ DCHECK(dex_cache.Get() != nullptr);
mirror::String* resolved = dex_cache->GetResolvedString(string_idx);
if (resolved != NULL) {
return resolved;
@@ -4128,16 +4138,16 @@
mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file, uint16_t type_idx,
mirror::Class* referrer) {
- Thread* self = Thread::Current();
- SirtRef<mirror::DexCache> dex_cache(self, referrer->GetDexCache());
- SirtRef<mirror::ClassLoader> class_loader(self, referrer->GetClassLoader());
+ StackHandleScope<2> hs(Thread::Current());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(referrer->GetDexCache()));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(referrer->GetClassLoader()));
return ResolveType(dex_file, type_idx, dex_cache, class_loader);
}
mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file, uint16_t type_idx,
- const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader) {
- DCHECK(dex_cache.get() != NULL);
+ const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader) {
+ DCHECK(dex_cache.Get() != NULL);
mirror::Class* resolved = dex_cache->GetResolvedType(type_idx);
if (resolved == NULL) {
Thread* self = Thread::Current();
@@ -4152,12 +4162,13 @@
CHECK(self->IsExceptionPending())
<< "Expected pending exception for failed resolution of: " << descriptor;
// Convert a ClassNotFoundException to a NoClassDefFoundError.
- SirtRef<mirror::Throwable> cause(self, self->GetException(NULL));
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Throwable> cause(hs.NewHandle(self->GetException(nullptr)));
if (cause->InstanceOf(GetClassRoot(kJavaLangClassNotFoundException))) {
- DCHECK(resolved == NULL); // No SirtRef needed to preserve resolved.
+ DCHECK(resolved == NULL); // No Handle needed to preserve resolved.
self->ClearException();
ThrowNoClassDefFoundError("Failed resolution of: %s", descriptor);
- self->GetException(NULL)->SetCause(cause.get());
+ self->GetException(NULL)->SetCause(cause.Get());
}
}
}
@@ -4168,11 +4179,11 @@
mirror::ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file,
uint32_t method_idx,
- const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader,
+ const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader,
mirror::ArtMethod* referrer,
InvokeType type) {
- DCHECK(dex_cache.get() != NULL);
+ DCHECK(dex_cache.Get() != NULL);
// Check for hit in the dex cache.
mirror::ArtMethod* resolved = dex_cache->GetResolvedMethod(method_idx);
if (resolved != NULL && !resolved->IsRuntimeMethod()) {
@@ -4190,15 +4201,15 @@
switch (type) {
case kDirect: // Fall-through.
case kStatic:
- resolved = klass->FindDirectMethod(dex_cache.get(), method_idx);
+ resolved = klass->FindDirectMethod(dex_cache.Get(), method_idx);
break;
case kInterface:
- resolved = klass->FindInterfaceMethod(dex_cache.get(), method_idx);
+ resolved = klass->FindInterfaceMethod(dex_cache.Get(), method_idx);
DCHECK(resolved == NULL || resolved->GetDeclaringClass()->IsInterface());
break;
case kSuper: // Fall-through.
case kVirtual:
- resolved = klass->FindVirtualMethod(dex_cache.get(), method_idx);
+ resolved = klass->FindVirtualMethod(dex_cache.Get(), method_idx);
break;
default:
LOG(FATAL) << "Unreachable - invocation type: " << type;
@@ -4314,10 +4325,10 @@
}
mirror::ArtField* ClassLinker::ResolveField(const DexFile& dex_file, uint32_t field_idx,
- const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader,
+ const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader,
bool is_static) {
- DCHECK(dex_cache.get() != nullptr);
+ DCHECK(dex_cache.Get() != nullptr);
mirror::ArtField* resolved = dex_cache->GetResolvedField(field_idx);
if (resolved != NULL) {
return resolved;
@@ -4330,9 +4341,9 @@
}
if (is_static) {
- resolved = klass->FindStaticField(dex_cache.get(), field_idx);
+ resolved = klass->FindStaticField(dex_cache.Get(), field_idx);
} else {
- resolved = klass->FindInstanceField(dex_cache.get(), field_idx);
+ resolved = klass->FindInstanceField(dex_cache.Get(), field_idx);
}
if (resolved == NULL) {
@@ -4354,9 +4365,9 @@
mirror::ArtField* ClassLinker::ResolveFieldJLS(const DexFile& dex_file,
uint32_t field_idx,
- const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader) {
- DCHECK(dex_cache.get() != nullptr);
+ const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader) {
+ DCHECK(dex_cache.Get() != nullptr);
mirror::ArtField* resolved = dex_cache->GetResolvedField(field_idx);
if (resolved != NULL) {
return resolved;
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 2c6873e..3dac6e5 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -47,7 +47,7 @@
class InternTable;
template<class T> class ObjectLock;
class ScopedObjectAccess;
-template<class T> class SirtRef;
+template<class T> class Handle;
typedef bool (ClassVisitor)(mirror::Class* c, void* arg);
@@ -75,7 +75,7 @@
// Finds a class by its descriptor, loading it if necessary.
// If class_loader is null, searches boot_class_path_.
mirror::Class* FindClass(Thread* self, const char* descriptor,
- const SirtRef<mirror::ClassLoader>& class_loader)
+ const Handle<mirror::ClassLoader>& class_loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Finds a class by its descriptor using the "system" class loader, ie by searching the
@@ -92,7 +92,7 @@
// Define a new a class based on a ClassDef from a DexFile
mirror::Class* DefineClass(const char* descriptor,
- const SirtRef<mirror::ClassLoader>& class_loader,
+ const Handle<mirror::ClassLoader>& class_loader,
const DexFile& dex_file, const DexFile::ClassDef& dex_class_def)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -136,7 +136,7 @@
// Resolve a String with the given index from the DexFile, storing the
// result in the DexCache.
mirror::String* ResolveString(const DexFile& dex_file, uint32_t string_idx,
- const SirtRef<mirror::DexCache>& dex_cache)
+ const Handle<mirror::DexCache>& dex_cache)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Resolve a Type with the given index from the DexFile, storing the
@@ -159,8 +159,8 @@
// type, since it may be referenced from but not contained within
// the given DexFile.
mirror::Class* ResolveType(const DexFile& dex_file, uint16_t type_idx,
- const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader)
+ const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Resolve a method with a given ID from the DexFile, storing the
@@ -170,8 +170,8 @@
// virtual method.
mirror::ArtMethod* ResolveMethod(const DexFile& dex_file,
uint32_t method_idx,
- const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader,
+ const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader,
mirror::ArtMethod* referrer,
InvokeType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -191,8 +191,8 @@
// field.
mirror::ArtField* ResolveField(const DexFile& dex_file,
uint32_t field_idx,
- const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader,
+ const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader,
bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -202,8 +202,8 @@
// field resolution semantics are followed.
mirror::ArtField* ResolveFieldJLS(const DexFile& dex_file,
uint32_t field_idx,
- const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader)
+ const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get shorty from method index without resolution. Used to do handlerization.
@@ -213,7 +213,7 @@
// Returns true on success, false if there's an exception pending.
// can_run_clinit=false allows the compiler to attempt to init a class,
// given the restriction that no <clinit> execution is possible.
- bool EnsureInitialized(const SirtRef<mirror::Class>& c,
+ bool EnsureInitialized(const Handle<mirror::Class>& c,
bool can_init_fields, bool can_init_parents)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -224,7 +224,7 @@
void RegisterDexFile(const DexFile& dex_file)
LOCKS_EXCLUDED(dex_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void RegisterDexFile(const DexFile& dex_file, const SirtRef<mirror::DexCache>& dex_cache)
+ void RegisterDexFile(const DexFile& dex_file, const Handle<mirror::DexCache>& dex_cache)
LOCKS_EXCLUDED(dex_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -315,12 +315,12 @@
size_t length)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void VerifyClass(const SirtRef<mirror::Class>& klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void VerifyClass(const Handle<mirror::Class>& klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool VerifyClassUsingOatFile(const DexFile& dex_file, mirror::Class* klass,
mirror::Class::Status& oat_file_class_status)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void ResolveClassExceptionHandlerTypes(const DexFile& dex_file,
- const SirtRef<mirror::Class>& klass)
+ const Handle<mirror::Class>& klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void ResolveMethodExceptionHandlerTypes(const DexFile& dex_file, mirror::ArtMethod* klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -418,12 +418,12 @@
mirror::Class* CreateArrayClass(Thread* self, const char* descriptor,
- const SirtRef<mirror::ClassLoader>& class_loader)
+ const Handle<mirror::ClassLoader>& class_loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void AppendToBootClassPath(const DexFile& dex_file)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void AppendToBootClassPath(const DexFile& dex_file, const SirtRef<mirror::DexCache>& dex_cache)
+ void AppendToBootClassPath(const DexFile& dex_file, const Handle<mirror::DexCache>& dex_cache)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void ConstructFieldMap(const DexFile& dex_file, const DexFile::ClassDef& dex_class_def,
@@ -435,23 +435,23 @@
void LoadClass(const DexFile& dex_file,
const DexFile::ClassDef& dex_class_def,
- const SirtRef<mirror::Class>& klass,
+ const Handle<mirror::Class>& klass,
mirror::ClassLoader* class_loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void LoadClassMembers(const DexFile& dex_file,
const byte* class_data,
- const SirtRef<mirror::Class>& klass,
+ const Handle<mirror::Class>& klass,
mirror::ClassLoader* class_loader,
const OatFile::OatClass* oat_class)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void LoadField(const DexFile& dex_file, const ClassDataItemIterator& it,
- const SirtRef<mirror::Class>& klass, const SirtRef<mirror::ArtField>& dst)
+ const Handle<mirror::Class>& klass, const Handle<mirror::ArtField>& dst)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::ArtMethod* LoadMethod(Thread* self, const DexFile& dex_file,
const ClassDataItemIterator& dex_method,
- const SirtRef<mirror::Class>& klass)
+ const Handle<mirror::Class>& klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void FixupStaticTrampolines(mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -460,23 +460,23 @@
OatFile::OatClass GetOatClass(const DexFile& dex_file, uint16_t class_def_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void RegisterDexFileLocked(const DexFile& dex_file, const SirtRef<mirror::DexCache>& dex_cache)
+ void RegisterDexFileLocked(const DexFile& dex_file, const Handle<mirror::DexCache>& dex_cache)
EXCLUSIVE_LOCKS_REQUIRED(dex_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsDexFileRegisteredLocked(const DexFile& dex_file) const
SHARED_LOCKS_REQUIRED(dex_lock_, Locks::mutator_lock_);
- bool InitializeClass(const SirtRef<mirror::Class>& klass, bool can_run_clinit,
+ bool InitializeClass(const Handle<mirror::Class>& klass, bool can_run_clinit,
bool can_init_parents)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool WaitForInitializeClass(const SirtRef<mirror::Class>& klass, Thread* self,
+ bool WaitForInitializeClass(const Handle<mirror::Class>& klass, Thread* self,
ObjectLock<mirror::Class>& lock);
- bool ValidateSuperClassDescriptors(const SirtRef<mirror::Class>& klass)
+ bool ValidateSuperClassDescriptors(const Handle<mirror::Class>& klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsSameDescriptorInDifferentClassContexts(Thread* self, const char* descriptor,
- SirtRef<mirror::ClassLoader>& class_loader1,
- SirtRef<mirror::ClassLoader>& class_loader2)
+ Handle<mirror::ClassLoader>& class_loader1,
+ Handle<mirror::ClassLoader>& class_loader2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsSameMethodSignatureInDifferentClassContexts(Thread* self, mirror::ArtMethod* method,
@@ -484,40 +484,40 @@
mirror::Class* klass2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool LinkClass(Thread* self, const SirtRef<mirror::Class>& klass,
- const SirtRef<mirror::ObjectArray<mirror::Class> >& interfaces)
+ bool LinkClass(Thread* self, const Handle<mirror::Class>& klass,
+ const Handle<mirror::ObjectArray<mirror::Class> >& interfaces)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool LinkSuperClass(const SirtRef<mirror::Class>& klass)
+ bool LinkSuperClass(const Handle<mirror::Class>& klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool LoadSuperAndInterfaces(const SirtRef<mirror::Class>& klass, const DexFile& dex_file)
+ bool LoadSuperAndInterfaces(const Handle<mirror::Class>& klass, const DexFile& dex_file)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool LinkMethods(const SirtRef<mirror::Class>& klass,
- const SirtRef<mirror::ObjectArray<mirror::Class> >& interfaces)
+ bool LinkMethods(const Handle<mirror::Class>& klass,
+ const Handle<mirror::ObjectArray<mirror::Class> >& interfaces)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool LinkVirtualMethods(const SirtRef<mirror::Class>& klass)
+ bool LinkVirtualMethods(const Handle<mirror::Class>& klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool LinkInterfaceMethods(const SirtRef<mirror::Class>& klass,
- const SirtRef<mirror::ObjectArray<mirror::Class> >& interfaces)
+ bool LinkInterfaceMethods(const Handle<mirror::Class>& klass,
+ const Handle<mirror::ObjectArray<mirror::Class> >& interfaces)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool LinkStaticFields(const SirtRef<mirror::Class>& klass)
+ bool LinkStaticFields(const Handle<mirror::Class>& klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool LinkInstanceFields(const SirtRef<mirror::Class>& klass)
+ bool LinkInstanceFields(const Handle<mirror::Class>& klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool LinkFields(const SirtRef<mirror::Class>& klass, bool is_static)
+ bool LinkFields(const Handle<mirror::Class>& klass, bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void CreateReferenceInstanceOffsets(const SirtRef<mirror::Class>& klass)
+ void CreateReferenceInstanceOffsets(const Handle<mirror::Class>& klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void CreateReferenceStaticOffsets(const SirtRef<mirror::Class>& klass)
+ void CreateReferenceStaticOffsets(const Handle<mirror::Class>& klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void CreateReferenceOffsets(const SirtRef<mirror::Class>& klass, bool is_static,
+ void CreateReferenceOffsets(const Handle<mirror::Class>& klass, bool is_static,
uint32_t reference_offsets)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -546,11 +546,11 @@
bool* open_failed)
LOCKS_EXCLUDED(dex_lock_);
- mirror::ArtMethod* CreateProxyConstructor(Thread* self, const SirtRef<mirror::Class>& klass,
+ mirror::ArtMethod* CreateProxyConstructor(Thread* self, const Handle<mirror::Class>& klass,
mirror::Class* proxy_class)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::ArtMethod* CreateProxyMethod(Thread* self, const SirtRef<mirror::Class>& klass,
- const SirtRef<mirror::ArtMethod>& prototype)
+ mirror::ArtMethod* CreateProxyMethod(Thread* self, const Handle<mirror::Class>& klass,
+ const Handle<mirror::ArtMethod>& prototype)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
std::vector<const DexFile*> boot_class_path_;
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 1218357..9970dd5 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -18,7 +18,7 @@
#include <string>
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#include "class_linker-inl.h"
#include "common_runtime_test.h"
#include "dex_file.h"
@@ -34,7 +34,7 @@
#include "mirror/proxy.h"
#include "mirror/reference.h"
#include "mirror/stack_trace_element.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
namespace art {
@@ -99,9 +99,10 @@
mirror::ClassLoader* class_loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Thread* self = Thread::Current();
- SirtRef<mirror::ClassLoader> loader(self, class_loader);
- SirtRef<mirror::Class> array(self,
- class_linker_->FindClass(self, array_descriptor.c_str(), loader));
+ StackHandleScope<2> hs(self);
+ Handle<mirror::ClassLoader> loader(hs.NewHandle(class_loader));
+ Handle<mirror::Class> array(
+ hs.NewHandle(class_linker_->FindClass(self, array_descriptor.c_str(), loader)));
ClassHelper array_component_ch(array->GetComponentType());
EXPECT_STREQ(component_type.c_str(), array_component_ch.GetDescriptor());
EXPECT_EQ(class_loader, array->GetClassLoader());
@@ -109,10 +110,10 @@
AssertArrayClass(array_descriptor, array);
}
- void AssertArrayClass(const std::string& array_descriptor, const SirtRef<mirror::Class>& array)
+ void AssertArrayClass(const std::string& array_descriptor, const Handle<mirror::Class>& array)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ClassHelper kh(array.get());
- ASSERT_TRUE(array.get() != NULL);
+ ClassHelper kh(array.Get());
+ ASSERT_TRUE(array.Get() != NULL);
ASSERT_TRUE(array->GetClass() != NULL);
ASSERT_EQ(array->GetClass(), array->GetClass()->GetClass());
EXPECT_TRUE(array->GetClass()->GetSuperClass() != NULL);
@@ -141,17 +142,17 @@
EXPECT_EQ(0U, array->NumVirtualMethods());
EXPECT_EQ(0U, array->NumInstanceFields());
EXPECT_EQ(0U, array->NumStaticFields());
- kh.ChangeClass(array.get());
+ kh.ChangeClass(array.Get());
EXPECT_EQ(2U, kh.NumDirectInterfaces());
EXPECT_TRUE(array->GetVTable() != NULL);
EXPECT_EQ(2, array->GetIfTableCount());
ASSERT_TRUE(array->GetIfTable() != NULL);
kh.ChangeClass(kh.GetDirectInterface(0));
EXPECT_STREQ(kh.GetDescriptor(), "Ljava/lang/Cloneable;");
- kh.ChangeClass(array.get());
+ kh.ChangeClass(array.Get());
kh.ChangeClass(kh.GetDirectInterface(1));
EXPECT_STREQ(kh.GetDescriptor(), "Ljava/io/Serializable;");
- EXPECT_EQ(class_linker_->FindArrayClass(self, array->GetComponentType()), array.get());
+ EXPECT_EQ(class_linker_->FindArrayClass(self, array->GetComponentType()), array.Get());
}
void AssertMethod(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -182,9 +183,9 @@
EXPECT_TRUE(fh.GetType() != NULL);
}
- void AssertClass(const std::string& descriptor, const SirtRef<mirror::Class>& klass)
+ void AssertClass(const std::string& descriptor, const Handle<mirror::Class>& klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ClassHelper kh(klass.get());
+ ClassHelper kh(klass.Get());
EXPECT_STREQ(descriptor.c_str(), kh.GetDescriptor());
if (descriptor == "Ljava/lang/Object;") {
EXPECT_FALSE(klass->HasSuperClass());
@@ -200,7 +201,7 @@
EXPECT_FALSE(klass->IsErroneous());
EXPECT_FALSE(klass->IsArrayClass());
EXPECT_TRUE(klass->GetComponentType() == NULL);
- EXPECT_TRUE(klass->IsInSamePackage(klass.get()));
+ EXPECT_TRUE(klass->IsInSamePackage(klass.Get()));
EXPECT_TRUE(mirror::Class::IsInSamePackage(kh.GetDescriptor(), kh.GetDescriptor()));
if (klass->IsInterface()) {
EXPECT_TRUE(klass->IsAbstract());
@@ -242,31 +243,31 @@
}
EXPECT_FALSE(klass->IsPrimitive());
- EXPECT_TRUE(klass->CanAccess(klass.get()));
+ EXPECT_TRUE(klass->CanAccess(klass.Get()));
for (size_t i = 0; i < klass->NumDirectMethods(); i++) {
mirror::ArtMethod* method = klass->GetDirectMethod(i);
AssertMethod(method);
EXPECT_TRUE(method->IsDirect());
- EXPECT_EQ(klass.get(), method->GetDeclaringClass());
+ EXPECT_EQ(klass.Get(), method->GetDeclaringClass());
}
for (size_t i = 0; i < klass->NumVirtualMethods(); i++) {
mirror::ArtMethod* method = klass->GetVirtualMethod(i);
AssertMethod(method);
EXPECT_FALSE(method->IsDirect());
- EXPECT_TRUE(method->GetDeclaringClass()->IsAssignableFrom(klass.get()));
+ EXPECT_TRUE(method->GetDeclaringClass()->IsAssignableFrom(klass.Get()));
}
for (size_t i = 0; i < klass->NumInstanceFields(); i++) {
mirror::ArtField* field = klass->GetInstanceField(i);
- AssertField(klass.get(), field);
+ AssertField(klass.Get(), field);
EXPECT_FALSE(field->IsStatic());
}
for (size_t i = 0; i < klass->NumStaticFields(); i++) {
mirror::ArtField* field = klass->GetStaticField(i);
- AssertField(klass.get(), field);
+ AssertField(klass.Get(), field);
EXPECT_TRUE(field->IsStatic());
}
@@ -294,7 +295,7 @@
}
size_t total_num_reference_instance_fields = 0;
- mirror::Class* k = klass.get();
+ mirror::Class* k = klass.Get();
while (k != NULL) {
total_num_reference_instance_fields += k->NumReferenceInstanceFields();
k = k->GetSuperClass();
@@ -306,12 +307,14 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ASSERT_TRUE(descriptor != NULL);
Thread* self = Thread::Current();
- SirtRef<mirror::Class> klass(self, class_linker_->FindSystemClass(self, descriptor.c_str()));
- ASSERT_TRUE(klass.get() != nullptr);
- EXPECT_STREQ(descriptor.c_str(), ClassHelper(klass.get()).GetDescriptor());
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> klass(
+ hs.NewHandle(class_linker_->FindSystemClass(self, descriptor.c_str())));
+ ASSERT_TRUE(klass.Get() != nullptr);
+ EXPECT_STREQ(descriptor.c_str(), ClassHelper(klass.Get()).GetDescriptor());
EXPECT_EQ(class_loader, klass->GetClassLoader());
if (klass->IsPrimitive()) {
- AssertPrimitiveClass(descriptor, klass.get());
+ AssertPrimitiveClass(descriptor, klass.Get());
} else if (klass->IsArrayClass()) {
AssertArrayClass(descriptor, klass);
} else {
@@ -491,9 +494,6 @@
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_code_item_offset_), "dexCodeItemOffset"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_method_index_), "dexMethodIndex"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, method_index_), "methodIndex"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, quick_core_spill_mask_), "quickCoreSpillMask"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, quick_fp_spill_mask_), "quickFpSpillMask"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, quick_frame_size_in_bytes_), "quickFrameSizeInBytes"));
};
};
@@ -674,7 +674,9 @@
TEST_F(ClassLinkerTest, FindClassNested) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(LoadDex("Nested")));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(LoadDex("Nested"))));
mirror::Class* outer = class_linker_->FindClass(soa.Self(), "LNested;", class_loader);
ASSERT_TRUE(outer != NULL);
@@ -748,7 +750,9 @@
EXPECT_EQ(0U, JavaLangObject->NumStaticFields());
EXPECT_EQ(0U, kh.NumDirectInterfaces());
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(LoadDex("MyClass")));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(LoadDex("MyClass"))));
AssertNonExistentClass("LMyClass;");
mirror::Class* MyClass = class_linker_->FindClass(soa.Self(), "LMyClass;", class_loader);
kh.ChangeClass(MyClass);
@@ -759,7 +763,7 @@
ASSERT_STREQ(kh.GetDescriptor(), "LMyClass;");
EXPECT_TRUE(MyClass->GetSuperClass() == JavaLangObject);
EXPECT_TRUE(MyClass->HasSuperClass());
- EXPECT_EQ(class_loader.get(), MyClass->GetClassLoader());
+ EXPECT_EQ(class_loader.Get(), MyClass->GetClassLoader());
EXPECT_EQ(mirror::Class::kStatusResolved, MyClass->GetStatus());
EXPECT_FALSE(MyClass->IsErroneous());
EXPECT_TRUE(MyClass->IsLoaded());
@@ -787,7 +791,7 @@
AssertArrayClass("[Ljava/lang/Object;", "Ljava/lang/Object;", NULL);
// synthesized on the fly
AssertArrayClass("[[C", "[C", NULL);
- AssertArrayClass("[[[LMyClass;", "[[LMyClass;", class_loader.get());
+ AssertArrayClass("[[[LMyClass;", "[[LMyClass;", class_loader.Get());
// or not available at all
AssertNonExistentClass("[[[[LNonExistentClass;");
}
@@ -816,27 +820,28 @@
TEST_F(ClassLinkerTest, ValidatePrimitiveArrayElementsOffset) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::LongArray> long_array(soa.Self(), mirror::LongArray::Alloc(soa.Self(), 0));
+ StackHandleScope<5> hs(soa.Self());
+ Handle<mirror::LongArray> long_array(hs.NewHandle(mirror::LongArray::Alloc(soa.Self(), 0)));
EXPECT_EQ(class_linker_->FindSystemClass(soa.Self(), "[J"), long_array->GetClass());
uintptr_t data_offset = reinterpret_cast<uintptr_t>(long_array->GetData());
EXPECT_TRUE(IsAligned<8>(data_offset)); // Longs require 8 byte alignment
- SirtRef<mirror::DoubleArray> double_array(soa.Self(), mirror::DoubleArray::Alloc(soa.Self(), 0));
+ Handle<mirror::DoubleArray> double_array(hs.NewHandle(mirror::DoubleArray::Alloc(soa.Self(), 0)));
EXPECT_EQ(class_linker_->FindSystemClass(soa.Self(), "[D"), double_array->GetClass());
data_offset = reinterpret_cast<uintptr_t>(double_array->GetData());
EXPECT_TRUE(IsAligned<8>(data_offset)); // Doubles require 8 byte alignment
- SirtRef<mirror::IntArray> int_array(soa.Self(), mirror::IntArray::Alloc(soa.Self(), 0));
+ Handle<mirror::IntArray> int_array(hs.NewHandle(mirror::IntArray::Alloc(soa.Self(), 0)));
EXPECT_EQ(class_linker_->FindSystemClass(soa.Self(), "[I"), int_array->GetClass());
data_offset = reinterpret_cast<uintptr_t>(int_array->GetData());
EXPECT_TRUE(IsAligned<4>(data_offset)); // Ints require 4 byte alignment
- SirtRef<mirror::CharArray> char_array(soa.Self(), mirror::CharArray::Alloc(soa.Self(), 0));
+ Handle<mirror::CharArray> char_array(hs.NewHandle(mirror::CharArray::Alloc(soa.Self(), 0)));
EXPECT_EQ(class_linker_->FindSystemClass(soa.Self(), "[C"), char_array->GetClass());
data_offset = reinterpret_cast<uintptr_t>(char_array->GetData());
EXPECT_TRUE(IsAligned<2>(data_offset)); // Chars require 2 byte alignment
- SirtRef<mirror::ShortArray> short_array(soa.Self(), mirror::ShortArray::Alloc(soa.Self(), 0));
+ Handle<mirror::ShortArray> short_array(hs.NewHandle(mirror::ShortArray::Alloc(soa.Self(), 0)));
EXPECT_EQ(class_linker_->FindSystemClass(soa.Self(), "[S"), short_array->GetClass());
data_offset = reinterpret_cast<uintptr_t>(short_array->GetData());
EXPECT_TRUE(IsAligned<2>(data_offset)); // Shorts require 2 byte alignment
@@ -848,7 +853,8 @@
// Validate that the "value" field is always the 0th field in each of java.lang's box classes.
// This lets UnboxPrimitive avoid searching for the field by name at runtime.
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(), nullptr);
+ StackHandleScope<1> hs(soa.Self());
+ auto class_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
mirror::Class* c;
c = class_linker_->FindClass(soa.Self(), "Ljava/lang/Boolean;", class_loader);
FieldHelper fh(c->GetIFields()->Get(0));
@@ -878,21 +884,25 @@
TEST_F(ClassLinkerTest, TwoClassLoadersOneClass) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::ClassLoader> class_loader_1(soa.Self(), soa.Decode<mirror::ClassLoader*>(LoadDex("MyClass")));
- SirtRef<mirror::ClassLoader> class_loader_2(soa.Self(), soa.Decode<mirror::ClassLoader*>(LoadDex("MyClass")));
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader_1(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(LoadDex("MyClass"))));
+ Handle<mirror::ClassLoader> class_loader_2(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(LoadDex("MyClass"))));
mirror::Class* MyClass_1 = class_linker_->FindClass(soa.Self(), "LMyClass;", class_loader_1);
mirror::Class* MyClass_2 = class_linker_->FindClass(soa.Self(), "LMyClass;", class_loader_2);
- EXPECT_TRUE(MyClass_1 != NULL);
- EXPECT_TRUE(MyClass_2 != NULL);
+ EXPECT_TRUE(MyClass_1 != nullptr);
+ EXPECT_TRUE(MyClass_2 != nullptr);
EXPECT_NE(MyClass_1, MyClass_2);
}
TEST_F(ClassLinkerTest, StaticFields) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
- soa.Decode<mirror::ClassLoader*>(LoadDex("Statics")));
- SirtRef<mirror::Class> statics(soa.Self(), class_linker_->FindClass(soa.Self(), "LStatics;",
- class_loader));
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(LoadDex("Statics"))));
+ Handle<mirror::Class> statics(
+ hs.NewHandle(class_linker_->FindClass(soa.Self(), "LStatics;", class_loader)));
class_linker_->EnsureInitialized(statics, true, true);
// Static final primitives that are initialized by a compile-time constant
@@ -907,74 +917,76 @@
FieldHelper fh(s0);
EXPECT_STREQ(ClassHelper(s0->GetClass()).GetDescriptor(), "Ljava/lang/reflect/ArtField;");
EXPECT_TRUE(fh.GetTypeAsPrimitiveType() == Primitive::kPrimBoolean);
- EXPECT_EQ(true, s0->GetBoolean(statics.get()));
- s0->SetBoolean<false>(statics.get(), false);
+ EXPECT_EQ(true, s0->GetBoolean(statics.Get()));
+ s0->SetBoolean<false>(statics.Get(), false);
mirror::ArtField* s1 = statics->FindStaticField("s1", "B");
fh.ChangeField(s1);
EXPECT_TRUE(fh.GetTypeAsPrimitiveType() == Primitive::kPrimByte);
- EXPECT_EQ(5, s1->GetByte(statics.get()));
- s1->SetByte<false>(statics.get(), 6);
+ EXPECT_EQ(5, s1->GetByte(statics.Get()));
+ s1->SetByte<false>(statics.Get(), 6);
mirror::ArtField* s2 = statics->FindStaticField("s2", "C");
fh.ChangeField(s2);
EXPECT_TRUE(fh.GetTypeAsPrimitiveType() == Primitive::kPrimChar);
- EXPECT_EQ('a', s2->GetChar(statics.get()));
- s2->SetChar<false>(statics.get(), 'b');
+ EXPECT_EQ('a', s2->GetChar(statics.Get()));
+ s2->SetChar<false>(statics.Get(), 'b');
mirror::ArtField* s3 = statics->FindStaticField("s3", "S");
fh.ChangeField(s3);
EXPECT_TRUE(fh.GetTypeAsPrimitiveType() == Primitive::kPrimShort);
- EXPECT_EQ(-536, s3->GetShort(statics.get()));
- s3->SetShort<false>(statics.get(), -535);
+ EXPECT_EQ(-536, s3->GetShort(statics.Get()));
+ s3->SetShort<false>(statics.Get(), -535);
mirror::ArtField* s4 = statics->FindStaticField("s4", "I");
fh.ChangeField(s4);
EXPECT_TRUE(fh.GetTypeAsPrimitiveType() == Primitive::kPrimInt);
- EXPECT_EQ(2000000000, s4->GetInt(statics.get()));
- s4->SetInt<false>(statics.get(), 2000000001);
+ EXPECT_EQ(2000000000, s4->GetInt(statics.Get()));
+ s4->SetInt<false>(statics.Get(), 2000000001);
mirror::ArtField* s5 = statics->FindStaticField("s5", "J");
fh.ChangeField(s5);
EXPECT_TRUE(fh.GetTypeAsPrimitiveType() == Primitive::kPrimLong);
- EXPECT_EQ(0x1234567890abcdefLL, s5->GetLong(statics.get()));
- s5->SetLong<false>(statics.get(), INT64_C(0x34567890abcdef12));
+ EXPECT_EQ(0x1234567890abcdefLL, s5->GetLong(statics.Get()));
+ s5->SetLong<false>(statics.Get(), INT64_C(0x34567890abcdef12));
mirror::ArtField* s6 = statics->FindStaticField("s6", "F");
fh.ChangeField(s6);
EXPECT_TRUE(fh.GetTypeAsPrimitiveType() == Primitive::kPrimFloat);
- EXPECT_EQ(0.5, s6->GetFloat(statics.get()));
- s6->SetFloat<false>(statics.get(), 0.75);
+ EXPECT_EQ(0.5, s6->GetFloat(statics.Get()));
+ s6->SetFloat<false>(statics.Get(), 0.75);
mirror::ArtField* s7 = statics->FindStaticField("s7", "D");
fh.ChangeField(s7);
EXPECT_TRUE(fh.GetTypeAsPrimitiveType() == Primitive::kPrimDouble);
- EXPECT_EQ(16777217, s7->GetDouble(statics.get()));
- s7->SetDouble<false>(statics.get(), 16777219);
+ EXPECT_EQ(16777217, s7->GetDouble(statics.Get()));
+ s7->SetDouble<false>(statics.Get(), 16777219);
mirror::ArtField* s8 = statics->FindStaticField("s8", "Ljava/lang/String;");
fh.ChangeField(s8);
EXPECT_TRUE(fh.GetTypeAsPrimitiveType() == Primitive::kPrimNot);
- EXPECT_TRUE(s8->GetObject(statics.get())->AsString()->Equals("android"));
+ EXPECT_TRUE(s8->GetObject(statics.Get())->AsString()->Equals("android"));
s8->SetObject<false>(s8->GetDeclaringClass(),
mirror::String::AllocFromModifiedUtf8(soa.Self(), "robot"));
// TODO: Remove EXPECT_FALSE when GCC can handle EXPECT_EQ
// http://code.google.com/p/googletest/issues/detail?id=322
- EXPECT_FALSE(s0->GetBoolean(statics.get()));
- EXPECT_EQ(6, s1->GetByte(statics.get()));
- EXPECT_EQ('b', s2->GetChar(statics.get()));
- EXPECT_EQ(-535, s3->GetShort(statics.get()));
- EXPECT_EQ(2000000001, s4->GetInt(statics.get()));
- EXPECT_EQ(INT64_C(0x34567890abcdef12), s5->GetLong(statics.get()));
- EXPECT_EQ(0.75, s6->GetFloat(statics.get()));
- EXPECT_EQ(16777219, s7->GetDouble(statics.get()));
- EXPECT_TRUE(s8->GetObject(statics.get())->AsString()->Equals("robot"));
+ EXPECT_FALSE(s0->GetBoolean(statics.Get()));
+ EXPECT_EQ(6, s1->GetByte(statics.Get()));
+ EXPECT_EQ('b', s2->GetChar(statics.Get()));
+ EXPECT_EQ(-535, s3->GetShort(statics.Get()));
+ EXPECT_EQ(2000000001, s4->GetInt(statics.Get()));
+ EXPECT_EQ(INT64_C(0x34567890abcdef12), s5->GetLong(statics.Get()));
+ EXPECT_EQ(0.75, s6->GetFloat(statics.Get()));
+ EXPECT_EQ(16777219, s7->GetDouble(statics.Get()));
+ EXPECT_TRUE(s8->GetObject(statics.Get())->AsString()->Equals("robot"));
}
TEST_F(ClassLinkerTest, Interfaces) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(LoadDex("Interfaces")));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(LoadDex("Interfaces"))));
mirror::Class* I = class_linker_->FindClass(soa.Self(), "LInterfaces$I;", class_loader);
mirror::Class* J = class_linker_->FindClass(soa.Self(), "LInterfaces$J;", class_loader);
mirror::Class* K = class_linker_->FindClass(soa.Self(), "LInterfaces$K;", class_loader);
@@ -1035,7 +1047,9 @@
ScopedObjectAccess soa(Thread::Current());
jobject jclass_loader = LoadDex("StaticsFromCode");
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(jclass_loader));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
const DexFile* dex_file = Runtime::Current()->GetCompileTimeClassPath(jclass_loader)[0];
CHECK(dex_file != NULL);
mirror::Class* klass = class_linker_->FindClass(soa.Self(), "LStaticsFromCode;", class_loader);
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 79d3690..d7a1667 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -47,7 +47,7 @@
#include "ScopedLocalRef.h"
#include "thread.h"
#include "utils.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#include "verifier/method_verifier.h"
#include "verifier/method_verifier-inl.h"
#include "well_known_classes.h"
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 22a0e22..f6b4891 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -28,6 +28,7 @@
#include "gc/accounting/card_table-inl.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
+#include "handle_scope.h"
#include "jdwp/object_registry.h"
#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
@@ -45,8 +46,7 @@
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
#include "ScopedPrimitiveArray.h"
-#include "sirt_ref.h"
-#include "stack_indirect_reference_table.h"
+#include "handle_scope-inl.h"
#include "thread_list.h"
#include "throw_location.h"
#include "utf.h"
@@ -139,7 +139,7 @@
// TODO: post location events is a suspension point and native method entry stubs aren't.
return;
}
- Dbg::PostLocationEvent(method, 0, this_object, Dbg::kMethodEntry, nullptr);
+ Dbg::UpdateDebugger(thread, this_object, method, 0, Dbg::kMethodEntry, nullptr);
}
void MethodExited(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
@@ -149,7 +149,7 @@
// TODO: post location events is a suspension point and native method entry stubs aren't.
return;
}
- Dbg::PostLocationEvent(method, dex_pc, this_object, Dbg::kMethodExit, &return_value);
+ Dbg::UpdateDebugger(thread, this_object, method, dex_pc, Dbg::kMethodExit, &return_value);
}
void MethodUnwind(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
@@ -163,7 +163,7 @@
void DexPcMoved(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
uint32_t new_dex_pc)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Dbg::UpdateDebugger(thread, this_object, method, new_dex_pc);
+ Dbg::UpdateDebugger(thread, this_object, method, new_dex_pc, 0, nullptr);
}
void FieldRead(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
@@ -228,6 +228,15 @@
size_t Dbg::full_deoptimization_event_count_ = 0;
size_t Dbg::delayed_full_undeoptimization_count_ = 0;
+// Instrumentation event reference counters.
+size_t Dbg::dex_pc_change_event_ref_count_ = 0;
+size_t Dbg::method_enter_event_ref_count_ = 0;
+size_t Dbg::method_exit_event_ref_count_ = 0;
+size_t Dbg::field_read_event_ref_count_ = 0;
+size_t Dbg::field_write_event_ref_count_ = 0;
+size_t Dbg::exception_catch_event_ref_count_ = 0;
+uint32_t Dbg::instrumentation_events_ = 0;
+
// Breakpoints.
static std::vector<Breakpoint> gBreakpoints GUARDED_BY(Locks::breakpoint_lock_);
@@ -641,14 +650,6 @@
return gDisposed;
}
-// All the instrumentation events the debugger is registered for.
-static constexpr uint32_t kListenerEvents = instrumentation::Instrumentation::kMethodEntered |
- instrumentation::Instrumentation::kMethodExited |
- instrumentation::Instrumentation::kDexPcMoved |
- instrumentation::Instrumentation::kFieldRead |
- instrumentation::Instrumentation::kFieldWritten |
- instrumentation::Instrumentation::kExceptionCaught;
-
void Dbg::GoActive() {
// Enable all debugging features, including scans for breakpoints.
// This is a no-op if we're already active.
@@ -668,6 +669,12 @@
CHECK_EQ(deoptimization_requests_.size(), 0U);
CHECK_EQ(full_deoptimization_event_count_, 0U);
CHECK_EQ(delayed_full_undeoptimization_count_, 0U);
+ CHECK_EQ(dex_pc_change_event_ref_count_, 0U);
+ CHECK_EQ(method_enter_event_ref_count_, 0U);
+ CHECK_EQ(method_exit_event_ref_count_, 0U);
+ CHECK_EQ(field_read_event_ref_count_, 0U);
+ CHECK_EQ(field_write_event_ref_count_, 0U);
+ CHECK_EQ(exception_catch_event_ref_count_, 0U);
}
Runtime* runtime = Runtime::Current();
@@ -676,7 +683,7 @@
ThreadState old_state = self->SetStateUnsafe(kRunnable);
CHECK_NE(old_state, kRunnable);
runtime->GetInstrumentation()->EnableDeoptimization();
- runtime->GetInstrumentation()->AddListener(&gDebugInstrumentationListener, kListenerEvents);
+ instrumentation_events_ = 0;
gDebuggerActive = true;
CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
runtime->GetThreadList()->ResumeAll();
@@ -708,7 +715,11 @@
full_deoptimization_event_count_ = 0U;
delayed_full_undeoptimization_count_ = 0U;
}
- runtime->GetInstrumentation()->RemoveListener(&gDebugInstrumentationListener, kListenerEvents);
+ if (instrumentation_events_ != 0) {
+ runtime->GetInstrumentation()->RemoveListener(&gDebugInstrumentationListener,
+ instrumentation_events_);
+ instrumentation_events_ = 0;
+ }
runtime->GetInstrumentation()->DisableDeoptimization();
gDebuggerActive = false;
}
@@ -2586,13 +2597,12 @@
}
void Dbg::UpdateDebugger(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* m, uint32_t dex_pc) {
+ mirror::ArtMethod* m, uint32_t dex_pc,
+ int event_flags, const JValue* return_value) {
if (!IsDebuggerActive() || dex_pc == static_cast<uint32_t>(-2) /* fake method exit */) {
return;
}
- int event_flags = 0;
-
if (IsBreakpoint(m, dex_pc)) {
event_flags |= kBreakpoint;
}
@@ -2660,7 +2670,26 @@
// If there's something interesting going on, see if it matches one
// of the debugger filters.
if (event_flags != 0) {
- Dbg::PostLocationEvent(m, dex_pc, this_object, event_flags, nullptr);
+ Dbg::PostLocationEvent(m, dex_pc, this_object, event_flags, return_value);
+ }
+}
+
+size_t* Dbg::GetReferenceCounterForEvent(uint32_t instrumentation_event) {
+ switch (instrumentation_event) {
+ case instrumentation::Instrumentation::kMethodEntered:
+ return &method_enter_event_ref_count_;
+ case instrumentation::Instrumentation::kMethodExited:
+ return &method_exit_event_ref_count_;
+ case instrumentation::Instrumentation::kDexPcMoved:
+ return &dex_pc_change_event_ref_count_;
+ case instrumentation::Instrumentation::kFieldRead:
+ return &field_read_event_ref_count_;
+ case instrumentation::Instrumentation::kFieldWritten:
+ return &field_write_event_ref_count_;
+ case instrumentation::Instrumentation::kExceptionCaught:
+ return &exception_catch_event_ref_count_;
+ default:
+ return nullptr;
}
}
@@ -2671,6 +2700,19 @@
case DeoptimizationRequest::kNothing:
LOG(WARNING) << "Ignoring empty deoptimization request.";
break;
+ case DeoptimizationRequest::kRegisterForEvent:
+ VLOG(jdwp) << StringPrintf("Add debugger as listener for instrumentation event 0x%x",
+ request.instrumentation_event);
+ instrumentation->AddListener(&gDebugInstrumentationListener, request.instrumentation_event);
+ instrumentation_events_ |= request.instrumentation_event;
+ break;
+ case DeoptimizationRequest::kUnregisterForEvent:
+ VLOG(jdwp) << StringPrintf("Remove debugger as listener for instrumentation event 0x%x",
+ request.instrumentation_event);
+ instrumentation->RemoveListener(&gDebugInstrumentationListener,
+ request.instrumentation_event);
+ instrumentation_events_ &= ~request.instrumentation_event;
+ break;
case DeoptimizationRequest::kFullDeoptimization:
VLOG(jdwp) << "Deoptimize the world ...";
instrumentation->DeoptimizeEverything();
@@ -2729,6 +2771,32 @@
void Dbg::RequestDeoptimizationLocked(const DeoptimizationRequest& req) {
switch (req.kind) {
+ case DeoptimizationRequest::kRegisterForEvent: {
+ DCHECK_NE(req.instrumentation_event, 0u);
+ size_t* counter = GetReferenceCounterForEvent(req.instrumentation_event);
+ CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
+ req.instrumentation_event);
+ if (*counter == 0) {
+ VLOG(jdwp) << StringPrintf("Queue request #%zd to start listening to instrumentation event 0x%x",
+ deoptimization_requests_.size(), req.instrumentation_event);
+ deoptimization_requests_.push_back(req);
+ }
+ *counter = *counter + 1;
+ break;
+ }
+ case DeoptimizationRequest::kUnregisterForEvent: {
+ DCHECK_NE(req.instrumentation_event, 0u);
+ size_t* counter = GetReferenceCounterForEvent(req.instrumentation_event);
+ CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
+ req.instrumentation_event);
+ *counter = *counter - 1;
+ if (*counter == 0) {
+ VLOG(jdwp) << StringPrintf("Queue request #%zd to stop listening to instrumentation event 0x%x",
+ deoptimization_requests_.size(), req.instrumentation_event);
+ deoptimization_requests_.push_back(req);
+ }
+ break;
+ }
case DeoptimizationRequest::kFullDeoptimization: {
DCHECK(req.method == nullptr);
if (full_deoptimization_event_count_ == 0) {
@@ -2809,8 +2877,9 @@
// should never be null. We could just check we never encounter this case.
return false;
}
- SirtRef<mirror::DexCache> dex_cache(self, mh.GetDexCache());
- SirtRef<mirror::ClassLoader> class_loader(self, mh.GetClassLoader());
+ StackHandleScope<2> hs(self);
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(mh.GetDexCache()));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(mh.GetClassLoader()));
verifier::MethodVerifier verifier(&mh.GetDexFile(), &dex_cache, &class_loader,
&mh.GetClassDef(), code_item, m->GetDexMethodIndex(), m,
m->GetAccessFlags(), false, true);
@@ -3341,43 +3410,44 @@
// We can be called while an exception is pending. We need
// to preserve that across the method invocation.
- SirtRef<mirror::Object> old_throw_this_object(soa.Self(), NULL);
- SirtRef<mirror::ArtMethod> old_throw_method(soa.Self(), NULL);
- SirtRef<mirror::Throwable> old_exception(soa.Self(), NULL);
+ StackHandleScope<4> hs(soa.Self());
+ auto old_throw_this_object = hs.NewHandle<mirror::Object>(nullptr);
+ auto old_throw_method = hs.NewHandle<mirror::ArtMethod>(nullptr);
+ auto old_exception = hs.NewHandle<mirror::Throwable>(nullptr);
uint32_t old_throw_dex_pc;
{
ThrowLocation old_throw_location;
mirror::Throwable* old_exception_obj = soa.Self()->GetException(&old_throw_location);
- old_throw_this_object.reset(old_throw_location.GetThis());
- old_throw_method.reset(old_throw_location.GetMethod());
- old_exception.reset(old_exception_obj);
+ old_throw_this_object.Assign(old_throw_location.GetThis());
+ old_throw_method.Assign(old_throw_location.GetMethod());
+ old_exception.Assign(old_exception_obj);
old_throw_dex_pc = old_throw_location.GetDexPc();
soa.Self()->ClearException();
}
// Translate the method through the vtable, unless the debugger wants to suppress it.
- SirtRef<mirror::ArtMethod> m(soa.Self(), pReq->method);
+ Handle<mirror::ArtMethod> m(hs.NewHandle(pReq->method));
if ((pReq->options & JDWP::INVOKE_NONVIRTUAL) == 0 && pReq->receiver != NULL) {
- mirror::ArtMethod* actual_method = pReq->klass->FindVirtualMethodForVirtualOrInterface(m.get());
- if (actual_method != m.get()) {
- VLOG(jdwp) << "ExecuteMethod translated " << PrettyMethod(m.get()) << " to " << PrettyMethod(actual_method);
- m.reset(actual_method);
+ mirror::ArtMethod* actual_method = pReq->klass->FindVirtualMethodForVirtualOrInterface(m.Get());
+ if (actual_method != m.Get()) {
+ VLOG(jdwp) << "ExecuteMethod translated " << PrettyMethod(m.Get()) << " to " << PrettyMethod(actual_method);
+ m.Assign(actual_method);
}
}
- VLOG(jdwp) << "ExecuteMethod " << PrettyMethod(m.get())
+ VLOG(jdwp) << "ExecuteMethod " << PrettyMethod(m.Get())
<< " receiver=" << pReq->receiver
<< " arg_count=" << pReq->arg_count;
- CHECK(m.get() != nullptr);
+ CHECK(m.Get() != nullptr);
CHECK_EQ(sizeof(jvalue), sizeof(uint64_t));
- pReq->result_value = InvokeWithJValues(soa, pReq->receiver, soa.EncodeMethod(m.get()),
+ pReq->result_value = InvokeWithJValues(soa, pReq->receiver, soa.EncodeMethod(m.Get()),
reinterpret_cast<jvalue*>(pReq->arg_values));
mirror::Throwable* exception = soa.Self()->GetException(NULL);
soa.Self()->ClearException();
pReq->exception = gRegistry->Add(exception);
- pReq->result_tag = BasicTagFromDescriptor(MethodHelper(m.get()).GetShorty());
+ pReq->result_tag = BasicTagFromDescriptor(MethodHelper(m.Get()).GetShorty());
if (pReq->exception != 0) {
VLOG(jdwp) << " JDWP invocation returning with exception=" << exception
<< " " << exception->Dump();
@@ -3402,10 +3472,10 @@
gRegistry->Add(pReq->result_value.GetL());
}
- if (old_exception.get() != NULL) {
- ThrowLocation gc_safe_throw_location(old_throw_this_object.get(), old_throw_method.get(),
+ if (old_exception.Get() != NULL) {
+ ThrowLocation gc_safe_throw_location(old_throw_this_object.Get(), old_throw_method.Get(),
old_throw_dex_pc);
- soa.Self()->SetException(gc_safe_throw_location, old_exception.get());
+ soa.Self()->SetException(gc_safe_throw_location, old_exception.Get());
}
}
@@ -3547,9 +3617,10 @@
} else {
CHECK(type == CHUNK_TYPE("THCR") || type == CHUNK_TYPE("THNM")) << type;
ScopedObjectAccessUnchecked soa(Thread::Current());
- SirtRef<mirror::String> name(soa.Self(), t->GetThreadName(soa));
- size_t char_count = (name.get() != NULL) ? name->GetLength() : 0;
- const jchar* chars = (name.get() != NULL) ? name->GetCharArray()->GetData() : NULL;
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::String> name(hs.NewHandle(t->GetThreadName(soa)));
+ size_t char_count = (name.Get() != NULL) ? name->GetLength() : 0;
+ const jchar* chars = (name.Get() != NULL) ? name->GetCharArray()->GetData() : NULL;
std::vector<uint8_t> bytes;
JDWP::Append4BE(bytes, t->GetThreadId());
diff --git a/runtime/debugger.h b/runtime/debugger.h
index bef708c..31ffd6e 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -129,21 +129,31 @@
DISALLOW_COPY_AND_ASSIGN(SingleStepControl);
};
+// TODO rename to InstrumentationRequest.
struct DeoptimizationRequest {
enum Kind {
kNothing, // no action.
+ kRegisterForEvent, // start listening for instrumentation event.
+ kUnregisterForEvent, // stop listening for instrumentation event.
kFullDeoptimization, // deoptimize everything.
kFullUndeoptimization, // undeoptimize everything.
kSelectiveDeoptimization, // deoptimize one method.
kSelectiveUndeoptimization // undeoptimize one method.
};
- DeoptimizationRequest() : kind(kNothing), method(nullptr) {}
+ DeoptimizationRequest() : kind(kNothing), instrumentation_event(0), method(nullptr) {}
void VisitRoots(RootCallback* callback, void* arg);
Kind kind;
+ // TODO we could use a union to hold the instrumentation_event and the method since they
+ // respectively have sense only for kRegisterForEvent/kUnregisterForEvent and
+ // kSelectiveDeoptimization/kSelectiveUndeoptimization.
+
+ // Event to start or stop listening to. Only for kRegisterForEvent and kUnregisterForEvent.
+ uint32_t instrumentation_event;
+
// Method for selective deoptimization.
mirror::ArtMethod* method;
};
@@ -417,10 +427,6 @@
kMethodEntry = 0x04,
kMethodExit = 0x08,
};
- static void PostLocationEvent(mirror::ArtMethod* method, int pcOffset,
- mirror::Object* thisPtr, int eventFlags,
- const JValue* return_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void PostFieldAccessEvent(mirror::ArtMethod* m, int dex_pc, mirror::Object* this_object,
mirror::ArtField* f)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -439,7 +445,8 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void UpdateDebugger(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t new_dex_pc)
+ mirror::ArtMethod* method, uint32_t new_dex_pc,
+ int event_flags, const JValue* return_value)
LOCKS_EXCLUDED(Locks::breakpoint_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -561,6 +568,11 @@
static void PostThreadStartOrStop(Thread*, uint32_t)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void PostLocationEvent(mirror::ArtMethod* method, int pcOffset,
+ mirror::Object* thisPtr, int eventFlags,
+ const JValue* return_value)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
static JDWP::ObjectId GetThisObjectIdForEvent(mirror::Object* this_object)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -579,11 +591,13 @@
static size_t alloc_record_count_ GUARDED_BY(alloc_tracker_lock_);
// Guards deoptimization requests.
+ // TODO rename to instrumentation_update_lock.
static Mutex* deoptimization_lock_ ACQUIRED_AFTER(Locks::breakpoint_lock_);
// Deoptimization requests to be processed each time the event list is updated. This is used when
// registering and unregistering events so we do not deoptimize while holding the event list
// lock.
+ // TODO rename to instrumentation_requests.
static std::vector<DeoptimizationRequest> deoptimization_requests_ GUARDED_BY(deoptimization_lock_);
// Count the number of events requiring full deoptimization. When the counter is > 0, everything
@@ -596,6 +610,19 @@
// session.
static size_t delayed_full_undeoptimization_count_ GUARDED_BY(deoptimization_lock_);
+ static size_t* GetReferenceCounterForEvent(uint32_t instrumentation_event);
+
+ // Instrumentation event reference counters.
+ // TODO we could use an array instead of having all these dedicated counters. Instrumentation
+ // events are bits of a mask so we could convert them to array index.
+ static size_t dex_pc_change_event_ref_count_ GUARDED_BY(deoptimization_lock_);
+ static size_t method_enter_event_ref_count_ GUARDED_BY(deoptimization_lock_);
+ static size_t method_exit_event_ref_count_ GUARDED_BY(deoptimization_lock_);
+ static size_t field_read_event_ref_count_ GUARDED_BY(deoptimization_lock_);
+ static size_t field_write_event_ref_count_ GUARDED_BY(deoptimization_lock_);
+ static size_t exception_catch_event_ref_count_ GUARDED_BY(deoptimization_lock_);
+ static uint32_t instrumentation_events_ GUARDED_BY(Locks::mutator_lock_);
+
DISALLOW_COPY_AND_ASSIGN(Dbg);
};
diff --git a/runtime/deoptimize_stack_visitor.cc b/runtime/deoptimize_stack_visitor.cc
index 3eb1792..c7fbc87 100644
--- a/runtime/deoptimize_stack_visitor.cc
+++ b/runtime/deoptimize_stack_visitor.cc
@@ -19,7 +19,7 @@
#include "mirror/art_method-inl.h"
#include "object_utils.h"
#include "quick_exception_handler.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
#include "verifier/method_verifier.h"
namespace art {
@@ -50,8 +50,9 @@
const Instruction* inst = Instruction::At(code_item->insns_ + dex_pc);
uint32_t new_dex_pc = dex_pc + inst->SizeInCodeUnits();
ShadowFrame* new_frame = ShadowFrame::Create(num_regs, nullptr, m, new_dex_pc);
- SirtRef<mirror::DexCache> dex_cache(self_, mh.GetDexCache());
- SirtRef<mirror::ClassLoader> class_loader(self_, mh.GetClassLoader());
+ StackHandleScope<2> hs(self_);
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(mh.GetDexCache()));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(mh.GetClassLoader()));
verifier::MethodVerifier verifier(&mh.GetDexFile(), &dex_cache, &class_loader,
&mh.GetClassDef(), code_item, m->GetDexMethodIndex(), m,
m->GetAccessFlags(), false, true);
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 6adfc1f..26b7d07 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -37,9 +37,9 @@
#include "os.h"
#include "safe_map.h"
#include "ScopedFd.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
#include "thread.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#include "utf-inl.h"
#include "utils.h"
#include "well_known_classes.h"
@@ -1005,8 +1005,8 @@
}
EncodedStaticFieldValueIterator::EncodedStaticFieldValueIterator(const DexFile& dex_file,
- SirtRef<mirror::DexCache>* dex_cache,
- SirtRef<mirror::ClassLoader>* class_loader,
+ Handle<mirror::DexCache>* dex_cache,
+ Handle<mirror::ClassLoader>* class_loader,
ClassLinker* linker,
const DexFile::ClassDef& class_def)
: dex_file_(dex_file), dex_cache_(dex_cache), class_loader_(class_loader), linker_(linker),
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index c782ab1..0146f31 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -28,7 +28,7 @@
#include "mem_map.h"
#include "modifiers.h"
#include "safe_map.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
namespace art {
@@ -42,8 +42,7 @@
} // namespace mirror
class ClassLinker;
class Signature;
-template <typename T>
-class SirtRef;
+template<class T> class Handle;
class StringPiece;
class ZipArchive;
@@ -1127,8 +1126,8 @@
class EncodedStaticFieldValueIterator {
public:
- EncodedStaticFieldValueIterator(const DexFile& dex_file, SirtRef<mirror::DexCache>* dex_cache,
- SirtRef<mirror::ClassLoader>* class_loader,
+ EncodedStaticFieldValueIterator(const DexFile& dex_file, Handle<mirror::DexCache>* dex_cache,
+ Handle<mirror::ClassLoader>* class_loader,
ClassLinker* linker, const DexFile::ClassDef& class_def)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -1163,8 +1162,8 @@
static const byte kEncodedValueArgShift = 5;
const DexFile& dex_file_;
- SirtRef<mirror::DexCache>* const dex_cache_; // Dex cache to resolve literal objects.
- SirtRef<mirror::ClassLoader>* const class_loader_; // ClassLoader to resolve types.
+ Handle<mirror::DexCache>* const dex_cache_; // Dex cache to resolve literal objects.
+ Handle<mirror::ClassLoader>* const class_loader_; // ClassLoader to resolve types.
ClassLinker* linker_; // Linker to resolve literal objects.
size_t array_size_; // Size of array.
size_t pos_; // Current position.
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index 9b6859a..86c282e 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -16,7 +16,7 @@
#include "dex_file.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#include "common_runtime_test.h"
namespace art {
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index 528e112..d179c8b 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -22,7 +22,7 @@
#include "dex_file-inl.h"
#include "leb128.h"
#include "safe_map.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#include "utf-inl.h"
#include "utils.h"
diff --git a/runtime/dex_instruction_visitor_test.cc b/runtime/dex_instruction_visitor_test.cc
index 8f42b0c..99ad3ed 100644
--- a/runtime/dex_instruction_visitor_test.cc
+++ b/runtime/dex_instruction_visitor_test.cc
@@ -18,7 +18,7 @@
#include <iostream>
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#include "gtest/gtest.h"
namespace art {
diff --git a/runtime/elf_file.h b/runtime/elf_file.h
index d2a044e..138147b 100644
--- a/runtime/elf_file.h
+++ b/runtime/elf_file.h
@@ -25,7 +25,7 @@
#include "elf_utils.h"
#include "mem_map.h"
#include "os.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
namespace art {
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index e52a8fb..6998e21 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -30,7 +30,7 @@
#include "mirror/object-inl.h"
#include "mirror/throwable.h"
#include "object_utils.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
#include "thread.h"
namespace art {
@@ -72,7 +72,8 @@
}
}
if (UNLIKELY(!klass->IsInitialized())) {
- SirtRef<mirror::Class> sirt_klass(self, klass);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> h_klass(hs.NewHandle(klass));
// EnsureInitialized (the class initializer) might cause a GC.
// may cause us to suspend meaning that another thread may try to
// change the allocator while we are stuck in the entrypoints of
@@ -82,11 +83,11 @@
// has changed and to null-check the return value in case the
// initialization fails.
*slow_path = true;
- if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(sirt_klass, true, true)) {
+ if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_klass, true, true)) {
DCHECK(self->IsExceptionPending());
return nullptr; // Failure
}
- return sirt_klass.get();
+ return h_klass.Get();
}
return klass;
}
@@ -96,7 +97,8 @@
Thread* self, bool* slow_path)
NO_THREAD_SAFETY_ANALYSIS {
if (UNLIKELY(!klass->IsInitialized())) {
- SirtRef<mirror::Class> sirt_class(self, klass);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> h_class(hs.NewHandle(klass));
// EnsureInitialized (the class initializer) might cause a GC.
// may cause us to suspend meaning that another thread may try to
// change the allocator while we are stuck in the entrypoints of
@@ -106,11 +108,11 @@
// has changed and to null-check the return value in case the
// initialization fails.
*slow_path = true;
- if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(sirt_class, true, true)) {
+ if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_class, true, true)) {
DCHECK(self->IsExceptionPending());
return nullptr; // Failure
}
- return sirt_class.get();
+ return h_class.Get();
}
return klass;
}
@@ -346,14 +348,14 @@
if (LIKELY(fields_class->IsInitialized())) {
return resolved_field;
} else {
- SirtRef<mirror::Class> sirt_class(self, fields_class);
- if (LIKELY(class_linker->EnsureInitialized(sirt_class, true, true))) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> h_class(hs.NewHandle(fields_class));
+ if (LIKELY(class_linker->EnsureInitialized(h_class, true, true))) {
// Otherwise let's ensure the class is initialized before resolving the field.
return resolved_field;
- } else {
- DCHECK(self->IsExceptionPending()); // Throw exception and unwind
- return nullptr; // Failure.
}
+ DCHECK(self->IsExceptionPending()); // Throw exception and unwind
+ return nullptr; // Failure.
}
}
}
@@ -386,12 +388,13 @@
mirror::Object* this_object,
mirror::ArtMethod* referrer, Thread* self) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- SirtRef<mirror::Object> sirt_this(self, type == kStatic ? nullptr : this_object);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Object> handle_scope_this(hs.NewHandle(type == kStatic ? nullptr : this_object));
mirror::ArtMethod* resolved_method = class_linker->ResolveMethod(method_idx, referrer, type);
if (UNLIKELY(resolved_method == nullptr)) {
DCHECK(self->IsExceptionPending()); // Throw exception and unwind.
return nullptr; // Failure.
- } else if (UNLIKELY(sirt_this.get() == nullptr && type != kStatic)) {
+ } else if (UNLIKELY(handle_scope_this.Get() == nullptr && type != kStatic)) {
// Maintain interpreter-like semantics where NullPointerException is thrown
// after potential NoSuchMethodError from class linker.
ThrowLocation throw_location = self->GetCurrentLocationForThrow();
@@ -420,7 +423,7 @@
case kDirect:
return resolved_method;
case kVirtual: {
- mirror::ObjectArray<mirror::ArtMethod>* vtable = sirt_this->GetClass()->GetVTable();
+ mirror::ObjectArray<mirror::ArtMethod>* vtable = handle_scope_this->GetClass()->GetVTable();
uint16_t vtable_index = resolved_method->GetMethodIndex();
if (access_check &&
(vtable == nullptr || vtable_index >= static_cast<uint32_t>(vtable->GetLength()))) {
@@ -457,16 +460,16 @@
}
case kInterface: {
uint32_t imt_index = resolved_method->GetDexMethodIndex() % ClassLinker::kImtSize;
- mirror::ObjectArray<mirror::ArtMethod>* imt_table = sirt_this->GetClass()->GetImTable();
+ mirror::ObjectArray<mirror::ArtMethod>* imt_table = handle_scope_this->GetClass()->GetImTable();
mirror::ArtMethod* imt_method = imt_table->Get(imt_index);
if (!imt_method->IsImtConflictMethod()) {
return imt_method;
} else {
mirror::ArtMethod* interface_method =
- sirt_this->GetClass()->FindVirtualMethodForInterface(resolved_method);
+ handle_scope_this->GetClass()->FindVirtualMethodForInterface(resolved_method);
if (UNLIKELY(interface_method == nullptr)) {
ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(resolved_method,
- sirt_this.get(), referrer);
+ handle_scope_this.Get(), referrer);
return nullptr; // Failure.
} else {
return interface_method;
@@ -625,12 +628,13 @@
if (klass == referring_class && referrer->IsConstructor() && referrer->IsStatic()) {
return klass;
}
- SirtRef<mirror::Class> sirt_class(self, klass);
- if (!class_linker->EnsureInitialized(sirt_class, true, true)) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> h_class(hs.NewHandle(klass));
+ if (!class_linker->EnsureInitialized(h_class, true, true)) {
CHECK(self->IsExceptionPending());
return nullptr; // Failure - Indicate to caller to deliver exception
}
- return sirt_class.get();
+ return h_class.Get();
}
extern void ThrowStackOverflowError(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
index a0ba6b9..3f02ec7 100644
--- a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
+++ b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
@@ -34,14 +34,15 @@
mirror::Class* declaringClass = method->GetDeclaringClass();
if (UNLIKELY(!declaringClass->IsInitializing())) {
self->PushShadowFrame(shadow_frame);
- SirtRef<mirror::Class> sirt_c(self, declaringClass);
- if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(sirt_c, true, true))) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> h_class(hs.NewHandle(declaringClass));
+ if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_class, true, true))) {
self->PopShadowFrame();
DCHECK(self->IsExceptionPending());
return;
}
self->PopShadowFrame();
- CHECK(sirt_c->IsInitializing());
+ CHECK(h_class->IsInitializing());
}
}
uint16_t arg_offset = (code_item == NULL) ? 0 : code_item->registers_size_ - code_item->ins_size_;
diff --git a/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc b/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
index f1b15b5..17c3222 100644
--- a/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
@@ -214,8 +214,9 @@
if (method->IsStatic() && !method->GetDeclaringClass()->IsInitializing()) {
// Ensure static method's class is initialized.
- SirtRef<mirror::Class> sirt_c(self, method->GetDeclaringClass());
- if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(sirt_c, true, true)) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> h_class(hs.NewHandle(method->GetDeclaringClass()));
+ if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_class, true, true)) {
DCHECK(Thread::Current()->IsExceptionPending());
self->PopManagedStackFragment(fragment);
return 0;
@@ -396,7 +397,8 @@
const void* code = nullptr;
if (LIKELY(!thread->IsExceptionPending())) {
// Ensure that the called method's class is initialized.
- SirtRef<mirror::Class> called_class(thread, called->GetDeclaringClass());
+ StackHandleScope<1> hs(Thread::Current());
+ Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass()));
linker->EnsureInitialized(called_class, true, true);
if (LIKELY(called_class->IsInitialized())) {
code = called->GetEntryPointFromPortableCompiledCode();
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index 116957d..9c9cca8 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -65,7 +65,7 @@
JNIEnvExt* env = self->GetJniEnv();
env->locals.SetSegmentState(env->local_ref_cookie);
env->local_ref_cookie = saved_local_ref_cookie;
- self->PopSirt();
+ self->PopHandleScope();
}
extern void JniMethodEnd(uint32_t saved_local_ref_cookie, Thread* self) {
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 5d2603f..887bd6f 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -489,8 +489,9 @@
if (method->IsStatic() && !method->GetDeclaringClass()->IsInitializing()) {
// Ensure static method's class is initialized.
- SirtRef<mirror::Class> sirt_c(self, method->GetDeclaringClass());
- if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(sirt_c, true, true)) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> h_class(hs.NewHandle(method->GetDeclaringClass()));
+ if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_class, true, true)) {
DCHECK(Thread::Current()->IsExceptionPending()) << PrettyMethod(method);
self->PopManagedStackFragment(fragment);
return 0;
@@ -755,9 +756,10 @@
bool virtual_or_interface = invoke_type == kVirtual || invoke_type == kInterface;
// Resolve method filling in dex cache.
if (called->IsRuntimeMethod()) {
- SirtRef<mirror::Object> sirt_receiver(soa.Self(), virtual_or_interface ? receiver : nullptr);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Object> handle_scope_receiver(hs.NewHandle(virtual_or_interface ? receiver : nullptr));
called = linker->ResolveMethod(dex_method_idx, caller, invoke_type);
- receiver = sirt_receiver.get();
+ receiver = handle_scope_receiver.Get();
}
const void* code = NULL;
if (LIKELY(!self->IsExceptionPending())) {
@@ -796,7 +798,8 @@
}
}
// Ensure that the called method's class is initialized.
- SirtRef<mirror::Class> called_class(soa.Self(), called->GetDeclaringClass());
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass()));
linker->EnsureInitialized(called_class, true, true);
if (LIKELY(called_class->IsInitialized())) {
code = called->GetEntryPointFromQuickCompiledCode();
@@ -857,10 +860,10 @@
*
* void PushStack(uintptr_t): Push a value to the stack.
*
- * uintptr_t PushSirt(mirror::Object* ref): Add a reference to the Sirt. This _will_ have nullptr,
+ * uintptr_t PushHandle(mirror::Object* ref): Add a reference to the HandleScope. This _will_ have nullptr,
* as this might be important for null initialization.
* Must return the jobject, that is, the reference to the
- * entry in the Sirt (nullptr if necessary).
+ * entry in the HandleScope (nullptr if necessary).
*
*/
template <class T> class BuildGenericJniFrameStateMachine {
@@ -956,18 +959,18 @@
}
- bool HaveSirtGpr() {
+ bool HaveHandleScopeGpr() {
return gpr_index_ > 0;
}
- void AdvanceSirt(mirror::Object* ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- uintptr_t sirtRef = PushSirt(ptr);
- if (HaveSirtGpr()) {
+ void AdvanceHandleScope(mirror::Object* ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uintptr_t handle = PushHandle(ptr);
+ if (HaveHandleScopeGpr()) {
gpr_index_--;
- PushGpr(sirtRef);
+ PushGpr(handle);
} else {
stack_entries_++;
- PushStack(sirtRef);
+ PushStack(handle);
gpr_index_ = 0;
}
}
@@ -1147,8 +1150,8 @@
void PushStack(uintptr_t val) {
delegate_->PushStack(val);
}
- uintptr_t PushSirt(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return delegate_->PushSirt(ref);
+ uintptr_t PushHandle(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return delegate_->PushHandle(ref);
}
uint32_t gpr_index_; // Number of free GPRs
@@ -1160,7 +1163,7 @@
class ComputeGenericJniFrameSize FINAL {
public:
- ComputeGenericJniFrameSize() : num_sirt_references_(0), num_stack_entries_(0) {}
+ ComputeGenericJniFrameSize() : num_handle_scope_references_(0), num_stack_entries_(0) {}
uint32_t GetStackSize() {
return num_stack_entries_ * sizeof(uintptr_t);
@@ -1168,7 +1171,7 @@
// WARNING: After this, *sp won't be pointing to the method anymore!
void ComputeLayout(mirror::ArtMethod*** m, bool is_static, const char* shorty, uint32_t shorty_len,
- void* sp, StackIndirectReferenceTable** table, uint32_t* sirt_entries,
+ void* sp, HandleScope** table, uint32_t* handle_scope_entries,
uintptr_t** start_stack, uintptr_t** start_gpr, uint32_t** start_fpr,
void** code_return, size_t* overall_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -1179,17 +1182,17 @@
uint8_t* sp8 = reinterpret_cast<uint8_t*>(sp);
// First, fix up the layout of the callee-save frame.
- // We have to squeeze in the Sirt, and relocate the method pointer.
+ // We have to squeeze in the HandleScope, and relocate the method pointer.
// "Free" the slot for the method.
sp8 += kPointerSize;
- // Add the Sirt.
- *sirt_entries = num_sirt_references_;
- size_t sirt_size = StackIndirectReferenceTable::GetAlignedSirtSize(num_sirt_references_);
- sp8 -= sirt_size;
- *table = reinterpret_cast<StackIndirectReferenceTable*>(sp8);
- (*table)->SetNumberOfReferences(num_sirt_references_);
+ // Add the HandleScope.
+ *handle_scope_entries = num_handle_scope_references_;
+ size_t handle_scope_size = HandleScope::GetAlignedHandleScopeSize(num_handle_scope_references_);
+ sp8 -= handle_scope_size;
+ *table = reinterpret_cast<HandleScope*>(sp8);
+ (*table)->SetNumberOfReferences(num_handle_scope_references_);
// Add a slot for the method pointer, and fill it. Fix the pointer-pointer given to us.
sp8 -= kPointerSize;
@@ -1199,8 +1202,8 @@
// Reference cookie and padding
sp8 -= 8;
- // Store Sirt size
- *reinterpret_cast<uint32_t*>(sp8) = static_cast<uint32_t>(sirt_size & 0xFFFFFFFF);
+ // Store HandleScope size
+ *reinterpret_cast<uint32_t*>(sp8) = static_cast<uint32_t>(handle_scope_size & 0xFFFFFFFF);
// Next comes the native call stack.
sp8 -= GetStackSize();
@@ -1229,7 +1232,7 @@
*(reinterpret_cast<uint8_t**>(sp8)) = method_pointer;
}
- void ComputeSirtOffset() { } // nothing to do, static right now
+ void ComputeHandleScopeOffset() { } // nothing to do, static right now
void ComputeAll(bool is_static, const char* shorty, uint32_t shorty_len)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -1239,13 +1242,13 @@
sm.AdvancePointer(nullptr);
// Class object or this as first argument
- sm.AdvanceSirt(reinterpret_cast<mirror::Object*>(0x12345678));
+ sm.AdvanceHandleScope(reinterpret_cast<mirror::Object*>(0x12345678));
for (uint32_t i = 1; i < shorty_len; ++i) {
Primitive::Type cur_type_ = Primitive::GetType(shorty[i]);
switch (cur_type_) {
case Primitive::kPrimNot:
- sm.AdvanceSirt(reinterpret_cast<mirror::Object*>(0x12345678));
+ sm.AdvanceHandleScope(reinterpret_cast<mirror::Object*>(0x12345678));
break;
case Primitive::kPrimBoolean:
@@ -1288,13 +1291,13 @@
// counting is already done in the superclass
}
- uintptr_t PushSirt(mirror::Object* /* ptr */) {
- num_sirt_references_++;
+ uintptr_t PushHandle(mirror::Object* /* ptr */) {
+ num_handle_scope_references_++;
return reinterpret_cast<uintptr_t>(nullptr);
}
private:
- uint32_t num_sirt_references_;
+ uint32_t num_handle_scope_references_;
uint32_t num_stack_entries_;
};
@@ -1306,26 +1309,26 @@
uint32_t shorty_len, Thread* self) :
QuickArgumentVisitor(*sp, is_static, shorty, shorty_len), sm_(this) {
ComputeGenericJniFrameSize fsc;
- fsc.ComputeLayout(sp, is_static, shorty, shorty_len, *sp, &sirt_, &sirt_expected_refs_,
+ fsc.ComputeLayout(sp, is_static, shorty, shorty_len, *sp, &handle_scope_, &handle_scope_expected_refs_,
&cur_stack_arg_, &cur_gpr_reg_, &cur_fpr_reg_, &code_return_,
&alloca_used_size_);
- sirt_number_of_references_ = 0;
- cur_sirt_entry_ = reinterpret_cast<StackReference<mirror::Object>*>(GetFirstSirtEntry());
+ handle_scope_number_of_references_ = 0;
+ cur_hs_entry_ = reinterpret_cast<StackReference<mirror::Object>*>(GetFirstHandleScopeEntry());
// jni environment is always first argument
sm_.AdvancePointer(self->GetJniEnv());
if (is_static) {
- sm_.AdvanceSirt((**sp)->GetDeclaringClass());
+ sm_.AdvanceHandleScope((**sp)->GetDeclaringClass());
}
}
void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
- void FinalizeSirt(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void FinalizeHandleScope(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- jobject GetFirstSirtEntry() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return reinterpret_cast<jobject>(sirt_->GetStackReference(0));
+ jobject GetFirstHandleScopeEntry() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return handle_scope_->GetHandle(0).ToJObject();
}
void PushGpr(uintptr_t val) {
@@ -1349,17 +1352,17 @@
cur_stack_arg_++;
}
- uintptr_t PushSirt(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uintptr_t PushHandle(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
uintptr_t tmp;
if (ref == nullptr) {
- *cur_sirt_entry_ = StackReference<mirror::Object>();
+ *cur_hs_entry_ = StackReference<mirror::Object>();
tmp = reinterpret_cast<uintptr_t>(nullptr);
} else {
- *cur_sirt_entry_ = StackReference<mirror::Object>::FromMirrorPtr(ref);
- tmp = reinterpret_cast<uintptr_t>(cur_sirt_entry_);
+ *cur_hs_entry_ = StackReference<mirror::Object>::FromMirrorPtr(ref);
+ tmp = reinterpret_cast<uintptr_t>(cur_hs_entry_);
}
- cur_sirt_entry_++;
- sirt_number_of_references_++;
+ cur_hs_entry_++;
+ handle_scope_number_of_references_++;
return tmp;
}
@@ -1373,14 +1376,14 @@
}
private:
- uint32_t sirt_number_of_references_;
- StackReference<mirror::Object>* cur_sirt_entry_;
- StackIndirectReferenceTable* sirt_;
- uint32_t sirt_expected_refs_;
+ uint32_t handle_scope_number_of_references_;
+ StackReference<mirror::Object>* cur_hs_entry_;
+ HandleScope* handle_scope_;
+ uint32_t handle_scope_expected_refs_;
uintptr_t* cur_gpr_reg_;
uint32_t* cur_fpr_reg_;
uintptr_t* cur_stack_arg_;
- // StackReference<mirror::Object>* top_of_sirt_;
+ // StackReference<mirror::Object>* top_of_handle_scope_;
void* code_return_;
size_t alloca_used_size_;
@@ -1416,7 +1419,7 @@
case Primitive::kPrimNot: {
StackReference<mirror::Object>* stack_ref =
reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
- sm_.AdvanceSirt(stack_ref->AsMirrorPtr());
+ sm_.AdvanceHandleScope(stack_ref->AsMirrorPtr());
break;
}
case Primitive::kPrimFloat:
@@ -1435,17 +1438,17 @@
}
}
-void BuildGenericJniFrameVisitor::FinalizeSirt(Thread* self) {
+void BuildGenericJniFrameVisitor::FinalizeHandleScope(Thread* self) {
// Initialize padding entries.
- while (sirt_number_of_references_ < sirt_expected_refs_) {
- *cur_sirt_entry_ = StackReference<mirror::Object>();
- cur_sirt_entry_++;
- sirt_number_of_references_++;
+ while (handle_scope_number_of_references_ < handle_scope_expected_refs_) {
+ *cur_hs_entry_ = StackReference<mirror::Object>();
+ cur_hs_entry_++;
+ handle_scope_number_of_references_++;
}
- sirt_->SetNumberOfReferences(sirt_expected_refs_);
- DCHECK_NE(sirt_expected_refs_, 0U);
- // Install Sirt.
- self->PushSirt(sirt_);
+ handle_scope_->SetNumberOfReferences(handle_scope_expected_refs_);
+ DCHECK_NE(handle_scope_expected_refs_, 0U);
+ // Install HandleScope.
+ self->PushHandleScope(handle_scope_);
}
extern "C" void* artFindNativeMethod();
@@ -1468,11 +1471,11 @@
/*
* Initializes an alloca region assumed to be directly below sp for a native call:
- * Create a Sirt and call stack and fill a mini stack with values to be pushed to registers.
+ * Create a HandleScope and call stack and fill a mini stack with values to be pushed to registers.
* The final element on the stack is a pointer to the native code.
*
* On entry, the stack has a standard callee-save frame above sp, and an alloca below it.
- * We need to fix this, as the Sirt needs to go into the callee-save frame.
+ * We need to fix this, as the handle scope needs to go into the callee-save frame.
*
* The return of this function denotes:
* 1) How many bytes of the alloca can be released, if the value is non-negative.
@@ -1489,7 +1492,7 @@
BuildGenericJniFrameVisitor visitor(&sp, called->IsStatic(), mh.GetShorty(), mh.GetShortyLength(),
self);
visitor.VisitArguments();
- visitor.FinalizeSirt(self);
+ visitor.FinalizeHandleScope(self);
// fix up managed-stack things in Thread
self->SetTopOfStack(sp, 0);
@@ -1499,9 +1502,9 @@
// Start JNI, save the cookie.
uint32_t cookie;
if (called->IsSynchronized()) {
- cookie = JniMethodStartSynchronized(visitor.GetFirstSirtEntry(), self);
+ cookie = JniMethodStartSynchronized(visitor.GetFirstHandleScopeEntry(), self);
if (self->IsExceptionPending()) {
- self->PopSirt();
+ self->PopHandleScope();
// A negative value denotes an error.
return -1;
}
@@ -1527,7 +1530,7 @@
DCHECK(self->IsExceptionPending()); // There should be an exception pending now.
// End JNI, as the assembly will move to deliver the exception.
- jobject lock = called->IsSynchronized() ? visitor.GetFirstSirtEntry() : nullptr;
+ jobject lock = called->IsSynchronized() ? visitor.GetFirstHandleScopeEntry() : nullptr;
if (mh.GetShorty()[0] == 'L') {
artQuickGenericJniEndJNIRef(self, cookie, nullptr, lock);
} else {
@@ -1549,7 +1552,7 @@
}
/*
- * Is called after the native JNI code. Responsible for cleanup (SIRT, saved state) and
+ * Is called after the native JNI code. Responsible for cleanup (handle scope, saved state) and
* unlocking.
*/
extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, mirror::ArtMethod** sp,
@@ -1561,10 +1564,9 @@
jobject lock = nullptr;
if (called->IsSynchronized()) {
- StackIndirectReferenceTable* table =
- reinterpret_cast<StackIndirectReferenceTable*>(
- reinterpret_cast<uint8_t*>(sp) + kPointerSize);
- lock = reinterpret_cast<jobject>(table->GetStackReference(0));
+ HandleScope* table = reinterpret_cast<HandleScope*>(
+ reinterpret_cast<uint8_t*>(sp) + kPointerSize);
+ lock = table->GetHandle(0).ToJObject();
}
MethodHelper mh(called);
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index 97a8367..37ad9e5 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -25,9 +25,9 @@
#include "mirror/stack_trace_element.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
#include "thread.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#include "vmap_table.h"
namespace art {
@@ -38,13 +38,14 @@
CommonRuntimeTest::SetUp();
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::ClassLoader> class_loader(
- soa.Self(), soa.Decode<mirror::ClassLoader*>(LoadDex("ExceptionHandle")));
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(LoadDex("ExceptionHandle"))));
my_klass_ = class_linker_->FindClass(soa.Self(), "LExceptionHandle;", class_loader);
ASSERT_TRUE(my_klass_ != NULL);
- SirtRef<mirror::Class> sirt_klass(soa.Self(), my_klass_);
- class_linker_->EnsureInitialized(sirt_klass, true, true);
- my_klass_ = sirt_klass.get();
+ Handle<mirror::Class> klass(hs.NewHandle(my_klass_));
+ class_linker_->EnsureInitialized(klass, true, true);
+ my_klass_ = klass.Get();
dex_ = my_klass_->GetDexCache()->GetDexFile();
@@ -72,9 +73,10 @@
const std::vector<uint8_t>& fake_vmap_table_data = fake_vmap_table_data_.GetData();
const std::vector<uint8_t>& fake_mapping_data = fake_mapping_data_.GetData();
- uint32_t vmap_table_offset = sizeof(OatMethodHeader) + fake_vmap_table_data.size();
+ uint32_t vmap_table_offset = sizeof(OatQuickMethodHeader) + fake_vmap_table_data.size();
uint32_t mapping_table_offset = vmap_table_offset + fake_mapping_data.size();
- OatMethodHeader method_header(vmap_table_offset, mapping_table_offset, code_size);
+ OatQuickMethodHeader method_header(mapping_table_offset, vmap_table_offset,
+ 4 * kPointerSize, 0u, 0u, code_size);
fake_header_code_and_maps_.resize(sizeof(method_header));
memcpy(&fake_header_code_and_maps_[0], &method_header, sizeof(method_header));
fake_header_code_and_maps_.insert(fake_header_code_and_maps_.begin(),
@@ -91,13 +93,11 @@
method_f_ = my_klass_->FindVirtualMethod("f", "()I");
ASSERT_TRUE(method_f_ != NULL);
- method_f_->SetFrameSizeInBytes(4 * kPointerSize);
method_f_->SetEntryPointFromQuickCompiledCode(code_ptr);
method_f_->SetNativeGcMap(&fake_gc_map_[0]);
method_g_ = my_klass_->FindVirtualMethod("g", "(I)V");
ASSERT_TRUE(method_g_ != NULL);
- method_g_->SetFrameSizeInBytes(4 * kPointerSize);
method_g_->SetEntryPointFromQuickCompiledCode(code_ptr);
method_g_->SetNativeGcMap(&fake_gc_map_[0]);
}
diff --git a/runtime/fault_handler.h b/runtime/fault_handler.h
index ea2f7c8..97d3c2f 100644
--- a/runtime/fault_handler.h
+++ b/runtime/fault_handler.h
@@ -112,7 +112,7 @@
};
-// Statically allocated so the the signal handler can get access to it.
+// Statically allocated so the signal handler can get access to it.
extern FaultManager fault_manager;
} // namespace art
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index c79b586..7d8b584 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -23,7 +23,7 @@
#include "atomic.h"
#include "base/logging.h"
#include "base/macros.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#include "mem_map.h"
#include "utils.h"
diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h
index 8d5dc07..17e62a6 100644
--- a/runtime/gc/accounting/card_table.h
+++ b/runtime/gc/accounting/card_table.h
@@ -20,7 +20,7 @@
#include "base/mutex.h"
#include "globals.h"
#include "mem_map.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
namespace art {
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 7cddaf4..ef5653a 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -30,7 +30,7 @@
#include "mirror/object_array-inl.h"
#include "space_bitmap-inl.h"
#include "thread.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
using ::art::mirror::Object;
diff --git a/runtime/gc/accounting/remembered_set.cc b/runtime/gc/accounting/remembered_set.cc
index bbbd1ed..1def334 100644
--- a/runtime/gc/accounting/remembered_set.cc
+++ b/runtime/gc/accounting/remembered_set.cc
@@ -30,7 +30,7 @@
#include "mirror/object_array-inl.h"
#include "space_bitmap-inl.h"
#include "thread.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
namespace art {
namespace gc {
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index 646fce6..a439462 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -28,7 +28,7 @@
#include "mirror/object_array-inl.h"
#include "object_utils.h"
#include "space_bitmap-inl.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#include "utils.h"
namespace art {
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index a805809..1ccebf5 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -22,7 +22,7 @@
#include "globals.h"
#include "mem_map.h"
#include "object_callbacks.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#include <limits.h>
#include <set>
diff --git a/runtime/gc/accounting/space_bitmap_test.cc b/runtime/gc/accounting/space_bitmap_test.cc
index 972f94d..71db44b 100644
--- a/runtime/gc/accounting/space_bitmap_test.cc
+++ b/runtime/gc/accounting/space_bitmap_test.cc
@@ -21,7 +21,7 @@
#include "common_runtime_test.h"
#include "globals.h"
#include "space_bitmap-inl.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
namespace art {
namespace gc {
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index a693659..27c4c17 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -793,7 +793,7 @@
// already in the non-full run set (i.e., it was full) insert it
// into the non-full run set.
if (run != current_runs_[idx]) {
- hash_set<Run*, hash_run, eq_run>* full_runs =
+ unordered_set<Run*, hash_run, eq_run>* full_runs =
kIsDebugBuild ? &full_runs_[idx] : NULL;
std::set<Run*>::iterator pos = non_full_runs->find(run);
if (pos == non_full_runs->end()) {
@@ -1156,11 +1156,11 @@
WriterMutexLock wmu(self, bulk_free_lock_);
// First mark slots to free in the bulk free bit map without locking the
- // size bracket locks. On host, hash_set is faster than vector + flag.
+ // size bracket locks. On host, unordered_set is faster than vector + flag.
#ifdef HAVE_ANDROID_OS
std::vector<Run*> runs;
#else
- hash_set<Run*, hash_run, eq_run> runs;
+ unordered_set<Run*, hash_run, eq_run> runs;
#endif
for (size_t i = 0; i < num_ptrs; i++) {
void* ptr = ptrs[i];
@@ -1267,7 +1267,7 @@
// Check if the run should be moved to non_full_runs_ or
// free_page_runs_.
std::set<Run*>* non_full_runs = &non_full_runs_[idx];
- hash_set<Run*, hash_run, eq_run>* full_runs =
+ unordered_set<Run*, hash_run, eq_run>* full_runs =
kIsDebugBuild ? &full_runs_[idx] : NULL;
if (run->IsAllFree()) {
// It has just become completely free. Free the pages of the
@@ -1281,7 +1281,7 @@
// If it was full, remove it from the full run set (debug
// only.)
if (kIsDebugBuild) {
- hash_set<Run*, hash_run, eq_run>::iterator pos = full_runs->find(run);
+ unordered_set<Run*, hash_run, eq_run>::iterator pos = full_runs->find(run);
DCHECK(pos != full_runs->end());
full_runs->erase(pos);
if (kTraceRosAlloc) {
@@ -2054,7 +2054,7 @@
} else {
// If it's full, it must in the full run set (debug build only.)
if (kIsDebugBuild) {
- hash_set<Run*, hash_run, eq_run>& full_runs = rosalloc->full_runs_[idx];
+ unordered_set<Run*, hash_run, eq_run>& full_runs = rosalloc->full_runs_[idx];
CHECK(full_runs.find(this) != full_runs.end())
<< " A full run isn't in the full run set " << Dump();
}
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 21044f3..9ea4306 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -28,32 +28,21 @@
#include "base/logging.h"
#include "globals.h"
#include "mem_map.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#include "utils.h"
-// A boilerplate to use hash_map/hash_set both on host and device.
-#ifdef HAVE_ANDROID_OS
-#include <hash_map>
+// Ensure we have an unordered_set until we have worked out C++ library issues.
+#ifdef ART_WITH_STLPORT
#include <hash_set>
-using std::hash_map;
-using std::hash_set;
-#else // HAVE_ANDROID_OS
-#ifdef __DEPRECATED
-#define ROSALLOC_OLD__DEPRECATED __DEPRECATED
-#undef __DEPRECATED
-#endif
-#include <ext/hash_map>
-#include <ext/hash_set>
-#ifdef ROSALLOC_OLD__DEPRECATED
-#define __DEPRECATED ROSALLOC_OLD__DEPRECATED
-#undef ROSALLOC_OLD__DEPRECATED
-#endif
-using __gnu_cxx::hash_map;
-using __gnu_cxx::hash_set;
-#endif // HAVE_ANDROID_OS
+template <class V, class H, class P>
+class unordered_set : public std::hash_set<V, H, P> {};
+#else // ART_WITH_STLPORT
+// TODO: avoid the use of using in a header file.
+#include <unordered_set>
+using std::unordered_set;
+#endif // ART_WITH_STLPORT
namespace art {
-
namespace gc {
namespace allocator {
@@ -462,7 +451,7 @@
std::set<Run*> non_full_runs_[kNumOfSizeBrackets];
// The run sets that hold the runs whose slots are all full. This is
// debug only. full_runs_[i] is guarded by size_bracket_locks_[i].
- hash_set<Run*, hash_run, eq_run> full_runs_[kNumOfSizeBrackets];
+ unordered_set<Run*, hash_run, eq_run> full_runs_[kNumOfSizeBrackets];
// The set of free pages.
std::set<FreePageRun*> free_page_runs_ GUARDED_BY(lock_);
// The dedicated full run, it is always full and shared by all threads when revoking happens.
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index d05f45b..02dd4d9 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -142,7 +142,7 @@
virtual void RevokeAllThreadLocalBuffers() = 0;
// Record that you have freed some objects or large objects, calls Heap::RecordFree.
- // TODO: These are not thread safe, add a lock if we get have parallel sweeping.
+ // TODO: These are not thread safe, add a lock if we get parallel sweeping.
void RecordFree(uint64_t freed_objects, int64_t freed_bytes);
void RecordFreeLargeObjects(uint64_t freed_objects, int64_t freed_bytes);
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 5de7026..cc258f5 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -106,7 +106,7 @@
void MarkSweep::InitializePhase() {
TimingLogger::ScopedSplit split("InitializePhase", &timings_);
- mark_stack_ = heap_->mark_stack_.get();
+ mark_stack_ = heap_->GetMarkStack();
DCHECK(mark_stack_ != nullptr);
immune_region_.Reset();
class_count_ = 0;
@@ -123,7 +123,7 @@
mark_fastpath_count_ = 0;
mark_slowpath_count_ = 0;
{
- // TODO: I don't think we should need heap bitmap lock to get the mark bitmap.
+ // TODO: I don't think we should need heap bitmap lock to Get the mark bitmap.
ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
mark_bitmap_ = heap_->GetMarkBitmap();
}
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 3ebc0af..fd79bf6 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -26,7 +26,7 @@
#include "immune_region.h"
#include "object_callbacks.h"
#include "offsets.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
namespace art {
@@ -264,7 +264,7 @@
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Used to get around thread safety annotations. The call is from MarkingPhase and is guarded by
+ // Used to Get around thread safety annotations. The call is from MarkingPhase and is guarded by
// IsExclusiveHeld.
void RevokeAllThreadLocalAllocationStacks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
diff --git a/runtime/gc/collector/semi_space-inl.h b/runtime/gc/collector/semi_space-inl.h
index 55140f6..47682cc 100644
--- a/runtime/gc/collector/semi_space-inl.h
+++ b/runtime/gc/collector/semi_space-inl.h
@@ -50,7 +50,7 @@
return reinterpret_cast<mirror::Object*>(lock_word.ForwardingAddress());
}
-// Used to mark and copy objects. Any newly-marked objects who are in the from space get moved to
+// Used to mark and copy objects. Any newly-marked objects who are in the from space Get moved to
// the to-space and have their forward address updated. Objects which have been newly marked are
// pushed on the mark stack.
template<bool kPoisonReferences>
@@ -72,7 +72,7 @@
forward_address = MarkNonForwardedObject(obj);
DCHECK(forward_address != nullptr);
// Make sure to only update the forwarding address AFTER you copy the object so that the
- // monitor word doesn't get stomped over.
+ // monitor word doesn't Get stomped over.
obj->SetLockWord(
LockWord::FromForwardingAddress(reinterpret_cast<size_t>(forward_address)), false);
// Push the object onto the mark stack for later processing.
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index a406f6d..95a2c96 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -142,7 +142,7 @@
void SemiSpace::InitializePhase() {
TimingLogger::ScopedSplit split("InitializePhase", &timings_);
- mark_stack_ = heap_->mark_stack_.get();
+ mark_stack_ = heap_->GetMarkStack();
DCHECK(mark_stack_ != nullptr);
immune_region_.Reset();
is_large_object_space_immune_ = false;
@@ -154,7 +154,7 @@
// Set the initial bitmap.
to_space_live_bitmap_ = to_space_->GetLiveBitmap();
{
- // TODO: I don't think we should need heap bitmap lock to get the mark bitmap.
+ // TODO: I don't think we should need heap bitmap lock to Get the mark bitmap.
ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
mark_bitmap_ = heap_->GetMarkBitmap();
}
@@ -172,7 +172,7 @@
CHECK(Locks::mutator_lock_->IsExclusiveHeld(self_));
if (kStoreStackTraces) {
Locks::mutator_lock_->AssertExclusiveHeld(self_);
- // Store the stack traces into the runtime fault string in case we get a heap corruption
+ // Store the stack traces into the runtime fault string in case we Get a heap corruption
// related crash later.
ThreadState old_state = self_->SetStateUnsafe(kRunnable);
std::ostringstream oss;
@@ -231,7 +231,7 @@
BindBitmaps();
// Process dirty cards and add dirty cards to mod-union tables.
heap_->ProcessCards(timings_, kUseRememberedSet && generational_);
- // Clear the whole card table since we can not get any additional dirty cards during the
+ // Clear the whole card table since we can not Get any additional dirty cards during the
// paused GC. This saves memory but only works for pause the world collectors.
timings_.NewSplit("ClearCardTable");
heap_->GetCardTable()->ClearCardTable();
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 9fdf471..dacb5ae 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -25,7 +25,7 @@
#include "immune_region.h"
#include "object_callbacks.h"
#include "offsets.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
namespace art {
@@ -242,7 +242,7 @@
// heap. When false, collect only the bump pointer spaces.
bool whole_heap_collection_;
- // How many objects and bytes we moved, used so that we don't need to get the size of the
+ // How many objects and bytes we moved, used so that we don't need to Get the size of the
// to_space_ when calculating how many objects and bytes we freed.
size_t bytes_moved_;
size_t objects_moved_;
diff --git a/runtime/gc/collector/sticky_mark_sweep.cc b/runtime/gc/collector/sticky_mark_sweep.cc
index ce51ac5..5a58446 100644
--- a/runtime/gc/collector/sticky_mark_sweep.cc
+++ b/runtime/gc/collector/sticky_mark_sweep.cc
@@ -49,7 +49,7 @@
void StickyMarkSweep::MarkReachableObjects() {
// All reachable objects must be referenced by a root or a dirty card, so we can clear the mark
- // stack here since all objects in the mark stack will get scanned by the card scanning anyways.
+ // stack here since all objects in the mark stack will Get scanned by the card scanning anyways.
// TODO: Not put these objects in the mark stack in the first place.
mark_stack_->Reset();
RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index a06f272..7cee5a0 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -27,7 +27,7 @@
#include "gc/space/large_object_space.h"
#include "gc/space/rosalloc_space-inl.h"
#include "runtime.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
#include "thread.h"
#include "thread-inl.h"
#include "verify_object-inl.h"
@@ -144,10 +144,10 @@
mirror::Object** end_address;
while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize,
&start_address, &end_address)) {
- // Disable verify object in SirtRef as obj isn't on the alloc stack yet.
- SirtRefNoVerify<mirror::Object> ref(self, *obj);
+ // TODO: Add handle VerifyObject.
+ StackHandleScope<1> hs(self);
+ HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
- *obj = ref.get();
}
self->SetThreadLocalAllocationStack(start_address, end_address);
// Retry on the new thread-local allocation stack.
@@ -159,10 +159,10 @@
// This is safe to do since the GC will never free objects which are neither in the allocation
// stack or the live bitmap.
while (!allocation_stack_->AtomicPushBack(*obj)) {
- // Disable verify object in SirtRef as obj isn't on the alloc stack yet.
- SirtRefNoVerify<mirror::Object> ref(self, *obj);
+ // TODO: Add handle VerifyObject.
+ StackHandleScope<1> hs(self);
+ HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
- *obj = ref.get();
}
}
}
@@ -300,11 +300,7 @@
inline void Heap::CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated,
mirror::Object** obj) {
if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) {
- // The SirtRef is necessary since the calls in RequestConcurrentGC are a safepoint.
- SirtRef<mirror::Object> ref(self, *obj);
- RequestConcurrentGC(self);
- // Restore obj in case it moved.
- *obj = ref.get();
+ RequestConcurrentGCAndSaveObject(self, obj);
}
}
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 7235729..b4c2d14 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -62,9 +62,9 @@
#include "runtime.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
#include "thread_list.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#include "well_known_classes.h"
namespace art {
@@ -1070,10 +1070,11 @@
size_t alloc_size, size_t* bytes_allocated,
size_t* usable_size,
mirror::Class** klass) {
- mirror::Object* ptr = nullptr;
bool was_default_allocator = allocator == GetCurrentAllocator();
DCHECK(klass != nullptr);
- SirtRef<mirror::Class> sirt_klass(self, *klass);
+ StackHandleScope<1> hs(self);
+ HandleWrapper<mirror::Class> h(hs.NewHandleWrapper(klass));
+ klass = nullptr; // Invalidate for safety.
// The allocation failed. If the GC is running, block until it completes, and then retry the
// allocation.
collector::GcType last_gc = WaitForGcToComplete(kGcCauseForAlloc, self);
@@ -1081,31 +1082,32 @@
// If we were the default allocator but the allocator changed while we were suspended,
// abort the allocation.
if (was_default_allocator && allocator != GetCurrentAllocator()) {
- *klass = sirt_klass.get();
return nullptr;
}
// A GC was in progress and we blocked, retry allocation now that memory has been freed.
- ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated, usable_size);
+ mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
+ usable_size);
+ if (ptr != nullptr) {
+ return ptr;
+ }
}
collector::GcType tried_type = next_gc_type_;
- if (ptr == nullptr) {
- const bool gc_ran =
- CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
- if (was_default_allocator && allocator != GetCurrentAllocator()) {
- *klass = sirt_klass.get();
- return nullptr;
- }
- if (gc_ran) {
- ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated, usable_size);
+ const bool gc_ran =
+ CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
+ if (was_default_allocator && allocator != GetCurrentAllocator()) {
+ return nullptr;
+ }
+ if (gc_ran) {
+ mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
+ usable_size);
+ if (ptr != nullptr) {
+ return ptr;
}
}
// Loop through our different Gc types and try to Gc until we get enough free memory.
for (collector::GcType gc_type : gc_plan_) {
- if (ptr != nullptr) {
- break;
- }
if (gc_type == tried_type) {
continue;
}
@@ -1113,40 +1115,41 @@
const bool gc_ran =
CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
if (was_default_allocator && allocator != GetCurrentAllocator()) {
- *klass = sirt_klass.get();
return nullptr;
}
if (gc_ran) {
// Did we free sufficient memory for the allocation to succeed?
- ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated, usable_size);
+ mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
+ usable_size);
+ if (ptr != nullptr) {
+ return ptr;
+ }
}
}
// Allocations have failed after GCs; this is an exceptional state.
- if (ptr == nullptr) {
- // Try harder, growing the heap if necessary.
- ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size);
+ // Try harder, growing the heap if necessary.
+ mirror::Object* ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
+ usable_size);
+ if (ptr != nullptr) {
+ return ptr;
}
- if (ptr == nullptr) {
- // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
- // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
- // VM spec requires that all SoftReferences have been collected and cleared before throwing
- // OOME.
- VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
- << " allocation";
- // TODO: Run finalization, but this may cause more allocations to occur.
- // We don't need a WaitForGcToComplete here either.
- DCHECK(!gc_plan_.empty());
- CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
- if (was_default_allocator && allocator != GetCurrentAllocator()) {
- *klass = sirt_klass.get();
- return nullptr;
- }
- ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size);
- if (ptr == nullptr) {
- ThrowOutOfMemoryError(self, alloc_size, false);
- }
+ // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
+ // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
+ // VM spec requires that all SoftReferences have been collected and cleared before throwing
+ // OOME.
+ VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
+ << " allocation";
+ // TODO: Run finalization, but this may cause more allocations to occur.
+ // We don't need a WaitForGcToComplete here either.
+ DCHECK(!gc_plan_.empty());
+ CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
+ if (was_default_allocator && allocator != GetCurrentAllocator()) {
+ return nullptr;
}
- *klass = sirt_klass.get();
+ ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size);
+ if (ptr == nullptr) {
+ ThrowOutOfMemoryError(self, alloc_size, false);
+ }
return ptr;
}
@@ -2536,6 +2539,12 @@
*object = soa.Decode<mirror::Object*>(arg.get());
}
+void Heap::RequestConcurrentGCAndSaveObject(Thread* self, mirror::Object** obj) {
+ StackHandleScope<1> hs(self);
+ HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
+ RequestConcurrentGC(self);
+}
+
void Heap::RequestConcurrentGC(Thread* self) {
// Make sure that we can do a concurrent GC.
Runtime* runtime = Runtime::Current();
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index f71de1a..3b071d1 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -412,7 +412,7 @@
return GetTotalMemory() - num_bytes_allocated_;
}
- // Get the space that corresponds to an object's address. Current implementation searches all
+ // get the space that corresponds to an object's address. Current implementation searches all
// spaces in turn. If fail_ok is false then failing to find a space will cause an abort.
// TODO: consider using faster data structure like binary tree.
space::ContinuousSpace* FindContinuousSpaceFromObject(const mirror::Object*, bool fail_ok) const;
@@ -582,6 +582,10 @@
mirror::Object** obj)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ accounting::ObjectStack* GetMarkStack() {
+ return mark_stack_.get();
+ }
+
// We don't force this to be inlined since it is a slow path.
template <bool kInstrumented, typename PreFenceVisitor>
mirror::Object* AllocLargeObject(Thread* self, mirror::Class* klass, size_t byte_count,
@@ -634,7 +638,10 @@
void RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time)
LOCKS_EXCLUDED(heap_trim_request_lock_);
void RequestHeapTrim() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
- void RequestConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
+ void RequestConcurrentGCAndSaveObject(Thread* self, mirror::Object** obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void RequestConcurrentGC(Thread* self)
+ LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
bool IsGCRequestPending() const;
// Sometimes CollectGarbageInternal decides to run a different Gc than you requested. Returns
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index a85ad4d..8850b92 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -20,7 +20,7 @@
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
namespace art {
namespace gc {
@@ -43,14 +43,16 @@
ScopedObjectAccess soa(Thread::Current());
// garbage is created during ClassLinker::Init
- SirtRef<mirror::Class> c(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
- "[Ljava/lang/Object;"));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::Class> c(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));
for (size_t i = 0; i < 1024; ++i) {
- SirtRef<mirror::ObjectArray<mirror::Object> > array(soa.Self(),
- mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), c.get(), 2048));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ObjectArray<mirror::Object> > array(hs.NewHandle(
+ mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), c.Get(), 2048)));
for (size_t j = 0; j < 2048; ++j) {
mirror::String* string = mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!");
- // SIRT operator -> deferences the SIRT before running the method.
+ // handle scope operator -> deferences the handle scope before running the method.
array->Set<false>(j, string);
}
}
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index a7795e6..446f898 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -245,21 +245,6 @@
return nullptr;
}
- Runtime* runtime = Runtime::Current();
- mirror::Object* resolution_method = image_header.GetImageRoot(ImageHeader::kResolutionMethod);
- runtime->SetResolutionMethod(down_cast<mirror::ArtMethod*>(resolution_method));
- mirror::Object* imt_conflict_method = image_header.GetImageRoot(ImageHeader::kImtConflictMethod);
- runtime->SetImtConflictMethod(down_cast<mirror::ArtMethod*>(imt_conflict_method));
- mirror::Object* default_imt = image_header.GetImageRoot(ImageHeader::kDefaultImt);
- runtime->SetDefaultImt(down_cast<mirror::ObjectArray<mirror::ArtMethod>*>(default_imt));
-
- mirror::Object* callee_save_method = image_header.GetImageRoot(ImageHeader::kCalleeSaveMethod);
- runtime->SetCalleeSaveMethod(down_cast<mirror::ArtMethod*>(callee_save_method), Runtime::kSaveAll);
- callee_save_method = image_header.GetImageRoot(ImageHeader::kRefsOnlySaveMethod);
- runtime->SetCalleeSaveMethod(down_cast<mirror::ArtMethod*>(callee_save_method), Runtime::kRefsOnly);
- callee_save_method = image_header.GetImageRoot(ImageHeader::kRefsAndArgsSaveMethod);
- runtime->SetCalleeSaveMethod(down_cast<mirror::ArtMethod*>(callee_save_method), Runtime::kRefsAndArgs);
-
UniquePtr<ImageSpace> space(new ImageSpace(image_filename, image_location,
map.release(), bitmap.release()));
if (kIsDebugBuild) {
@@ -277,6 +262,23 @@
return nullptr;
}
+ Runtime* runtime = Runtime::Current();
+ runtime->SetInstructionSet(space->oat_file_->GetOatHeader().GetInstructionSet());
+
+ mirror::Object* resolution_method = image_header.GetImageRoot(ImageHeader::kResolutionMethod);
+ runtime->SetResolutionMethod(down_cast<mirror::ArtMethod*>(resolution_method));
+ mirror::Object* imt_conflict_method = image_header.GetImageRoot(ImageHeader::kImtConflictMethod);
+ runtime->SetImtConflictMethod(down_cast<mirror::ArtMethod*>(imt_conflict_method));
+ mirror::Object* default_imt = image_header.GetImageRoot(ImageHeader::kDefaultImt);
+ runtime->SetDefaultImt(down_cast<mirror::ObjectArray<mirror::ArtMethod>*>(default_imt));
+
+ mirror::Object* callee_save_method = image_header.GetImageRoot(ImageHeader::kCalleeSaveMethod);
+ runtime->SetCalleeSaveMethod(down_cast<mirror::ArtMethod*>(callee_save_method), Runtime::kSaveAll);
+ callee_save_method = image_header.GetImageRoot(ImageHeader::kRefsOnlySaveMethod);
+ runtime->SetCalleeSaveMethod(down_cast<mirror::ArtMethod*>(callee_save_method), Runtime::kRefsOnly);
+ callee_save_method = image_header.GetImageRoot(ImageHeader::kRefsAndArgsSaveMethod);
+ runtime->SetCalleeSaveMethod(down_cast<mirror::ArtMethod*>(callee_save_method), Runtime::kRefsAndArgs);
+
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "ImageSpace::Init exiting (" << PrettyDuration(NanoTime() - start_time)
<< ") " << *space.get();
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index dc2769e..6c851af 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -20,7 +20,7 @@
#include "base/logging.h"
#include "base/mutex-inl.h"
#include "base/stl_util.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#include "image.h"
#include "os.h"
#include "space-inl.h"
@@ -140,7 +140,8 @@
size_t LargeObjectMapSpace::Free(Thread* self, mirror::Object* ptr) {
MutexLock mu(self, lock_);
MemMaps::iterator found = mem_maps_.find(ptr);
- CHECK(found != mem_maps_.end()) << "Attempted to free large object which was not live";
+ CHECK(found != mem_maps_.end()) << "Attempted to free large object" << ptr
+ << "which was not live";
DCHECK_GE(num_bytes_allocated_, found->second->Size());
size_t allocation_size = found->second->Size();
num_bytes_allocated_ -= allocation_size;
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 7493c19..ba46dcc 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -24,7 +24,7 @@
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
#include "thread.h"
#include "thread_list.h"
#include "utils.h"
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index dcf5357..343bc29 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -19,7 +19,7 @@
#include <string>
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "gc/accounting/space_bitmap.h"
diff --git a/runtime/gc/space/space_test.h b/runtime/gc/space/space_test.h
index 3335e72..407d362 100644
--- a/runtime/gc/space/space_test.h
+++ b/runtime/gc/space/space_test.h
@@ -23,7 +23,7 @@
#include "common_runtime_test.h"
#include "globals.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#include "mirror/array-inl.h"
#include "mirror/object-inl.h"
@@ -48,7 +48,8 @@
}
mirror::Class* GetByteArrayClass(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- SirtRef<mirror::ClassLoader> null_loader(self, nullptr);
+ StackHandleScope<1> hs(self);
+ auto null_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
if (byte_array_class_ == nullptr) {
mirror::Class* byte_array_class =
Runtime::Current()->GetClassLinker()->FindClass(self, "[B", null_loader);
@@ -62,10 +63,11 @@
mirror::Object* Alloc(space::MallocSpace* alloc_space, Thread* self, size_t bytes,
size_t* bytes_allocated, size_t* usable_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- SirtRef<mirror::Class> byte_array_class(self, GetByteArrayClass(self));
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
mirror::Object* obj = alloc_space->Alloc(self, bytes, bytes_allocated, usable_size);
if (obj != nullptr) {
- InstallClass(obj, byte_array_class.get(), bytes);
+ InstallClass(obj, byte_array_class.Get(), bytes);
}
return obj;
}
@@ -73,10 +75,11 @@
mirror::Object* AllocWithGrowth(space::MallocSpace* alloc_space, Thread* self, size_t bytes,
size_t* bytes_allocated, size_t* usable_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- SirtRef<mirror::Class> byte_array_class(self, GetByteArrayClass(self));
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
mirror::Object* obj = alloc_space->AllocWithGrowth(self, bytes, bytes_allocated, usable_size);
if (obj != nullptr) {
- InstallClass(obj, byte_array_class.get(), bytes);
+ InstallClass(obj, byte_array_class.Get(), bytes);
}
return obj;
}
@@ -177,9 +180,10 @@
// Succeeds, fits without adjusting the footprint limit.
size_t ptr1_bytes_allocated, ptr1_usable_size;
- SirtRef<mirror::Object> ptr1(self, Alloc(space, self, 1 * MB, &ptr1_bytes_allocated,
- &ptr1_usable_size));
- EXPECT_TRUE(ptr1.get() != nullptr);
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::Object> ptr1(
+ hs.NewHandle(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size)));
+ EXPECT_TRUE(ptr1.Get() != nullptr);
EXPECT_LE(1U * MB, ptr1_bytes_allocated);
EXPECT_LE(1U * MB, ptr1_usable_size);
EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);
@@ -190,9 +194,9 @@
// Succeeds, adjusts the footprint.
size_t ptr3_bytes_allocated, ptr3_usable_size;
- SirtRef<mirror::Object> ptr3(self, AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated,
- &ptr3_usable_size));
- EXPECT_TRUE(ptr3.get() != nullptr);
+ Handle<mirror::Object> ptr3(
+ hs.NewHandle(AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated, &ptr3_usable_size)));
+ EXPECT_TRUE(ptr3.Get() != nullptr);
EXPECT_LE(8U * MB, ptr3_bytes_allocated);
EXPECT_LE(8U * MB, ptr3_usable_size);
EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);
@@ -206,23 +210,23 @@
EXPECT_TRUE(ptr5 == nullptr);
// Release some memory.
- size_t free3 = space->AllocationSize(ptr3.get(), nullptr);
+ size_t free3 = space->AllocationSize(ptr3.Get(), nullptr);
EXPECT_EQ(free3, ptr3_bytes_allocated);
- EXPECT_EQ(free3, space->Free(self, ptr3.reset(nullptr)));
+ EXPECT_EQ(free3, space->Free(self, ptr3.Assign(nullptr)));
EXPECT_LE(8U * MB, free3);
// Succeeds, now that memory has been freed.
size_t ptr6_bytes_allocated, ptr6_usable_size;
- SirtRef<mirror::Object> ptr6(self, AllocWithGrowth(space, self, 9 * MB, &ptr6_bytes_allocated,
- &ptr6_usable_size));
- EXPECT_TRUE(ptr6.get() != nullptr);
+ Handle<mirror::Object> ptr6(
+ hs.NewHandle(AllocWithGrowth(space, self, 9 * MB, &ptr6_bytes_allocated, &ptr6_usable_size)));
+ EXPECT_TRUE(ptr6.Get() != nullptr);
EXPECT_LE(9U * MB, ptr6_bytes_allocated);
EXPECT_LE(9U * MB, ptr6_usable_size);
EXPECT_LE(ptr6_usable_size, ptr6_bytes_allocated);
// Final clean up.
- size_t free1 = space->AllocationSize(ptr1.get(), nullptr);
- space->Free(self, ptr1.reset(nullptr));
+ size_t free1 = space->AllocationSize(ptr1.Get(), nullptr);
+ space->Free(self, ptr1.Assign(nullptr));
EXPECT_LE(1U * MB, free1);
// Make sure that the zygote space isn't directly at the start of the space.
@@ -243,8 +247,8 @@
AddSpace(space, false);
// Succeeds, fits without adjusting the footprint limit.
- ptr1.reset(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size));
- EXPECT_TRUE(ptr1.get() != nullptr);
+ ptr1.Assign(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size));
+ EXPECT_TRUE(ptr1.Get() != nullptr);
EXPECT_LE(1U * MB, ptr1_bytes_allocated);
EXPECT_LE(1U * MB, ptr1_usable_size);
EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);
@@ -254,16 +258,16 @@
EXPECT_TRUE(ptr2 == nullptr);
// Succeeds, adjusts the footprint.
- ptr3.reset(AllocWithGrowth(space, self, 2 * MB, &ptr3_bytes_allocated, &ptr3_usable_size));
- EXPECT_TRUE(ptr3.get() != nullptr);
+ ptr3.Assign(AllocWithGrowth(space, self, 2 * MB, &ptr3_bytes_allocated, &ptr3_usable_size));
+ EXPECT_TRUE(ptr3.Get() != nullptr);
EXPECT_LE(2U * MB, ptr3_bytes_allocated);
EXPECT_LE(2U * MB, ptr3_usable_size);
EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);
- space->Free(self, ptr3.reset(nullptr));
+ space->Free(self, ptr3.Assign(nullptr));
// Final clean up.
- free1 = space->AllocationSize(ptr1.get(), nullptr);
- space->Free(self, ptr1.reset(nullptr));
+ free1 = space->AllocationSize(ptr1.Get(), nullptr);
+ space->Free(self, ptr1.Assign(nullptr));
EXPECT_LE(1U * MB, free1);
}
@@ -279,9 +283,10 @@
// Succeeds, fits without adjusting the footprint limit.
size_t ptr1_bytes_allocated, ptr1_usable_size;
- SirtRef<mirror::Object> ptr1(self, Alloc(space, self, 1 * MB, &ptr1_bytes_allocated,
- &ptr1_usable_size));
- EXPECT_TRUE(ptr1.get() != nullptr);
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::Object> ptr1(
+ hs.NewHandle(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size)));
+ EXPECT_TRUE(ptr1.Get() != nullptr);
EXPECT_LE(1U * MB, ptr1_bytes_allocated);
EXPECT_LE(1U * MB, ptr1_usable_size);
EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);
@@ -292,9 +297,9 @@
// Succeeds, adjusts the footprint.
size_t ptr3_bytes_allocated, ptr3_usable_size;
- SirtRef<mirror::Object> ptr3(self, AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated,
- &ptr3_usable_size));
- EXPECT_TRUE(ptr3.get() != nullptr);
+ Handle<mirror::Object> ptr3(
+ hs.NewHandle(AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated, &ptr3_usable_size)));
+ EXPECT_TRUE(ptr3.Get() != nullptr);
EXPECT_LE(8U * MB, ptr3_bytes_allocated);
EXPECT_LE(8U * MB, ptr3_usable_size);
EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);
@@ -308,23 +313,23 @@
EXPECT_TRUE(ptr5 == nullptr);
// Release some memory.
- size_t free3 = space->AllocationSize(ptr3.get(), nullptr);
+ size_t free3 = space->AllocationSize(ptr3.Get(), nullptr);
EXPECT_EQ(free3, ptr3_bytes_allocated);
- space->Free(self, ptr3.reset(nullptr));
+ space->Free(self, ptr3.Assign(nullptr));
EXPECT_LE(8U * MB, free3);
// Succeeds, now that memory has been freed.
size_t ptr6_bytes_allocated, ptr6_usable_size;
- SirtRef<mirror::Object> ptr6(self, AllocWithGrowth(space, self, 9 * MB, &ptr6_bytes_allocated,
- &ptr6_usable_size));
- EXPECT_TRUE(ptr6.get() != nullptr);
+ Handle<mirror::Object> ptr6(
+ hs.NewHandle(AllocWithGrowth(space, self, 9 * MB, &ptr6_bytes_allocated, &ptr6_usable_size)));
+ EXPECT_TRUE(ptr6.Get() != nullptr);
EXPECT_LE(9U * MB, ptr6_bytes_allocated);
EXPECT_LE(9U * MB, ptr6_usable_size);
EXPECT_LE(ptr6_usable_size, ptr6_bytes_allocated);
// Final clean up.
- size_t free1 = space->AllocationSize(ptr1.get(), nullptr);
- space->Free(self, ptr1.reset(nullptr));
+ size_t free1 = space->AllocationSize(ptr1.Get(), nullptr);
+ space->Free(self, ptr1.Assign(nullptr));
EXPECT_LE(1U * MB, free1);
}
@@ -345,8 +350,6 @@
lots_of_objects[i] = Alloc(space, self, size_of_zero_length_byte_array, &allocation_size,
&usable_size);
EXPECT_TRUE(lots_of_objects[i] != nullptr);
- SirtRef<mirror::Object> obj(self, lots_of_objects[i]);
- lots_of_objects[i] = obj.get();
size_t computed_usable_size;
EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i], &computed_usable_size));
EXPECT_EQ(usable_size, computed_usable_size);
@@ -360,8 +363,6 @@
size_t allocation_size, usable_size;
lots_of_objects[i] = AllocWithGrowth(space, self, 1024, &allocation_size, &usable_size);
EXPECT_TRUE(lots_of_objects[i] != nullptr);
- SirtRef<mirror::Object> obj(self, lots_of_objects[i]);
- lots_of_objects[i] = obj.get();
size_t computed_usable_size;
EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i], &computed_usable_size));
EXPECT_EQ(usable_size, computed_usable_size);
@@ -418,18 +419,19 @@
alloc_size = size_of_zero_length_byte_array;
}
}
- SirtRef<mirror::Object> object(self, nullptr);
+ StackHandleScope<1> hs(soa.Self());
+ auto object(hs.NewHandle<mirror::Object>(nullptr));
size_t bytes_allocated = 0;
if (round <= 1) {
- object.reset(Alloc(space, self, alloc_size, &bytes_allocated, nullptr));
+ object.Assign(Alloc(space, self, alloc_size, &bytes_allocated, nullptr));
} else {
- object.reset(AllocWithGrowth(space, self, alloc_size, &bytes_allocated, nullptr));
+ object.Assign(AllocWithGrowth(space, self, alloc_size, &bytes_allocated, nullptr));
}
footprint = space->GetFootprint();
EXPECT_GE(space->Size(), footprint); // invariant
- if (object.get() != nullptr) { // allocation succeeded
- lots_of_objects[i] = object.get();
- size_t allocation_size = space->AllocationSize(object.get(), nullptr);
+ if (object.Get() != nullptr) { // allocation succeeded
+ lots_of_objects[i] = object.Get();
+ size_t allocation_size = space->AllocationSize(object.Get(), nullptr);
EXPECT_EQ(bytes_allocated, allocation_size);
if (object_size > 0) {
EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
@@ -509,16 +511,17 @@
space->RevokeAllThreadLocalBuffers();
// All memory was released, try a large allocation to check freed memory is being coalesced
- SirtRef<mirror::Object> large_object(self, nullptr);
+ StackHandleScope<1> hs(soa.Self());
+ auto large_object(hs.NewHandle<mirror::Object>(nullptr));
size_t three_quarters_space = (growth_limit / 2) + (growth_limit / 4);
size_t bytes_allocated = 0;
if (round <= 1) {
- large_object.reset(Alloc(space, self, three_quarters_space, &bytes_allocated, nullptr));
+ large_object.Assign(Alloc(space, self, three_quarters_space, &bytes_allocated, nullptr));
} else {
- large_object.reset(AllocWithGrowth(space, self, three_quarters_space, &bytes_allocated,
- nullptr));
+ large_object.Assign(AllocWithGrowth(space, self, three_quarters_space, &bytes_allocated,
+ nullptr));
}
- EXPECT_TRUE(large_object.get() != nullptr);
+ EXPECT_TRUE(large_object.Get() != nullptr);
// Sanity check footprint
footprint = space->GetFootprint();
@@ -527,7 +530,7 @@
EXPECT_LE(space->Size(), growth_limit);
// Clean up
- space->Free(self, large_object.reset(nullptr));
+ space->Free(self, large_object.Assign(nullptr));
// Sanity check footprint
footprint = space->GetFootprint();
diff --git a/runtime/handle.h b/runtime/handle.h
new file mode 100644
index 0000000..3127864
--- /dev/null
+++ b/runtime/handle.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_HANDLE_H_
+#define ART_RUNTIME_HANDLE_H_
+
+#include "base/casts.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "stack.h"
+
+namespace art {
+
+class Thread;
+
+template<class T>
+class Handle {
+ public:
+ Handle() : reference_(nullptr) {
+ }
+ Handle(const Handle<T>& handle) ALWAYS_INLINE : reference_(handle.reference_) {
+ }
+ Handle<T>& operator=(const Handle<T>& handle) ALWAYS_INLINE {
+ reference_ = handle.reference_;
+ return *this;
+ }
+ explicit Handle(StackReference<T>* reference) ALWAYS_INLINE : reference_(reference) {
+ }
+ T& operator*() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+ return *Get();
+ }
+ T* operator->() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+ return Get();
+ }
+ T* Get() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+ return reference_->AsMirrorPtr();
+ }
+ T* Assign(T* reference) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+ T* old = reference_->AsMirrorPtr();
+ reference_->Assign(reference);
+ return old;
+ }
+ jobject ToJObject() const ALWAYS_INLINE {
+ return reinterpret_cast<jobject>(reference_);
+ }
+
+ private:
+ StackReference<T>* reference_;
+
+ template<typename S>
+ explicit Handle(StackReference<S>* reference)
+ : reference_(reinterpret_cast<StackReference<T>*>(reference)) {
+ }
+
+ template<typename S>
+ explicit Handle(const Handle<S>& handle)
+ : reference_(reinterpret_cast<StackReference<T>*>(handle.reference_)) {
+ }
+
+ template<class S> friend class Handle;
+ friend class HandleScope;
+ template<class S> friend class HandleWrapper;
+ template<size_t kNumReferences> friend class StackHandleScope;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_HANDLE_H_
diff --git a/runtime/handle_scope-inl.h b/runtime/handle_scope-inl.h
new file mode 100644
index 0000000..634f2be
--- /dev/null
+++ b/runtime/handle_scope-inl.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_HANDLE_SCOPE_INL_H_
+#define ART_RUNTIME_HANDLE_SCOPE_INL_H_
+
+#include "handle_scope.h"
+
+#include "handle.h"
+#include "thread.h"
+
+namespace art {
+
+template<size_t kNumReferences>
+inline StackHandleScope<kNumReferences>::StackHandleScope(Thread* self)
+ : HandleScope(kNumReferences), self_(self), pos_(0) {
+ // TODO: Figure out how to use a compile assert.
+ DCHECK_EQ(OFFSETOF_MEMBER(HandleScope, references_),
+ OFFSETOF_MEMBER(StackHandleScope<1>, references_storage_));
+ for (size_t i = 0; i < kNumReferences; ++i) {
+ SetReference(i, nullptr);
+ }
+ self_->PushHandleScope(this);
+}
+
+template<size_t kNumReferences>
+inline StackHandleScope<kNumReferences>::~StackHandleScope() {
+ HandleScope* top_handle_scope = self_->PopHandleScope();
+ DCHECK_EQ(top_handle_scope, this);
+}
+
+} // namespace art
+
+#endif // ART_RUNTIME_HANDLE_SCOPE_INL_H_
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
new file mode 100644
index 0000000..27c1bdc
--- /dev/null
+++ b/runtime/handle_scope.h
@@ -0,0 +1,193 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_HANDLE_SCOPE_H_
+#define ART_RUNTIME_HANDLE_SCOPE_H_
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "handle.h"
+#include "stack.h"
+#include "utils.h"
+
+namespace art {
+namespace mirror {
+class Object;
+}
+class Thread;
+
+// HandleScopes can be allocated within the bridge frame between managed and native code backed by
+// stack storage or manually allocated in native.
+class HandleScope {
+ public:
+ ~HandleScope() {}
+
+ // Number of references contained within this handle scope.
+ uint32_t NumberOfReferences() const {
+ return number_of_references_;
+ }
+
+ // We have versions with and without explicit pointer size of the following. The first two are
+ // used at runtime, so OFFSETOF_MEMBER computes the right offsets automatically. The last one
+ // takes the pointer size explicitly so that at compile time we can cross-compile correctly.
+
+ // Returns the size of a HandleScope containing num_references handles.
+ static size_t SizeOf(uint32_t num_references) {
+ size_t header_size = OFFSETOF_MEMBER(HandleScope, references_);
+ size_t data_size = sizeof(StackReference<mirror::Object>) * num_references;
+ return header_size + data_size;
+ }
+
+ // Get the size of the handle scope for the number of entries, with padding added for potential alignment.
+ static size_t GetAlignedHandleScopeSize(uint32_t num_references) {
+ size_t handle_scope_size = SizeOf(num_references);
+ return RoundUp(handle_scope_size, 8);
+ }
+
+ // Variant of GetAlignedHandleScopeSize that takes the pointer size explicitly, for cross-compilation.
+ static size_t GetAlignedHandleScopeSizeTarget(size_t pointer_size, uint32_t num_references) {
+ // Assume that the layout is packed.
+ size_t header_size = pointer_size + sizeof(number_of_references_);
+ // This assumes there is no layout change between 32 and 64b.
+ size_t data_size = sizeof(StackReference<mirror::Object>) * num_references;
+ size_t handle_scope_size = header_size + data_size;
+ return RoundUp(handle_scope_size, 8);
+ }
+
+ // Link to previous HandleScope or null.
+ HandleScope* GetLink() const {
+ return link_;
+ }
+
+ void SetLink(HandleScope* link) {
+ DCHECK_NE(this, link);
+ link_ = link;
+ }
+
+ // Sets the number_of_references_ field for constructing tables out of raw memory. Warning: will
+ // not resize anything.
+ void SetNumberOfReferences(uint32_t num_references) {
+ number_of_references_ = num_references;
+ }
+
+ mirror::Object* GetReference(size_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ ALWAYS_INLINE {
+ DCHECK_LT(i, number_of_references_);
+ return references_[i].AsMirrorPtr();
+ }
+
+ Handle<mirror::Object> GetHandle(size_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ ALWAYS_INLINE {
+ DCHECK_LT(i, number_of_references_);
+ return Handle<mirror::Object>(&references_[i]);
+ }
+
+ void SetReference(size_t i, mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ ALWAYS_INLINE {
+ DCHECK_LT(i, number_of_references_);
+ references_[i].Assign(object);
+ }
+
+ bool Contains(StackReference<mirror::Object>* handle_scope_entry) const {
+ // A HandleScope should always contain something. One created by the
+ // jni_compiler should have a jobject/jclass as a native method is
+ // passed in a this pointer or a class
+ DCHECK_GT(number_of_references_, 0U);
+ return ((&references_[0] <= handle_scope_entry)
+ && (handle_scope_entry <= (&references_[number_of_references_ - 1])));
+ }
+
+ // Offset of link within HandleScope, used by generated code
+ static size_t LinkOffset(size_t pointer_size) {
+ return 0;
+ }
+
+ // Offset of length within handle scope, used by generated code
+ static size_t NumberOfReferencesOffset(size_t pointer_size) {
+ return pointer_size;
+ }
+
+ // Offset of link within handle scope, used by generated code
+ static size_t ReferencesOffset(size_t pointer_size) {
+ return pointer_size + sizeof(number_of_references_);
+ }
+
+ protected:
+ explicit HandleScope(size_t number_of_references) :
+ link_(nullptr), number_of_references_(number_of_references) {
+ }
+
+ HandleScope* link_;
+ uint32_t number_of_references_;
+
+ // number_of_references_ are available if this is allocated and filled in by jni_compiler.
+ StackReference<mirror::Object> references_[0];
+
+ private:
+ template<size_t kNumReferences> friend class StackHandleScope;
+ DISALLOW_COPY_AND_ASSIGN(HandleScope);
+};
+
+// A wrapper which wraps around Object** and restores the pointer in the destructor.
+// TODO: Add more functionality.
+template<class T>
+class HandleWrapper {
+ public:
+ HandleWrapper(T** obj, const Handle<T>& handle)
+ : obj_(obj), handle_(handle) {
+ }
+
+ ~HandleWrapper() {
+ *obj_ = handle_.Get();
+ }
+
+ private:
+ T** obj_;
+ Handle<T> handle_;
+};
+
+// Scoped handle storage of a fixed size that is usually stack allocated.
+template<size_t kNumReferences>
+class StackHandleScope : public HandleScope {
+ public:
+ explicit StackHandleScope(Thread* self);
+ ~StackHandleScope();
+
+ template<class T>
+ Handle<T> NewHandle(T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SetReference(pos_, object);
+ return Handle<T>(GetHandle(pos_++));
+ }
+
+ template<class T>
+ HandleWrapper<T> NewHandleWrapper(T** object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SetReference(pos_, *object);
+ Handle<T> h(GetHandle(pos_++));
+ return HandleWrapper<T>(object, h);
+ }
+
+ private:
+ // references_storage_ needs to be first so that it matches the address of references_.
+ StackReference<mirror::Object> references_storage_[kNumReferences];
+ Thread* const self_;
+ size_t pos_;
+
+ template<size_t kNumRefs> friend class StackHandleScope;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_HANDLE_SCOPE_H_
diff --git a/runtime/stack_indirect_reference_table_test.cc b/runtime/handle_scope_test.cc
similarity index 60%
rename from runtime/stack_indirect_reference_table_test.cc
rename to runtime/handle_scope_test.cc
index 72ef6b6..de563c1 100644
--- a/runtime/stack_indirect_reference_table_test.cc
+++ b/runtime/handle_scope_test.cc
@@ -14,33 +14,49 @@
* limitations under the License.
*/
-#include "stack_indirect_reference_table.h"
#include "gtest/gtest.h"
+#include "handle_scope-inl.h"
+#include "scoped_thread_state_change.h"
+#include "thread.h"
namespace art {
-// Test the offsets computed for members of StackIndirectReferenceTable. Because of cross-compiling
+// Handle scope with a fixed size which is allocated on the stack.
+template<size_t kNumReferences>
+class NoThreadStackHandleScope : public HandleScope {
+ public:
+ NoThreadStackHandleScope() : HandleScope(kNumReferences) {
+ }
+ ~NoThreadStackHandleScope() {
+ }
+
+ private:
+ // references_storage_ needs to be first so that it matches the address of references_
+ StackReference<mirror::Object> references_storage_[kNumReferences];
+};
+
+// Test the offsets computed for members of HandleScope. Because of cross-compiling
// it is impossible the use OFFSETOF_MEMBER, so we do some reasonable computations ourselves. This
// test checks whether we do the right thing.
-TEST(StackIndirectReferenceTableTest, Offsets) {
- // As the members of StackIndirectReferenceTable are private, we cannot use OFFSETOF_MEMBER
+TEST(HandleScopeTest, Offsets) NO_THREAD_SAFETY_ANALYSIS {
+ // As the members of HandleScope are private, we cannot use OFFSETOF_MEMBER
// here. So do the inverse: set some data, and access it through pointers created from the offsets.
-
- StackIndirectReferenceTable test_table(reinterpret_cast<mirror::Object*>(0x1234));
- test_table.SetLink(reinterpret_cast<StackIndirectReferenceTable*>(0x5678));
+ NoThreadStackHandleScope<1> test_table;
+ test_table.SetReference(0, reinterpret_cast<mirror::Object*>(0x1234));
+ test_table.SetLink(reinterpret_cast<HandleScope*>(0x5678));
test_table.SetNumberOfReferences(0x9ABC);
byte* table_base_ptr = reinterpret_cast<byte*>(&test_table);
{
uintptr_t* link_ptr = reinterpret_cast<uintptr_t*>(table_base_ptr +
- StackIndirectReferenceTable::LinkOffset(kPointerSize));
+ HandleScope::LinkOffset(kPointerSize));
EXPECT_EQ(*link_ptr, static_cast<size_t>(0x5678));
}
{
uint32_t* num_ptr = reinterpret_cast<uint32_t*>(table_base_ptr +
- StackIndirectReferenceTable::NumberOfReferencesOffset(kPointerSize));
+ HandleScope::NumberOfReferencesOffset(kPointerSize));
EXPECT_EQ(*num_ptr, static_cast<size_t>(0x9ABC));
}
@@ -50,7 +66,7 @@
EXPECT_EQ(sizeof(StackReference<mirror::Object>), sizeof(uint32_t));
uint32_t* ref_ptr = reinterpret_cast<uint32_t*>(table_base_ptr +
- StackIndirectReferenceTable::ReferencesOffset(kPointerSize));
+ HandleScope::ReferencesOffset(kPointerSize));
EXPECT_EQ(*ref_ptr, static_cast<uint32_t>(0x1234));
}
}
diff --git a/runtime/indirect_reference_table-inl.h b/runtime/indirect_reference_table-inl.h
index 1a28347..42a9757 100644
--- a/runtime/indirect_reference_table-inl.h
+++ b/runtime/indirect_reference_table-inl.h
@@ -33,7 +33,7 @@
LOG(WARNING) << "Attempt to look up NULL " << kind_;
return false;
}
- if (UNLIKELY(GetIndirectRefKind(iref) == kSirtOrInvalid)) {
+ if (UNLIKELY(GetIndirectRefKind(iref) == kHandleScopeOrInvalid)) {
LOG(ERROR) << "JNI ERROR (app bug): invalid " << kind_ << " " << iref;
AbortIfNoCheckJNI();
return false;
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index b81e43a..432481b 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -66,7 +66,7 @@
size_t maxCount, IndirectRefKind desiredKind) {
CHECK_GT(initialCount, 0U);
CHECK_LE(initialCount, maxCount);
- CHECK_NE(desiredKind, kSirtOrInvalid);
+ CHECK_NE(desiredKind, kHandleScopeOrInvalid);
std::string error_str;
const size_t initial_bytes = initialCount * sizeof(const mirror::Object*);
@@ -184,9 +184,9 @@
int idx = ExtractIndex(iref);
- if (GetIndirectRefKind(iref) == kSirtOrInvalid &&
- Thread::Current()->SirtContains(reinterpret_cast<jobject>(iref))) {
- LOG(WARNING) << "Attempt to remove local SIRT entry from IRT, ignoring";
+ if (GetIndirectRefKind(iref) == kHandleScopeOrInvalid &&
+ Thread::Current()->HandleScopeContains(reinterpret_cast<jobject>(iref))) {
+ LOG(WARNING) << "Attempt to remove local handle scope entry from IRT, ignoring";
return true;
}
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index f365acc..833b07a 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -111,7 +111,7 @@
* For convenience these match up with enum jobjectRefType from jni.h.
*/
enum IndirectRefKind {
- kSirtOrInvalid = 0, // <<stack indirect reference table or invalid reference>>
+ kHandleScopeOrInvalid = 0, // <<handle scope or invalid reference>>
kLocal = 1, // <<local reference>>
kGlobal = 2, // <<global reference>>
kWeakGlobal = 3 // <<weak global reference>>
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 3de0728..5630862 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -238,6 +238,7 @@
bool IsActive() const {
return have_dex_pc_listeners_ || have_method_entry_listeners_ || have_method_exit_listeners_ ||
+ have_field_read_listeners_ || have_field_write_listeners_ ||
have_exception_caught_listeners_ || have_method_unwind_listeners_;
}
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index dfc82dd..2a8cc63 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -22,7 +22,7 @@
#include "mirror/object-inl.h"
#include "mirror/string.h"
#include "thread.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#include "utf.h"
namespace art {
diff --git a/runtime/intern_table_test.cc b/runtime/intern_table_test.cc
index 8987127..5995d9e 100644
--- a/runtime/intern_table_test.cc
+++ b/runtime/intern_table_test.cc
@@ -18,7 +18,7 @@
#include "common_runtime_test.h"
#include "mirror/object.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
namespace art {
@@ -27,19 +27,21 @@
TEST_F(InternTableTest, Intern) {
ScopedObjectAccess soa(Thread::Current());
InternTable intern_table;
- SirtRef<mirror::String> foo_1(soa.Self(), intern_table.InternStrong(3, "foo"));
- SirtRef<mirror::String> foo_2(soa.Self(), intern_table.InternStrong(3, "foo"));
- SirtRef<mirror::String> foo_3(soa.Self(), mirror::String::AllocFromModifiedUtf8(soa.Self(), "foo"));
- SirtRef<mirror::String> bar(soa.Self(), intern_table.InternStrong(3, "bar"));
+ StackHandleScope<4> hs(soa.Self());
+ Handle<mirror::String> foo_1(hs.NewHandle(intern_table.InternStrong(3, "foo")));
+ Handle<mirror::String> foo_2(hs.NewHandle(intern_table.InternStrong(3, "foo")));
+ Handle<mirror::String> foo_3(
+ hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "foo")));
+ Handle<mirror::String> bar(hs.NewHandle(intern_table.InternStrong(3, "bar")));
EXPECT_TRUE(foo_1->Equals("foo"));
EXPECT_TRUE(foo_2->Equals("foo"));
EXPECT_TRUE(foo_3->Equals("foo"));
- EXPECT_TRUE(foo_1.get() != NULL);
- EXPECT_TRUE(foo_2.get() != NULL);
- EXPECT_EQ(foo_1.get(), foo_2.get());
- EXPECT_NE(foo_1.get(), bar.get());
- EXPECT_NE(foo_2.get(), bar.get());
- EXPECT_NE(foo_3.get(), bar.get());
+ EXPECT_TRUE(foo_1.Get() != NULL);
+ EXPECT_TRUE(foo_2.Get() != NULL);
+ EXPECT_EQ(foo_1.Get(), foo_2.Get());
+ EXPECT_NE(foo_1.Get(), bar.Get());
+ EXPECT_NE(foo_2.Get(), bar.Get());
+ EXPECT_NE(foo_3.Get(), bar.Get());
}
TEST_F(InternTableTest, Size) {
@@ -47,8 +49,10 @@
InternTable t;
EXPECT_EQ(0U, t.Size());
t.InternStrong(3, "foo");
- SirtRef<mirror::String> foo(soa.Self(), mirror::String::AllocFromModifiedUtf8(soa.Self(), "foo"));
- t.InternWeak(foo.get());
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::String> foo(
+ hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "foo")));
+ t.InternWeak(foo.Get());
EXPECT_EQ(1U, t.Size());
t.InternStrong(3, "bar");
EXPECT_EQ(2U, t.Size());
@@ -93,19 +97,20 @@
InternTable t;
t.InternStrong(3, "foo");
t.InternStrong(3, "bar");
- SirtRef<mirror::String> hello(soa.Self(),
- mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello"));
- SirtRef<mirror::String> world(soa.Self(),
- mirror::String::AllocFromModifiedUtf8(soa.Self(), "world"));
- SirtRef<mirror::String> s0(soa.Self(), t.InternWeak(hello.get()));
- SirtRef<mirror::String> s1(soa.Self(), t.InternWeak(world.get()));
+ StackHandleScope<5> hs(soa.Self());
+ Handle<mirror::String> hello(
+ hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello")));
+ Handle<mirror::String> world(
+ hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "world")));
+ Handle<mirror::String> s0(hs.NewHandle(t.InternWeak(hello.Get())));
+ Handle<mirror::String> s1(hs.NewHandle(t.InternWeak(world.Get())));
EXPECT_EQ(4U, t.Size());
// We should traverse only the weaks...
TestPredicate p;
- p.Expect(s0.get());
- p.Expect(s1.get());
+ p.Expect(s0.Get());
+ p.Expect(s1.Get());
{
ReaderMutexLock mu(soa.Self(), *Locks::heap_bitmap_lock_);
t.SweepInternTableWeaks(IsMarkedSweepingCallback, &p);
@@ -114,9 +119,9 @@
EXPECT_EQ(2U, t.Size());
// Just check that we didn't corrupt the map.
- SirtRef<mirror::String> still_here(soa.Self(),
- mirror::String::AllocFromModifiedUtf8(soa.Self(), "still here"));
- t.InternWeak(still_here.get());
+ Handle<mirror::String> still_here(
+ hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "still here")));
+ t.InternWeak(still_here.Get());
EXPECT_EQ(3U, t.Size());
}
@@ -125,48 +130,53 @@
{
// Strongs are never weak.
InternTable t;
- SirtRef<mirror::String> interned_foo_1(soa.Self(), t.InternStrong(3, "foo"));
- EXPECT_FALSE(t.ContainsWeak(interned_foo_1.get()));
- SirtRef<mirror::String> interned_foo_2(soa.Self(), t.InternStrong(3, "foo"));
- EXPECT_FALSE(t.ContainsWeak(interned_foo_2.get()));
- EXPECT_EQ(interned_foo_1.get(), interned_foo_2.get());
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::String> interned_foo_1(hs.NewHandle(t.InternStrong(3, "foo")));
+ EXPECT_FALSE(t.ContainsWeak(interned_foo_1.Get()));
+ Handle<mirror::String> interned_foo_2(hs.NewHandle(t.InternStrong(3, "foo")));
+ EXPECT_FALSE(t.ContainsWeak(interned_foo_2.Get()));
+ EXPECT_EQ(interned_foo_1.Get(), interned_foo_2.Get());
}
{
// Weaks are always weak.
InternTable t;
- SirtRef<mirror::String> foo_1(soa.Self(),
- mirror::String::AllocFromModifiedUtf8(soa.Self(), "foo"));
- SirtRef<mirror::String> foo_2(soa.Self(),
- mirror::String::AllocFromModifiedUtf8(soa.Self(), "foo"));
- EXPECT_NE(foo_1.get(), foo_2.get());
- SirtRef<mirror::String> interned_foo_1(soa.Self(), t.InternWeak(foo_1.get()));
- SirtRef<mirror::String> interned_foo_2(soa.Self(), t.InternWeak(foo_2.get()));
- EXPECT_TRUE(t.ContainsWeak(interned_foo_2.get()));
- EXPECT_EQ(interned_foo_1.get(), interned_foo_2.get());
+ StackHandleScope<4> hs(soa.Self());
+ Handle<mirror::String> foo_1(
+ hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "foo")));
+ Handle<mirror::String> foo_2(
+ hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "foo")));
+ EXPECT_NE(foo_1.Get(), foo_2.Get());
+ Handle<mirror::String> interned_foo_1(hs.NewHandle(t.InternWeak(foo_1.Get())));
+ Handle<mirror::String> interned_foo_2(hs.NewHandle(t.InternWeak(foo_2.Get())));
+ EXPECT_TRUE(t.ContainsWeak(interned_foo_2.Get()));
+ EXPECT_EQ(interned_foo_1.Get(), interned_foo_2.Get());
}
{
// A weak can be promoted to a strong.
InternTable t;
- SirtRef<mirror::String> foo(soa.Self(), mirror::String::AllocFromModifiedUtf8(soa.Self(), "foo"));
- SirtRef<mirror::String> interned_foo_1(soa.Self(), t.InternWeak(foo.get()));
- EXPECT_TRUE(t.ContainsWeak(interned_foo_1.get()));
- SirtRef<mirror::String> interned_foo_2(soa.Self(), t.InternStrong(3, "foo"));
- EXPECT_FALSE(t.ContainsWeak(interned_foo_2.get()));
- EXPECT_EQ(interned_foo_1.get(), interned_foo_2.get());
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::String> foo(
+ hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "foo")));
+ Handle<mirror::String> interned_foo_1(hs.NewHandle(t.InternWeak(foo.Get())));
+ EXPECT_TRUE(t.ContainsWeak(interned_foo_1.Get()));
+ Handle<mirror::String> interned_foo_2(hs.NewHandle(t.InternStrong(3, "foo")));
+ EXPECT_FALSE(t.ContainsWeak(interned_foo_2.Get()));
+ EXPECT_EQ(interned_foo_1.Get(), interned_foo_2.Get());
}
{
// Interning a weak after a strong gets you the strong.
InternTable t;
- SirtRef<mirror::String> interned_foo_1(soa.Self(), t.InternStrong(3, "foo"));
- EXPECT_FALSE(t.ContainsWeak(interned_foo_1.get()));
- SirtRef<mirror::String> foo(soa.Self(),
- mirror::String::AllocFromModifiedUtf8(soa.Self(), "foo"));
- SirtRef<mirror::String> interned_foo_2(soa.Self(), t.InternWeak(foo.get()));
- EXPECT_FALSE(t.ContainsWeak(interned_foo_2.get()));
- EXPECT_EQ(interned_foo_1.get(), interned_foo_2.get());
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::String> interned_foo_1(hs.NewHandle(t.InternStrong(3, "foo")));
+ EXPECT_FALSE(t.ContainsWeak(interned_foo_1.Get()));
+ Handle<mirror::String> foo(
+ hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "foo")));
+ Handle<mirror::String> interned_foo_2(hs.NewHandle(t.InternWeak(foo.Get())));
+ EXPECT_FALSE(t.ContainsWeak(interned_foo_2.Get()));
+ EXPECT_EQ(interned_foo_1.Get(), interned_foo_2.Get());
}
}
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index a87f95c..20e2b8d 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -71,10 +71,10 @@
} else if (name == "int java.lang.String.fastIndexOf(int, int)") {
result->SetI(receiver->AsString()->FastIndexOf(args[0], args[1]));
} else if (name == "java.lang.Object java.lang.reflect.Array.createMultiArray(java.lang.Class, int[])") {
- SirtRef<mirror::Class> sirt_class(self, reinterpret_cast<Object*>(args[0])->AsClass());
- SirtRef<mirror::IntArray> sirt_dimensions(self,
- reinterpret_cast<Object*>(args[1])->AsIntArray());
- result->SetL(Array::CreateMultiArray(self, sirt_class, sirt_dimensions));
+ StackHandleScope<2> hs(self);
+ auto h_class(hs.NewHandle(reinterpret_cast<mirror::Class*>(args[0])->AsClass()));
+ auto h_dimensions(hs.NewHandle(reinterpret_cast<mirror::IntArray*>(args[1])->AsIntArray()));
+ result->SetL(Array::CreateMultiArray(self, h_class, h_dimensions));
} else if (name == "java.lang.Object java.lang.Throwable.nativeFillInStackTrace()") {
ScopedObjectAccessUnchecked soa(self);
if (Runtime::Current()->IsActiveTransaction()) {
@@ -455,8 +455,9 @@
// Do this after populating the shadow frame in case EnsureInitialized causes a GC.
if (method->IsStatic() && UNLIKELY(!method->GetDeclaringClass()->IsInitializing())) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- SirtRef<mirror::Class> sirt_c(self, method->GetDeclaringClass());
- if (UNLIKELY(!class_linker->EnsureInitialized(sirt_c, true, true))) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> h_class(hs.NewHandle(method->GetDeclaringClass()));
+ if (UNLIKELY(!class_linker->EnsureInitialized(h_class, true, true))) {
CHECK(self->IsExceptionPending());
self->PopShadowFrame();
return;
@@ -522,7 +523,8 @@
ArtMethod* method = shadow_frame->GetMethod();
// Ensure static methods are initialized.
if (method->IsStatic()) {
- SirtRef<Class> declaringClass(self, method->GetDeclaringClass());
+ StackHandleScope<1> hs(self);
+ Handle<Class> declaringClass(hs.NewHandle(method->GetDeclaringClass()));
if (UNLIKELY(!declaringClass->IsInitializing())) {
if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(declaringClass, true,
true))) {
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index ee6a869..c5fb0d8 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -296,7 +296,9 @@
// other variants that take more arguments should also be added.
std::string descriptor(DotToDescriptor(shadow_frame->GetVRegReference(arg_offset)->AsString()->ToModifiedUtf8().c_str()));
- SirtRef<ClassLoader> class_loader(self, nullptr); // shadow_frame.GetMethod()->GetDeclaringClass()->GetClassLoader();
+ StackHandleScope<1> hs(self);
+ // shadow_frame.GetMethod()->GetDeclaringClass()->GetClassLoader();
+ auto class_loader = hs.NewHandle<ClassLoader>(nullptr);
Class* found = Runtime::Current()->GetClassLinker()->FindClass(self, descriptor.c_str(),
class_loader);
CHECK(found != NULL) << "Class.forName failed in un-started runtime for class: "
@@ -305,7 +307,9 @@
} else if (name == "java.lang.Class java.lang.Void.lookupType()") {
result->SetL(Runtime::Current()->GetClassLinker()->FindPrimitiveClass('V'));
} else if (name == "java.lang.Class java.lang.VMClassLoader.findLoadedClass(java.lang.ClassLoader, java.lang.String)") {
- SirtRef<ClassLoader> class_loader(self, down_cast<mirror::ClassLoader*>(shadow_frame->GetVRegReference(arg_offset)));
+ StackHandleScope<1> hs(self);
+ Handle<ClassLoader> class_loader(
+ hs.NewHandle(down_cast<mirror::ClassLoader*>(shadow_frame->GetVRegReference(arg_offset))));
std::string descriptor(DotToDescriptor(shadow_frame->GetVRegReference(arg_offset + 1)->AsString()->ToModifiedUtf8().c_str()));
Class* found = Runtime::Current()->GetClassLinker()->FindClass(self, descriptor.c_str(),
@@ -315,10 +319,11 @@
Class* klass = shadow_frame->GetVRegReference(arg_offset)->AsClass();
ArtMethod* c = klass->FindDeclaredDirectMethod("<init>", "()V");
CHECK(c != NULL);
- SirtRef<Object> obj(self, klass->AllocObject(self));
- CHECK(obj.get() != NULL);
- EnterInterpreterFromInvoke(self, c, obj.get(), NULL, NULL);
- result->SetL(obj.get());
+ StackHandleScope<1> hs(self);
+ Handle<Object> obj(hs.NewHandle(klass->AllocObject(self)));
+ CHECK(obj.Get() != NULL);
+ EnterInterpreterFromInvoke(self, c, obj.Get(), NULL, NULL);
+ result->SetL(obj.Get());
} else if (name == "java.lang.reflect.Field java.lang.Class.getDeclaredField(java.lang.String)") {
// Special managed code cut-out to allow field lookup in a un-started runtime that'd fail
// going the reflective Dex way.
@@ -350,13 +355,14 @@
// TODO: getDeclaredField calls GetType once the field is found to ensure a
// NoClassDefFoundError is thrown if the field's type cannot be resolved.
Class* jlr_Field = self->DecodeJObject(WellKnownClasses::java_lang_reflect_Field)->AsClass();
- SirtRef<Object> field(self, jlr_Field->AllocNonMovableObject(self));
- CHECK(field.get() != NULL);
+ StackHandleScope<1> hs(self);
+ Handle<Object> field(hs.NewHandle(jlr_Field->AllocNonMovableObject(self)));
+ CHECK(field.Get() != NULL);
ArtMethod* c = jlr_Field->FindDeclaredDirectMethod("<init>", "(Ljava/lang/reflect/ArtField;)V");
uint32_t args[1];
args[0] = StackReference<mirror::Object>::FromMirrorPtr(found).AsVRegValue();
- EnterInterpreterFromInvoke(self, c, field.get(), args, NULL);
- result->SetL(field.get());
+ EnterInterpreterFromInvoke(self, c, field.Get(), args, NULL);
+ result->SetL(field.Get());
} else if (name == "int java.lang.Object.hashCode()") {
Object* obj = shadow_frame->GetVRegReference(arg_offset);
result->SetI(obj->IdentityHashCode());
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 819b79d..9b03334 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -410,8 +410,9 @@
Class* java_lang_string_class = String::GetJavaLangString();
if (UNLIKELY(!java_lang_string_class->IsInitialized())) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- SirtRef<mirror::Class> sirt_class(self, java_lang_string_class);
- if (UNLIKELY(!class_linker->EnsureInitialized(sirt_class, true, true))) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> h_class(hs.NewHandle(java_lang_string_class));
+ if (UNLIKELY(!class_linker->EnsureInitialized(h_class, true, true))) {
DCHECK(self->IsExceptionPending());
return nullptr;
}
@@ -571,9 +572,16 @@
ThrowLocation throw_location;
mirror::Throwable* exception = self->GetException(&throw_location);
bool clear_exception = false;
- SirtRef<mirror::Class> exception_class(self, exception->GetClass());
+ bool new_exception = false;
+ StackHandleScope<3> hs(self);
+ Handle<mirror::Class> exception_class(hs.NewHandle(exception->GetClass()));
uint32_t found_dex_pc = shadow_frame.GetMethod()->FindCatchBlock(exception_class, dex_pc,
- &clear_exception);
+ &clear_exception,
+ &new_exception);
+ if (UNLIKELY(new_exception)) {
+ // Update the exception.
+ exception = self->GetException(&throw_location);
+ }
if (found_dex_pc == DexFile::kDexNoIndex) {
instrumentation->MethodUnwindEvent(self, this_object,
shadow_frame.GetMethod(), dex_pc);
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index e425e91..5b7dee1d 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -145,12 +145,14 @@
const Instruction* inst = Instruction::At(code_item->insns_ + dex_pc);
uint16_t inst_data;
const void* const* currentHandlersTable;
+ bool notified_method_entry_event = false;
UPDATE_HANDLER_TABLE();
if (LIKELY(dex_pc == 0)) { // We are entering the method as opposed to deoptimizing..
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (UNLIKELY(instrumentation->HasMethodEntryListeners())) {
instrumentation->MethodEnterEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), 0);
+ notified_method_entry_event = true;
}
}
@@ -255,6 +257,9 @@
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), dex_pc,
result);
+ } else if (UNLIKELY(instrumentation->HasDexPcListeners())) {
+ instrumentation->DexPcMovedEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
+ shadow_frame.GetMethod(), dex_pc);
}
return result;
}
@@ -271,6 +276,9 @@
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), dex_pc,
result);
+ } else if (UNLIKELY(instrumentation->HasDexPcListeners())) {
+ instrumentation->DexPcMovedEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
+ shadow_frame.GetMethod(), dex_pc);
}
return result;
}
@@ -288,6 +296,9 @@
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), dex_pc,
result);
+ } else if (UNLIKELY(instrumentation->HasDexPcListeners())) {
+ instrumentation->DexPcMovedEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
+ shadow_frame.GetMethod(), dex_pc);
}
return result;
}
@@ -304,6 +315,9 @@
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), dex_pc,
result);
+ } else if (UNLIKELY(instrumentation->HasDexPcListeners())) {
+ instrumentation->DexPcMovedEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
+ shadow_frame.GetMethod(), dex_pc);
}
return result;
}
@@ -338,6 +352,9 @@
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), dex_pc,
result);
+ } else if (UNLIKELY(instrumentation->HasDexPcListeners())) {
+ instrumentation->DexPcMovedEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
+ shadow_frame.GetMethod(), dex_pc);
}
return result;
}
@@ -2384,16 +2401,32 @@
}
}
- // Create alternative instruction handlers dedicated to instrumentation.
-#define INSTRUMENTATION_INSTRUCTION_HANDLER(o, code, n, f, r, i, a, v) \
- alt_op_##code: { \
- instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); \
- if (UNLIKELY(instrumentation->HasDexPcListeners())) { \
- instrumentation->DexPcMovedEvent(self, shadow_frame.GetThisObject(code_item->ins_size_), \
- shadow_frame.GetMethod(), dex_pc); \
- } \
- UPDATE_HANDLER_TABLE(); \
- goto *handlersTable[instrumentation::kMainHandlerTable][Instruction::code]; \
+// Create alternative instruction handlers dedicated to instrumentation.
+// Return instructions must not call Instrumentation::DexPcMovedEvent since they already call
+// Instrumentation::MethodExited. This is to avoid posting debugger events twice for this location.
+// Note: we do not use the kReturn instruction flag here (to test the instruction is a return). The
+// compiler seems to not evaluate "(Instruction::FlagsOf(Instruction::code) & kReturn) != 0" to
+// a constant condition that would remove the "if" statement so the test is free.
+#define INSTRUMENTATION_INSTRUCTION_HANDLER(o, code, n, f, r, i, a, v) \
+ alt_op_##code: { \
+ if (Instruction::code != Instruction::RETURN_VOID && \
+ Instruction::code != Instruction::RETURN_VOID_BARRIER && \
+ Instruction::code != Instruction::RETURN && \
+ Instruction::code != Instruction::RETURN_WIDE && \
+ Instruction::code != Instruction::RETURN_OBJECT) { \
+ if (LIKELY(!notified_method_entry_event)) { \
+ Runtime* runtime = Runtime::Current(); \
+ const instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation(); \
+ if (UNLIKELY(instrumentation->HasDexPcListeners())) { \
+ Object* this_object = shadow_frame.GetThisObject(code_item->ins_size_); \
+ instrumentation->DexPcMovedEvent(self, this_object, shadow_frame.GetMethod(), dex_pc); \
+ } \
+ } else { \
+ notified_method_entry_event = false; \
+ } \
+ } \
+ UPDATE_HANDLER_TABLE(); \
+ goto *handlersTable[instrumentation::kMainHandlerTable][Instruction::code]; \
}
#include "dex_instruction_list.h"
DEX_INSTRUCTION_LIST(INSTRUMENTATION_INSTRUCTION_HANDLER)
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index 9c13973..859cfc4 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -48,11 +48,20 @@
} while (false)
// Code to run before each dex instruction.
-#define PREAMBLE()
+#define PREAMBLE() \
+ do { \
+ DCHECK(!inst->IsReturn()); \
+ if (UNLIKELY(notified_method_entry_event)) { \
+ notified_method_entry_event = false; \
+ } else if (UNLIKELY(instrumentation->HasDexPcListeners())) { \
+ instrumentation->DexPcMovedEvent(self, shadow_frame.GetThisObject(code_item->ins_size_), \
+ shadow_frame.GetMethod(), dex_pc); \
+ } \
+ } while (false)
template<bool do_access_check, bool transaction_active>
JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register) {
+ ShadowFrame& shadow_frame, JValue result_register) {
bool do_assignability_check = do_access_check;
if (UNLIKELY(!shadow_frame.HasReferenceArray())) {
LOG(FATAL) << "Invalid shadow frame for interpreter use";
@@ -61,11 +70,13 @@
self->VerifyStack();
uint32_t dex_pc = shadow_frame.GetDexPC();
+ bool notified_method_entry_event = false;
const instrumentation::Instrumentation* const instrumentation = Runtime::Current()->GetInstrumentation();
if (LIKELY(dex_pc == 0)) { // We are entering the method as opposed to deoptimizing..
if (UNLIKELY(instrumentation->HasMethodEntryListeners())) {
instrumentation->MethodEnterEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), 0);
+ notified_method_entry_event = true;
}
}
const uint16_t* const insns = code_item->insns_;
@@ -74,10 +85,6 @@
while (true) {
dex_pc = inst->GetDexPc(insns);
shadow_frame.SetDexPC(dex_pc);
- if (UNLIKELY(instrumentation->HasDexPcListeners())) {
- instrumentation->DexPcMovedEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
- shadow_frame.GetMethod(), dex_pc);
- }
TraceExecution(shadow_frame, inst, dex_pc, mh);
inst_data = inst->Fetch16(0);
switch (inst->Opcode(inst_data)) {
@@ -163,7 +170,6 @@
break;
}
case Instruction::RETURN_VOID: {
- PREAMBLE();
JValue result;
if (do_access_check) {
// If access checks are required then the dex-to-dex compiler and analysis of
@@ -178,11 +184,13 @@
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), inst->GetDexPc(insns),
result);
+ } else if (UNLIKELY(instrumentation->HasDexPcListeners())) {
+ instrumentation->DexPcMovedEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
+ shadow_frame.GetMethod(), dex_pc);
}
return result;
}
case Instruction::RETURN_VOID_BARRIER: {
- PREAMBLE();
QuasiAtomic::MembarStoreLoad();
JValue result;
if (UNLIKELY(self->TestAllFlags())) {
@@ -192,11 +200,13 @@
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), inst->GetDexPc(insns),
result);
+ } else if (UNLIKELY(instrumentation->HasDexPcListeners())) {
+ instrumentation->DexPcMovedEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
+ shadow_frame.GetMethod(), dex_pc);
}
return result;
}
case Instruction::RETURN: {
- PREAMBLE();
JValue result;
result.SetJ(0);
result.SetI(shadow_frame.GetVReg(inst->VRegA_11x(inst_data)));
@@ -207,11 +217,13 @@
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), inst->GetDexPc(insns),
result);
+ } else if (UNLIKELY(instrumentation->HasDexPcListeners())) {
+ instrumentation->DexPcMovedEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
+ shadow_frame.GetMethod(), dex_pc);
}
return result;
}
case Instruction::RETURN_WIDE: {
- PREAMBLE();
JValue result;
result.SetJ(shadow_frame.GetVRegLong(inst->VRegA_11x(inst_data)));
if (UNLIKELY(self->TestAllFlags())) {
@@ -221,11 +233,13 @@
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), inst->GetDexPc(insns),
result);
+ } else if (UNLIKELY(instrumentation->HasDexPcListeners())) {
+ instrumentation->DexPcMovedEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
+ shadow_frame.GetMethod(), dex_pc);
}
return result;
}
case Instruction::RETURN_OBJECT: {
- PREAMBLE();
JValue result;
if (UNLIKELY(self->TestAllFlags())) {
CheckSuspend(self);
@@ -253,6 +267,9 @@
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), inst->GetDexPc(insns),
result);
+ } else if (UNLIKELY(instrumentation->HasDexPcListeners())) {
+ instrumentation->DexPcMovedEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
+ shadow_frame.GetMethod(), dex_pc);
}
return result;
}
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index 4e2b0f8..cb2c420 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -136,6 +136,28 @@
}
}
+uint32_t GetInstrumentationEventFor(JdwpEventKind eventKind) {
+ switch (eventKind) {
+ case EK_BREAKPOINT:
+ case EK_SINGLE_STEP:
+ return instrumentation::Instrumentation::kDexPcMoved;
+ case EK_EXCEPTION:
+ case EK_EXCEPTION_CATCH:
+ return instrumentation::Instrumentation::kExceptionCaught;
+ case EK_METHOD_ENTRY:
+ return instrumentation::Instrumentation::kMethodEntered;
+ case EK_METHOD_EXIT:
+ case EK_METHOD_EXIT_WITH_RETURN_VALUE:
+ return instrumentation::Instrumentation::kMethodExited;
+ case EK_FIELD_ACCESS:
+ return instrumentation::Instrumentation::kFieldRead;
+ case EK_FIELD_MODIFICATION:
+ return instrumentation::Instrumentation::kFieldWritten;
+ default:
+ return 0;
+ }
+}
+
/*
* Add an event to the list. Ordering is not important.
*
@@ -148,30 +170,40 @@
CHECK(pEvent->prev == NULL);
CHECK(pEvent->next == NULL);
- /*
- * If one or more "break"-type mods are used, register them with
- * the interpreter.
- */
- DeoptimizationRequest req;
- for (int i = 0; i < pEvent->modCount; i++) {
- const JdwpEventMod* pMod = &pEvent->mods[i];
- if (pMod->modKind == MK_LOCATION_ONLY) {
- /* should only be for Breakpoint, Step, and Exception */
- Dbg::WatchLocation(&pMod->locationOnly.loc, &req);
- } else if (pMod->modKind == MK_STEP) {
- /* should only be for EK_SINGLE_STEP; should only be one */
- JdwpStepSize size = static_cast<JdwpStepSize>(pMod->step.size);
- JdwpStepDepth depth = static_cast<JdwpStepDepth>(pMod->step.depth);
- JdwpError status = Dbg::ConfigureStep(pMod->step.threadId, size, depth);
- if (status != ERR_NONE) {
- return status;
+ {
+ /*
+ * If one or more "break"-type mods are used, register them with
+ * the interpreter.
+ */
+ DeoptimizationRequest req;
+ for (int i = 0; i < pEvent->modCount; i++) {
+ const JdwpEventMod* pMod = &pEvent->mods[i];
+ if (pMod->modKind == MK_LOCATION_ONLY) {
+ /* should only be for Breakpoint, Step, and Exception */
+ Dbg::WatchLocation(&pMod->locationOnly.loc, &req);
+ } else if (pMod->modKind == MK_STEP) {
+ /* should only be for EK_SINGLE_STEP; should only be one */
+ JdwpStepSize size = static_cast<JdwpStepSize>(pMod->step.size);
+ JdwpStepDepth depth = static_cast<JdwpStepDepth>(pMod->step.depth);
+ JdwpError status = Dbg::ConfigureStep(pMod->step.threadId, size, depth);
+ if (status != ERR_NONE) {
+ return status;
+ }
}
}
+ if (NeedsFullDeoptimization(pEvent->eventKind)) {
+ CHECK_EQ(req.kind, DeoptimizationRequest::kNothing);
+ CHECK(req.method == nullptr);
+ req.kind = DeoptimizationRequest::kFullDeoptimization;
+ }
+ Dbg::RequestDeoptimization(req);
}
- if (NeedsFullDeoptimization(pEvent->eventKind)) {
- CHECK_EQ(req.kind, DeoptimizationRequest::kNothing);
- CHECK(req.method == nullptr);
- req.kind = DeoptimizationRequest::kFullDeoptimization;
+ uint32_t instrumentation_event = GetInstrumentationEventFor(pEvent->eventKind);
+ if (instrumentation_event != 0) {
+ DeoptimizationRequest req;
+ req.kind = DeoptimizationRequest::kRegisterForEvent;
+ req.instrumentation_event = instrumentation_event;
+ Dbg::RequestDeoptimization(req);
}
{
@@ -187,9 +219,6 @@
++event_list_size_;
}
- // TODO we can do better job here since we should process only one request: the one we just
- // created.
- Dbg::RequestDeoptimization(req);
Dbg::ManageDeoptimization();
return ERR_NONE;
@@ -219,40 +248,48 @@
}
pEvent->prev = NULL;
- /*
- * Unhook us from the interpreter, if necessary.
- */
- DeoptimizationRequest req;
- for (int i = 0; i < pEvent->modCount; i++) {
- JdwpEventMod* pMod = &pEvent->mods[i];
- if (pMod->modKind == MK_LOCATION_ONLY) {
- /* should only be for Breakpoint, Step, and Exception */
- Dbg::UnwatchLocation(&pMod->locationOnly.loc, &req);
+ {
+ /*
+ * Unhook us from the interpreter, if necessary.
+ */
+ DeoptimizationRequest req;
+ for (int i = 0; i < pEvent->modCount; i++) {
+ JdwpEventMod* pMod = &pEvent->mods[i];
+ if (pMod->modKind == MK_LOCATION_ONLY) {
+ /* should only be for Breakpoint, Step, and Exception */
+ Dbg::UnwatchLocation(&pMod->locationOnly.loc, &req);
+ }
+ if (pMod->modKind == MK_STEP) {
+ /* should only be for EK_SINGLE_STEP; should only be one */
+ Dbg::UnconfigureStep(pMod->step.threadId);
+ }
}
- if (pMod->modKind == MK_STEP) {
- /* should only be for EK_SINGLE_STEP; should only be one */
- Dbg::UnconfigureStep(pMod->step.threadId);
+ if (pEvent->eventKind == EK_SINGLE_STEP) {
+ // Special case for single-steps where we want to avoid the slow pattern deoptimize/undeoptimize
+ // loop between each single-step. In a IDE, this would happens each time the user click on the
+ // "single-step" button. Here we delay the full undeoptimization to the next resume
+ // (VM.Resume or ThreadReference.Resume) or the end of the debugging session (VM.Dispose or
+ // runtime shutdown).
+ // Therefore, in a singles-stepping sequence, only the first single-step will trigger a full
+ // deoptimization and only the last single-step will trigger a full undeoptimization.
+ Dbg::DelayFullUndeoptimization();
+ } else if (NeedsFullDeoptimization(pEvent->eventKind)) {
+ CHECK_EQ(req.kind, DeoptimizationRequest::kNothing);
+ CHECK(req.method == nullptr);
+ req.kind = DeoptimizationRequest::kFullUndeoptimization;
}
+ Dbg::RequestDeoptimization(req);
}
- if (pEvent->eventKind == EK_SINGLE_STEP) {
- // Special case for single-steps where we want to avoid the slow pattern deoptimize/undeoptimize
- // loop between each single-step. In a IDE, this would happens each time the user click on the
- // "single-step" button. Here we delay the full undeoptimization to the next resume
- // (VM.Resume or ThreadReference.Resume) or the end of the debugging session (VM.Dispose or
- // runtime shutdown).
- // Therefore, in a singles-stepping sequence, only the first single-step will trigger a full
- // deoptimization and only the last single-step will trigger a full undeoptimization.
- Dbg::DelayFullUndeoptimization();
- } else if (NeedsFullDeoptimization(pEvent->eventKind)) {
- CHECK_EQ(req.kind, DeoptimizationRequest::kNothing);
- CHECK(req.method == nullptr);
- req.kind = DeoptimizationRequest::kFullUndeoptimization;
+ uint32_t instrumentation_event = GetInstrumentationEventFor(pEvent->eventKind);
+ if (instrumentation_event != 0) {
+ DeoptimizationRequest req;
+ req.kind = DeoptimizationRequest::kUnregisterForEvent;
+ req.instrumentation_event = instrumentation_event;
+ Dbg::RequestDeoptimization(req);
}
--event_list_size_;
CHECK(event_list_size_ != 0 || event_list_ == NULL);
-
- Dbg::RequestDeoptimization(req);
}
/*
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index 4843c2b..00be016 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -32,7 +32,7 @@
#include "jdwp/jdwp_priv.h"
#include "runtime.h"
#include "thread-inl.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
namespace art {
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 915f2c9..3afb149 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -49,7 +49,7 @@
#include "ScopedLocalRef.h"
#include "thread.h"
#include "utf.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#include "well_known_classes.h"
namespace art {
@@ -117,11 +117,12 @@
if (LIKELY(klass->IsInitialized())) {
return klass;
}
- SirtRef<mirror::Class> sirt_klass(self, klass);
- if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(sirt_klass, true, true)) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> h_klass(hs.NewHandle(klass));
+ if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_klass, true, true)) {
return nullptr;
}
- return sirt_klass.get();
+ return h_klass.Get();
}
static jmethodID FindMethodID(ScopedObjectAccess& soa, jclass jni_class,
@@ -180,16 +181,17 @@
static jfieldID FindFieldID(const ScopedObjectAccess& soa, jclass jni_class, const char* name,
const char* sig, bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- SirtRef<mirror::Class> c(soa.Self(), EnsureInitialized(soa.Self(),
- soa.Decode<mirror::Class*>(jni_class)));
- if (c.get() == nullptr) {
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::Class> c(
+ hs.NewHandle(EnsureInitialized(soa.Self(), soa.Decode<mirror::Class*>(jni_class))));
+ if (c.Get() == nullptr) {
return nullptr;
}
mirror::ArtField* field = nullptr;
mirror::Class* field_type;
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
if (sig[1] != '\0') {
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(), c->GetClassLoader());
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(c->GetClassLoader()));
field_type = class_linker->FindClass(soa.Self(), sig, class_loader);
} else {
field_type = class_linker->FindPrimitiveClass(*sig);
@@ -198,13 +200,14 @@
// Failed to find type from the signature of the field.
DCHECK(soa.Self()->IsExceptionPending());
ThrowLocation throw_location;
- SirtRef<mirror::Throwable> cause(soa.Self(), soa.Self()->GetException(&throw_location));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::Throwable> cause(hs.NewHandle(soa.Self()->GetException(&throw_location)));
soa.Self()->ClearException();
soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/NoSuchFieldError;",
"no type \"%s\" found and so no field \"%s\" "
"could be found in class \"%s\" or its superclasses", sig, name,
- ClassHelper(c.get()).GetDescriptor());
- soa.Self()->GetException(nullptr)->SetCause(cause.get());
+ ClassHelper(c.Get()).GetDescriptor());
+ soa.Self()->GetException(nullptr)->SetCause(cause.Get());
return nullptr;
}
if (is_static) {
@@ -216,7 +219,7 @@
ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow();
soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/NoSuchFieldError;",
"no \"%s\" field \"%s\" in class \"%s\" or its superclasses",
- sig, name, ClassHelper(c.get()).GetDescriptor());
+ sig, name, ClassHelper(c.Get()).GetDescriptor());
return nullptr;
}
return soa.EncodeField(field);
@@ -515,16 +518,28 @@
SafeMap<std::string, SharedLibrary*> libraries_;
};
-#define CHECK_NON_NULL_ARGUMENT(value) CHECK_NON_NULL_ARGUMENT_FN_NAME(__FUNCTION__, value)
+#define CHECK_NON_NULL_ARGUMENT(value) \
+ CHECK_NON_NULL_ARGUMENT_FN_NAME(__FUNCTION__, value, nullptr)
-#define CHECK_NON_NULL_ARGUMENT_FN_NAME(name, value) \
+#define CHECK_NON_NULL_ARGUMENT_RETURN_VOID(value) \
+ CHECK_NON_NULL_ARGUMENT_FN_NAME(__FUNCTION__, value, )
+
+#define CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(value) \
+ CHECK_NON_NULL_ARGUMENT_FN_NAME(__FUNCTION__, value, 0)
+
+#define CHECK_NON_NULL_ARGUMENT_RETURN(value, return_val) \
+ CHECK_NON_NULL_ARGUMENT_FN_NAME(__FUNCTION__, value, return_val)
+
+#define CHECK_NON_NULL_ARGUMENT_FN_NAME(name, value, return_val) \
if (UNLIKELY(value == nullptr)) { \
JniAbortF(name, #value " == null"); \
+ return return_val; \
}
#define CHECK_NON_NULL_MEMCPY_ARGUMENT(length, value) \
if (UNLIKELY(length != 0 && value == nullptr)) { \
JniAbortF(__FUNCTION__, #value " == null"); \
+ return; \
}
class JNI {
@@ -546,7 +561,8 @@
ScopedObjectAccess soa(env);
mirror::Class* c = nullptr;
if (runtime->IsStarted()) {
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(), GetClassLoader(soa));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(GetClassLoader(soa)));
c = class_linker->FindClass(soa.Self(), descriptor.c_str(), class_loader);
} else {
c = class_linker->FindSystemClass(soa.Self(), descriptor.c_str());
@@ -610,8 +626,8 @@
}
static jboolean IsAssignableFrom(JNIEnv* env, jclass java_class1, jclass java_class2) {
- CHECK_NON_NULL_ARGUMENT(java_class1);
- CHECK_NON_NULL_ARGUMENT(java_class2);
+ CHECK_NON_NULL_ARGUMENT_RETURN(java_class1, JNI_FALSE);
+ CHECK_NON_NULL_ARGUMENT_RETURN(java_class2, JNI_FALSE);
ScopedObjectAccess soa(env);
mirror::Class* c1 = soa.Decode<mirror::Class*>(java_class1);
mirror::Class* c2 = soa.Decode<mirror::Class*>(java_class2);
@@ -619,7 +635,7 @@
}
static jboolean IsInstanceOf(JNIEnv* env, jobject jobj, jclass java_class) {
- CHECK_NON_NULL_ARGUMENT(java_class);
+ CHECK_NON_NULL_ARGUMENT_RETURN(java_class, JNI_FALSE);
if (jobj == nullptr) {
// Note: JNI is different from regular Java instanceof in this respect
return JNI_TRUE;
@@ -643,7 +659,7 @@
}
static jint ThrowNew(JNIEnv* env, jclass c, const char* msg) {
- CHECK_NON_NULL_ARGUMENT(c);
+ CHECK_NON_NULL_ARGUMENT_RETURN(c, JNI_ERR);
return ThrowNewException(env, c, msg, nullptr);
}
@@ -658,26 +674,28 @@
static void ExceptionDescribe(JNIEnv* env) {
ScopedObjectAccess soa(env);
- SirtRef<mirror::Object> old_throw_this_object(soa.Self(), nullptr);
- SirtRef<mirror::ArtMethod> old_throw_method(soa.Self(), nullptr);
- SirtRef<mirror::Throwable> old_exception(soa.Self(), nullptr);
+ StackHandleScope<3> hs(soa.Self());
+ // TODO: Use nullptr instead of null handles?
+ auto old_throw_this_object(hs.NewHandle<mirror::Object>(nullptr));
+ auto old_throw_method(hs.NewHandle<mirror::ArtMethod>(nullptr));
+ auto old_exception(hs.NewHandle<mirror::Throwable>(nullptr));
uint32_t old_throw_dex_pc;
{
ThrowLocation old_throw_location;
mirror::Throwable* old_exception_obj = soa.Self()->GetException(&old_throw_location);
- old_throw_this_object.reset(old_throw_location.GetThis());
- old_throw_method.reset(old_throw_location.GetMethod());
- old_exception.reset(old_exception_obj);
+ old_throw_this_object.Assign(old_throw_location.GetThis());
+ old_throw_method.Assign(old_throw_location.GetMethod());
+ old_exception.Assign(old_exception_obj);
old_throw_dex_pc = old_throw_location.GetDexPc();
soa.Self()->ClearException();
}
ScopedLocalRef<jthrowable> exception(env,
- soa.AddLocalReference<jthrowable>(old_exception.get()));
+ soa.AddLocalReference<jthrowable>(old_exception.Get()));
ScopedLocalRef<jclass> exception_class(env, env->GetObjectClass(exception.get()));
jmethodID mid = env->GetMethodID(exception_class.get(), "printStackTrace", "()V");
if (mid == nullptr) {
LOG(WARNING) << "JNI WARNING: no printStackTrace()V in "
- << PrettyTypeOf(old_exception.get());
+ << PrettyTypeOf(old_exception.Get());
} else {
env->CallVoidMethod(exception.get(), mid);
if (soa.Self()->IsExceptionPending()) {
@@ -686,10 +704,10 @@
soa.Self()->ClearException();
}
}
- ThrowLocation gc_safe_throw_location(old_throw_this_object.get(), old_throw_method.get(),
+ ThrowLocation gc_safe_throw_location(old_throw_this_object.Get(), old_throw_method.Get(),
old_throw_dex_pc);
- soa.Self()->SetException(gc_safe_throw_location, old_exception.get());
+ soa.Self()->SetException(gc_safe_throw_location, old_exception.Get());
}
static jthrowable ExceptionOccurred(JNIEnv* env) {
@@ -908,8 +926,8 @@
static jboolean CallBooleanMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, ap));
va_end(ap);
@@ -917,15 +935,15 @@
}
static jboolean CallBooleanMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list args) {
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, args).GetZ();
}
static jboolean CallBooleanMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeVirtualOrInterfaceWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid,
args).GetZ();
@@ -934,8 +952,8 @@
static jbyte CallByteMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, ap));
va_end(ap);
@@ -943,15 +961,15 @@
}
static jbyte CallByteMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list args) {
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, args).GetB();
}
static jbyte CallByteMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeVirtualOrInterfaceWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid,
args).GetB();
@@ -960,8 +978,8 @@
static jchar CallCharMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, ap));
va_end(ap);
@@ -969,15 +987,15 @@
}
static jchar CallCharMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list args) {
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, args).GetC();
}
static jchar CallCharMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeVirtualOrInterfaceWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid,
args).GetC();
@@ -986,8 +1004,8 @@
static jdouble CallDoubleMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, ap));
va_end(ap);
@@ -995,15 +1013,15 @@
}
static jdouble CallDoubleMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list args) {
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, args).GetD();
}
static jdouble CallDoubleMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeVirtualOrInterfaceWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid,
args).GetD();
@@ -1012,8 +1030,8 @@
static jfloat CallFloatMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, ap));
va_end(ap);
@@ -1021,15 +1039,15 @@
}
static jfloat CallFloatMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list args) {
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, args).GetF();
}
static jfloat CallFloatMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeVirtualOrInterfaceWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid,
args).GetF();
@@ -1038,8 +1056,8 @@
static jint CallIntMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, ap));
va_end(ap);
@@ -1047,15 +1065,15 @@
}
static jint CallIntMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list args) {
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, args).GetI();
}
static jint CallIntMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeVirtualOrInterfaceWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid,
args).GetI();
@@ -1064,8 +1082,8 @@
static jlong CallLongMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, ap));
va_end(ap);
@@ -1073,15 +1091,15 @@
}
static jlong CallLongMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list args) {
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, args).GetJ();
}
static jlong CallLongMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeVirtualOrInterfaceWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid,
args).GetJ();
@@ -1090,8 +1108,8 @@
static jshort CallShortMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, ap));
va_end(ap);
@@ -1099,15 +1117,15 @@
}
static jshort CallShortMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list args) {
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, args).GetS();
}
static jshort CallShortMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeVirtualOrInterfaceWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid,
args).GetS();
@@ -1116,23 +1134,23 @@
static void CallVoidMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_VOID(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_VOID(mid);
ScopedObjectAccess soa(env);
InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, ap);
va_end(ap);
}
static void CallVoidMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list args) {
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_VOID(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_VOID(mid);
ScopedObjectAccess soa(env);
InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, args);
}
static void CallVoidMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_VOID(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_VOID(mid);
ScopedObjectAccess soa(env);
InvokeVirtualOrInterfaceWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args);
}
@@ -1171,8 +1189,8 @@
...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeWithVarArgs(soa, obj, mid, ap));
va_end(ap);
@@ -1181,16 +1199,16 @@
static jboolean CallNonvirtualBooleanMethodV(JNIEnv* env, jobject obj, jclass, jmethodID mid,
va_list args) {
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithVarArgs(soa, obj, mid, args).GetZ();
}
static jboolean CallNonvirtualBooleanMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid,
jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args).GetZ();
}
@@ -1198,8 +1216,8 @@
static jbyte CallNonvirtualByteMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeWithVarArgs(soa, obj, mid, ap));
va_end(ap);
@@ -1208,16 +1226,16 @@
static jbyte CallNonvirtualByteMethodV(JNIEnv* env, jobject obj, jclass, jmethodID mid,
va_list args) {
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithVarArgs(soa, obj, mid, args).GetB();
}
static jbyte CallNonvirtualByteMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid,
jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args).GetB();
}
@@ -1225,8 +1243,8 @@
static jchar CallNonvirtualCharMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeWithVarArgs(soa, obj, mid, ap));
va_end(ap);
@@ -1235,16 +1253,16 @@
static jchar CallNonvirtualCharMethodV(JNIEnv* env, jobject obj, jclass, jmethodID mid,
va_list args) {
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithVarArgs(soa, obj, mid, args).GetC();
}
static jchar CallNonvirtualCharMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid,
jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args).GetC();
}
@@ -1252,8 +1270,8 @@
static jshort CallNonvirtualShortMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeWithVarArgs(soa, obj, mid, ap));
va_end(ap);
@@ -1262,16 +1280,16 @@
static jshort CallNonvirtualShortMethodV(JNIEnv* env, jobject obj, jclass, jmethodID mid,
va_list args) {
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithVarArgs(soa, obj, mid, args).GetS();
}
static jshort CallNonvirtualShortMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid,
jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args).GetS();
}
@@ -1279,8 +1297,8 @@
static jint CallNonvirtualIntMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeWithVarArgs(soa, obj, mid, ap));
va_end(ap);
@@ -1289,16 +1307,16 @@
static jint CallNonvirtualIntMethodV(JNIEnv* env, jobject obj, jclass, jmethodID mid,
va_list args) {
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithVarArgs(soa, obj, mid, args).GetI();
}
static jint CallNonvirtualIntMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid,
jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args).GetI();
}
@@ -1306,8 +1324,8 @@
static jlong CallNonvirtualLongMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeWithVarArgs(soa, obj, mid, ap));
va_end(ap);
@@ -1316,16 +1334,16 @@
static jlong CallNonvirtualLongMethodV(JNIEnv* env, jobject obj, jclass, jmethodID mid,
va_list args) {
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithVarArgs(soa, obj, mid, args).GetJ();
}
static jlong CallNonvirtualLongMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid,
jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args).GetJ();
}
@@ -1333,8 +1351,8 @@
static jfloat CallNonvirtualFloatMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeWithVarArgs(soa, obj, mid, ap));
va_end(ap);
@@ -1343,16 +1361,16 @@
static jfloat CallNonvirtualFloatMethodV(JNIEnv* env, jobject obj, jclass, jmethodID mid,
va_list args) {
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithVarArgs(soa, obj, mid, args).GetF();
}
static jfloat CallNonvirtualFloatMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid,
jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args).GetF();
}
@@ -1360,8 +1378,8 @@
static jdouble CallNonvirtualDoubleMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeWithVarArgs(soa, obj, mid, ap));
va_end(ap);
@@ -1370,16 +1388,16 @@
static jdouble CallNonvirtualDoubleMethodV(JNIEnv* env, jobject obj, jclass, jmethodID mid,
va_list args) {
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithVarArgs(soa, obj, mid, args).GetD();
}
static jdouble CallNonvirtualDoubleMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid,
jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args).GetD();
}
@@ -1387,8 +1405,8 @@
static void CallNonvirtualVoidMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_VOID(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_VOID(mid);
ScopedObjectAccess soa(env);
InvokeWithVarArgs(soa, obj, mid, ap);
va_end(ap);
@@ -1396,16 +1414,16 @@
static void CallNonvirtualVoidMethodV(JNIEnv* env, jobject obj, jclass, jmethodID mid,
va_list args) {
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_VOID(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_VOID(mid);
ScopedObjectAccess soa(env);
InvokeWithVarArgs(soa, obj, mid, args);
}
static void CallNonvirtualVoidMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid,
jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(obj);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_VOID(obj);
+ CHECK_NON_NULL_ARGUMENT_RETURN_VOID(mid);
ScopedObjectAccess soa(env);
InvokeWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args);
}
@@ -1444,8 +1462,8 @@
}
static void SetObjectField(JNIEnv* env, jobject java_object, jfieldID fid, jobject java_value) {
- CHECK_NON_NULL_ARGUMENT(java_object);
- CHECK_NON_NULL_ARGUMENT(fid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_VOID(java_object);
+ CHECK_NON_NULL_ARGUMENT_RETURN_VOID(fid);
ScopedObjectAccess soa(env);
mirror::Object* o = soa.Decode<mirror::Object*>(java_object);
mirror::Object* v = soa.Decode<mirror::Object*>(java_value);
@@ -1454,7 +1472,7 @@
}
static void SetStaticObjectField(JNIEnv* env, jclass, jfieldID fid, jobject java_value) {
- CHECK_NON_NULL_ARGUMENT(fid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_VOID(fid);
ScopedObjectAccess soa(env);
mirror::Object* v = soa.Decode<mirror::Object*>(java_value);
mirror::ArtField* f = soa.DecodeField(fid);
@@ -1462,29 +1480,29 @@
}
#define GET_PRIMITIVE_FIELD(fn, instance) \
- CHECK_NON_NULL_ARGUMENT(instance); \
- CHECK_NON_NULL_ARGUMENT(fid); \
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(instance); \
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(fid); \
ScopedObjectAccess soa(env); \
mirror::Object* o = soa.Decode<mirror::Object*>(instance); \
mirror::ArtField* f = soa.DecodeField(fid); \
return f->Get ##fn (o)
#define GET_STATIC_PRIMITIVE_FIELD(fn) \
- CHECK_NON_NULL_ARGUMENT(fid); \
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(fid); \
ScopedObjectAccess soa(env); \
mirror::ArtField* f = soa.DecodeField(fid); \
return f->Get ##fn (f->GetDeclaringClass())
#define SET_PRIMITIVE_FIELD(fn, instance, value) \
- CHECK_NON_NULL_ARGUMENT(instance); \
- CHECK_NON_NULL_ARGUMENT(fid); \
+ CHECK_NON_NULL_ARGUMENT_RETURN_VOID(instance); \
+ CHECK_NON_NULL_ARGUMENT_RETURN_VOID(fid); \
ScopedObjectAccess soa(env); \
mirror::Object* o = soa.Decode<mirror::Object*>(instance); \
mirror::ArtField* f = soa.DecodeField(fid); \
f->Set ##fn <false>(o, value)
#define SET_STATIC_PRIMITIVE_FIELD(fn, value) \
- CHECK_NON_NULL_ARGUMENT(fid); \
+ CHECK_NON_NULL_ARGUMENT_RETURN_VOID(fid); \
ScopedObjectAccess soa(env); \
mirror::ArtField* f = soa.DecodeField(fid); \
f->Set ##fn <false>(f->GetDeclaringClass(), value)
@@ -1645,7 +1663,7 @@
static jboolean CallStaticBooleanMethod(JNIEnv* env, jclass, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeWithVarArgs(soa, nullptr, mid, ap));
va_end(ap);
@@ -1653,13 +1671,13 @@
}
static jboolean CallStaticBooleanMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) {
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithVarArgs(soa, nullptr, mid, args).GetZ();
}
static jboolean CallStaticBooleanMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, nullptr, mid, args).GetZ();
}
@@ -1667,7 +1685,7 @@
static jbyte CallStaticByteMethod(JNIEnv* env, jclass, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeWithVarArgs(soa, nullptr, mid, ap));
va_end(ap);
@@ -1675,13 +1693,13 @@
}
static jbyte CallStaticByteMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) {
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithVarArgs(soa, nullptr, mid, args).GetB();
}
static jbyte CallStaticByteMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, nullptr, mid, args).GetB();
}
@@ -1689,7 +1707,7 @@
static jchar CallStaticCharMethod(JNIEnv* env, jclass, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeWithVarArgs(soa, nullptr, mid, ap));
va_end(ap);
@@ -1697,13 +1715,13 @@
}
static jchar CallStaticCharMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) {
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithVarArgs(soa, nullptr, mid, args).GetC();
}
static jchar CallStaticCharMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, nullptr, mid, args).GetC();
}
@@ -1711,7 +1729,7 @@
static jshort CallStaticShortMethod(JNIEnv* env, jclass, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeWithVarArgs(soa, nullptr, mid, ap));
va_end(ap);
@@ -1719,13 +1737,13 @@
}
static jshort CallStaticShortMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) {
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithVarArgs(soa, nullptr, mid, args).GetS();
}
static jshort CallStaticShortMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, nullptr, mid, args).GetS();
}
@@ -1733,7 +1751,7 @@
static jint CallStaticIntMethod(JNIEnv* env, jclass, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeWithVarArgs(soa, nullptr, mid, ap));
va_end(ap);
@@ -1741,13 +1759,13 @@
}
static jint CallStaticIntMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) {
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithVarArgs(soa, nullptr, mid, args).GetI();
}
static jint CallStaticIntMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, nullptr, mid, args).GetI();
}
@@ -1755,7 +1773,7 @@
static jlong CallStaticLongMethod(JNIEnv* env, jclass, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeWithVarArgs(soa, nullptr, mid, ap));
va_end(ap);
@@ -1763,13 +1781,13 @@
}
static jlong CallStaticLongMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) {
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithVarArgs(soa, nullptr, mid, args).GetJ();
}
static jlong CallStaticLongMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, nullptr, mid, args).GetJ();
}
@@ -1777,7 +1795,7 @@
static jfloat CallStaticFloatMethod(JNIEnv* env, jclass, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeWithVarArgs(soa, nullptr, mid, ap));
va_end(ap);
@@ -1785,13 +1803,13 @@
}
static jfloat CallStaticFloatMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) {
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithVarArgs(soa, nullptr, mid, args).GetF();
}
static jfloat CallStaticFloatMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, nullptr, mid, args).GetF();
}
@@ -1799,7 +1817,7 @@
static jdouble CallStaticDoubleMethod(JNIEnv* env, jclass, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeWithVarArgs(soa, nullptr, mid, ap));
va_end(ap);
@@ -1807,13 +1825,13 @@
}
static jdouble CallStaticDoubleMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) {
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithVarArgs(soa, nullptr, mid, args).GetD();
}
static jdouble CallStaticDoubleMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, nullptr, mid, args).GetD();
}
@@ -1821,20 +1839,20 @@
static void CallStaticVoidMethod(JNIEnv* env, jclass, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_VOID(mid);
ScopedObjectAccess soa(env);
InvokeWithVarArgs(soa, nullptr, mid, ap);
va_end(ap);
}
static void CallStaticVoidMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) {
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_VOID(mid);
ScopedObjectAccess soa(env);
InvokeWithVarArgs(soa, nullptr, mid, args);
}
static void CallStaticVoidMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(mid);
+ CHECK_NON_NULL_ARGUMENT_RETURN_VOID(mid);
ScopedObjectAccess soa(env);
InvokeWithJValues(soa, nullptr, mid, args);
}
@@ -1863,20 +1881,20 @@
}
static jsize GetStringLength(JNIEnv* env, jstring java_string) {
- CHECK_NON_NULL_ARGUMENT(java_string);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(java_string);
ScopedObjectAccess soa(env);
return soa.Decode<mirror::String*>(java_string)->GetLength();
}
static jsize GetStringUTFLength(JNIEnv* env, jstring java_string) {
- CHECK_NON_NULL_ARGUMENT(java_string);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(java_string);
ScopedObjectAccess soa(env);
return soa.Decode<mirror::String*>(java_string)->GetUtfLength();
}
static void GetStringRegion(JNIEnv* env, jstring java_string, jsize start, jsize length,
jchar* buf) {
- CHECK_NON_NULL_ARGUMENT(java_string);
+ CHECK_NON_NULL_ARGUMENT_RETURN_VOID(java_string);
ScopedObjectAccess soa(env);
mirror::String* s = soa.Decode<mirror::String*>(java_string);
if (start < 0 || length < 0 || start + length > s->GetLength()) {
@@ -1890,7 +1908,7 @@
static void GetStringUTFRegion(JNIEnv* env, jstring java_string, jsize start, jsize length,
char* buf) {
- CHECK_NON_NULL_ARGUMENT(java_string);
+ CHECK_NON_NULL_ARGUMENT_RETURN_VOID(java_string);
ScopedObjectAccess soa(env);
mirror::String* s = soa.Decode<mirror::String*>(java_string);
if (start < 0 || length < 0 || start + length > s->GetLength()) {
@@ -1922,7 +1940,7 @@
}
static void ReleaseStringChars(JNIEnv* env, jstring java_string, const jchar* chars) {
- CHECK_NON_NULL_ARGUMENT(java_string);
+ CHECK_NON_NULL_ARGUMENT_RETURN_VOID(java_string);
delete[] chars;
ScopedObjectAccess soa(env);
UnpinPrimitiveArray(soa, soa.Decode<mirror::String*>(java_string)->GetCharArray());
@@ -1959,7 +1977,7 @@
}
static jsize GetArrayLength(JNIEnv* env, jarray java_array) {
- CHECK_NON_NULL_ARGUMENT(java_array);
+ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(java_array);
ScopedObjectAccess soa(env);
mirror::Object* obj = soa.Decode<mirror::Object*>(java_array);
if (UNLIKELY(!obj->IsArrayInstance())) {
@@ -1979,7 +1997,7 @@
static void SetObjectArrayElement(JNIEnv* env, jobjectArray java_array, jsize index,
jobject java_value) {
- CHECK_NON_NULL_ARGUMENT(java_array);
+ CHECK_NON_NULL_ARGUMENT_RETURN_VOID(java_array);
ScopedObjectAccess soa(env);
mirror::ObjectArray<mirror::Object>* array =
soa.Decode<mirror::ObjectArray<mirror::Object>*>(java_array);
@@ -1988,38 +2006,31 @@
}
static jbooleanArray NewBooleanArray(JNIEnv* env, jsize length) {
- ScopedObjectAccess soa(env);
- return NewPrimitiveArray<jbooleanArray, mirror::BooleanArray>(soa, length);
+ return NewPrimitiveArray<jbooleanArray, mirror::BooleanArray>(env, length);
}
static jbyteArray NewByteArray(JNIEnv* env, jsize length) {
- ScopedObjectAccess soa(env);
- return NewPrimitiveArray<jbyteArray, mirror::ByteArray>(soa, length);
+ return NewPrimitiveArray<jbyteArray, mirror::ByteArray>(env, length);
}
static jcharArray NewCharArray(JNIEnv* env, jsize length) {
- ScopedObjectAccess soa(env);
- return NewPrimitiveArray<jcharArray, mirror::CharArray>(soa, length);
+ return NewPrimitiveArray<jcharArray, mirror::CharArray>(env, length);
}
static jdoubleArray NewDoubleArray(JNIEnv* env, jsize length) {
- ScopedObjectAccess soa(env);
- return NewPrimitiveArray<jdoubleArray, mirror::DoubleArray>(soa, length);
+ return NewPrimitiveArray<jdoubleArray, mirror::DoubleArray>(env, length);
}
static jfloatArray NewFloatArray(JNIEnv* env, jsize length) {
- ScopedObjectAccess soa(env);
- return NewPrimitiveArray<jfloatArray, mirror::FloatArray>(soa, length);
+ return NewPrimitiveArray<jfloatArray, mirror::FloatArray>(env, length);
}
static jintArray NewIntArray(JNIEnv* env, jsize length) {
- ScopedObjectAccess soa(env);
- return NewPrimitiveArray<jintArray, mirror::IntArray>(soa, length);
+ return NewPrimitiveArray<jintArray, mirror::IntArray>(env, length);
}
static jlongArray NewLongArray(JNIEnv* env, jsize length) {
- ScopedObjectAccess soa(env);
- return NewPrimitiveArray<jlongArray, mirror::LongArray>(soa, length);
+ return NewPrimitiveArray<jlongArray, mirror::LongArray>(env, length);
}
static jobjectArray NewObjectArray(JNIEnv* env, jsize length, jclass element_jclass,
@@ -2028,6 +2039,7 @@
JniAbortF("NewObjectArray", "negative array length: %d", length);
return nullptr;
}
+ CHECK_NON_NULL_ARGUMENT(element_jclass);
// Compute the array class corresponding to the given element class.
ScopedObjectAccess soa(env);
@@ -2069,14 +2081,18 @@
}
static jshortArray NewShortArray(JNIEnv* env, jsize length) {
- ScopedObjectAccess soa(env);
- return NewPrimitiveArray<jshortArray, mirror::ShortArray>(soa, length);
+ return NewPrimitiveArray<jshortArray, mirror::ShortArray>(env, length);
}
static void* GetPrimitiveArrayCritical(JNIEnv* env, jarray java_array, jboolean* is_copy) {
CHECK_NON_NULL_ARGUMENT(java_array);
ScopedObjectAccess soa(env);
mirror::Array* array = soa.Decode<mirror::Array*>(java_array);
+ if (UNLIKELY(!array->GetClass()->IsPrimitiveArray())) {
+ JniAbortF("GetPrimitiveArrayCritical", "expected primitive array, given %s",
+ PrettyDescriptor(array->GetClass()).c_str());
+ return nullptr;
+ }
gc::Heap* heap = Runtime::Current()->GetHeap();
if (heap->IsMovableObject(array)) {
heap->IncrementDisableMovingGC(soa.Self());
@@ -2090,196 +2106,174 @@
return array->GetRawData(array->GetClass()->GetComponentSize(), 0);
}
- static void ReleasePrimitiveArrayCritical(JNIEnv* env, jarray array, void* elements, jint mode) {
- CHECK_NON_NULL_ARGUMENT(array);
- ReleasePrimitiveArray(env, array, elements, mode);
+ static void ReleasePrimitiveArrayCritical(JNIEnv* env, jarray java_array, void* elements,
+ jint mode) {
+ CHECK_NON_NULL_ARGUMENT_RETURN_VOID(java_array);
+ ScopedObjectAccess soa(env);
+ mirror::Array* array = soa.Decode<mirror::Array*>(java_array);
+ if (UNLIKELY(!array->GetClass()->IsPrimitiveArray())) {
+ JniAbortF("ReleasePrimitiveArrayCritical", "expected primitive array, given %s",
+ PrettyDescriptor(array->GetClass()).c_str());
+ return;
+ }
+ const size_t component_size = array->GetClass()->GetComponentSize();
+ ReleasePrimitiveArray(soa, array, component_size, elements, mode);
}
static jboolean* GetBooleanArrayElements(JNIEnv* env, jbooleanArray array, jboolean* is_copy) {
- CHECK_NON_NULL_ARGUMENT(array);
- ScopedObjectAccess soa(env);
- return GetPrimitiveArray<jbooleanArray, jboolean*, mirror::BooleanArray>(soa, array, is_copy);
+ return GetPrimitiveArray<jbooleanArray, jboolean, mirror::BooleanArray>(env, array, is_copy);
}
static jbyte* GetByteArrayElements(JNIEnv* env, jbyteArray array, jboolean* is_copy) {
- CHECK_NON_NULL_ARGUMENT(array);
- ScopedObjectAccess soa(env);
- return GetPrimitiveArray<jbyteArray, jbyte*, mirror::ByteArray>(soa, array, is_copy);
+ return GetPrimitiveArray<jbyteArray, jbyte, mirror::ByteArray>(env, array, is_copy);
}
static jchar* GetCharArrayElements(JNIEnv* env, jcharArray array, jboolean* is_copy) {
- CHECK_NON_NULL_ARGUMENT(array);
- ScopedObjectAccess soa(env);
- return GetPrimitiveArray<jcharArray, jchar*, mirror::CharArray>(soa, array, is_copy);
+ return GetPrimitiveArray<jcharArray, jchar, mirror::CharArray>(env, array, is_copy);
}
static jdouble* GetDoubleArrayElements(JNIEnv* env, jdoubleArray array, jboolean* is_copy) {
- CHECK_NON_NULL_ARGUMENT(array);
- ScopedObjectAccess soa(env);
- return GetPrimitiveArray<jdoubleArray, jdouble*, mirror::DoubleArray>(soa, array, is_copy);
+ return GetPrimitiveArray<jdoubleArray, jdouble, mirror::DoubleArray>(env, array, is_copy);
}
static jfloat* GetFloatArrayElements(JNIEnv* env, jfloatArray array, jboolean* is_copy) {
- CHECK_NON_NULL_ARGUMENT(array);
- ScopedObjectAccess soa(env);
- return GetPrimitiveArray<jfloatArray, jfloat*, mirror::FloatArray>(soa, array, is_copy);
+ return GetPrimitiveArray<jfloatArray, jfloat, mirror::FloatArray>(env, array, is_copy);
}
static jint* GetIntArrayElements(JNIEnv* env, jintArray array, jboolean* is_copy) {
- CHECK_NON_NULL_ARGUMENT(array);
- ScopedObjectAccess soa(env);
- return GetPrimitiveArray<jintArray, jint*, mirror::IntArray>(soa, array, is_copy);
+ return GetPrimitiveArray<jintArray, jint, mirror::IntArray>(env, array, is_copy);
}
static jlong* GetLongArrayElements(JNIEnv* env, jlongArray array, jboolean* is_copy) {
- CHECK_NON_NULL_ARGUMENT(array);
- ScopedObjectAccess soa(env);
- return GetPrimitiveArray<jlongArray, jlong*, mirror::LongArray>(soa, array, is_copy);
+ return GetPrimitiveArray<jlongArray, jlong, mirror::LongArray>(env, array, is_copy);
}
static jshort* GetShortArrayElements(JNIEnv* env, jshortArray array, jboolean* is_copy) {
- CHECK_NON_NULL_ARGUMENT(array);
- ScopedObjectAccess soa(env);
- return GetPrimitiveArray<jshortArray, jshort*, mirror::ShortArray>(soa, array, is_copy);
+ return GetPrimitiveArray<jshortArray, jshort, mirror::ShortArray>(env, array, is_copy);
}
static void ReleaseBooleanArrayElements(JNIEnv* env, jbooleanArray array, jboolean* elements,
jint mode) {
- ReleasePrimitiveArray(env, array, elements, mode);
+ ReleasePrimitiveArray<jbooleanArray, jboolean, mirror::BooleanArray>(env, array, elements,
+ mode);
}
static void ReleaseByteArrayElements(JNIEnv* env, jbyteArray array, jbyte* elements, jint mode) {
- ReleasePrimitiveArray(env, array, elements, mode);
+ ReleasePrimitiveArray<jbyteArray, jbyte, mirror::ByteArray>(env, array, elements, mode);
}
static void ReleaseCharArrayElements(JNIEnv* env, jcharArray array, jchar* elements, jint mode) {
- ReleasePrimitiveArray(env, array, elements, mode);
+ ReleasePrimitiveArray<jcharArray, jchar, mirror::CharArray>(env, array, elements, mode);
}
static void ReleaseDoubleArrayElements(JNIEnv* env, jdoubleArray array, jdouble* elements,
jint mode) {
- ReleasePrimitiveArray(env, array, elements, mode);
+ ReleasePrimitiveArray<jdoubleArray, jdouble, mirror::DoubleArray>(env, array, elements, mode);
}
static void ReleaseFloatArrayElements(JNIEnv* env, jfloatArray array, jfloat* elements,
jint mode) {
- ReleasePrimitiveArray(env, array, elements, mode);
+ ReleasePrimitiveArray<jfloatArray, jfloat, mirror::FloatArray>(env, array, elements, mode);
}
static void ReleaseIntArrayElements(JNIEnv* env, jintArray array, jint* elements, jint mode) {
- ReleasePrimitiveArray(env, array, elements, mode);
+ ReleasePrimitiveArray<jintArray, jint, mirror::IntArray>(env, array, elements, mode);
}
static void ReleaseLongArrayElements(JNIEnv* env, jlongArray array, jlong* elements, jint mode) {
- ReleasePrimitiveArray(env, array, elements, mode);
+ ReleasePrimitiveArray<jlongArray, jlong, mirror::LongArray>(env, array, elements, mode);
}
static void ReleaseShortArrayElements(JNIEnv* env, jshortArray array, jshort* elements,
jint mode) {
- ReleasePrimitiveArray(env, array, elements, mode);
+ ReleasePrimitiveArray<jshortArray, jshort, mirror::ShortArray>(env, array, elements, mode);
}
static void GetBooleanArrayRegion(JNIEnv* env, jbooleanArray array, jsize start, jsize length,
jboolean* buf) {
- ScopedObjectAccess soa(env);
- GetPrimitiveArrayRegion<jbooleanArray, jboolean, mirror::BooleanArray>(soa, array, start,
+ GetPrimitiveArrayRegion<jbooleanArray, jboolean, mirror::BooleanArray>(env, array, start,
length, buf);
}
static void GetByteArrayRegion(JNIEnv* env, jbyteArray array, jsize start, jsize length,
jbyte* buf) {
- ScopedObjectAccess soa(env);
- GetPrimitiveArrayRegion<jbyteArray, jbyte, mirror::ByteArray>(soa, array, start, length, buf);
+ GetPrimitiveArrayRegion<jbyteArray, jbyte, mirror::ByteArray>(env, array, start, length, buf);
}
static void GetCharArrayRegion(JNIEnv* env, jcharArray array, jsize start, jsize length,
jchar* buf) {
- ScopedObjectAccess soa(env);
- GetPrimitiveArrayRegion<jcharArray, jchar, mirror::CharArray>(soa, array, start, length, buf);
+ GetPrimitiveArrayRegion<jcharArray, jchar, mirror::CharArray>(env, array, start, length, buf);
}
static void GetDoubleArrayRegion(JNIEnv* env, jdoubleArray array, jsize start, jsize length,
jdouble* buf) {
- ScopedObjectAccess soa(env);
- GetPrimitiveArrayRegion<jdoubleArray, jdouble, mirror::DoubleArray>(soa, array, start, length,
+ GetPrimitiveArrayRegion<jdoubleArray, jdouble, mirror::DoubleArray>(env, array, start, length,
buf);
}
static void GetFloatArrayRegion(JNIEnv* env, jfloatArray array, jsize start, jsize length,
jfloat* buf) {
- ScopedObjectAccess soa(env);
- GetPrimitiveArrayRegion<jfloatArray, jfloat, mirror::FloatArray>(soa, array, start, length,
+ GetPrimitiveArrayRegion<jfloatArray, jfloat, mirror::FloatArray>(env, array, start, length,
buf);
}
static void GetIntArrayRegion(JNIEnv* env, jintArray array, jsize start, jsize length,
jint* buf) {
- ScopedObjectAccess soa(env);
- GetPrimitiveArrayRegion<jintArray, jint, mirror::IntArray>(soa, array, start, length, buf);
+ GetPrimitiveArrayRegion<jintArray, jint, mirror::IntArray>(env, array, start, length, buf);
}
static void GetLongArrayRegion(JNIEnv* env, jlongArray array, jsize start, jsize length,
jlong* buf) {
- ScopedObjectAccess soa(env);
- GetPrimitiveArrayRegion<jlongArray, jlong, mirror::LongArray>(soa, array, start, length, buf);
+ GetPrimitiveArrayRegion<jlongArray, jlong, mirror::LongArray>(env, array, start, length, buf);
}
static void GetShortArrayRegion(JNIEnv* env, jshortArray array, jsize start, jsize length,
jshort* buf) {
- ScopedObjectAccess soa(env);
- GetPrimitiveArrayRegion<jshortArray, jshort, mirror::ShortArray>(soa, array, start, length,
+ GetPrimitiveArrayRegion<jshortArray, jshort, mirror::ShortArray>(env, array, start, length,
buf);
}
static void SetBooleanArrayRegion(JNIEnv* env, jbooleanArray array, jsize start, jsize length,
const jboolean* buf) {
- ScopedObjectAccess soa(env);
- SetPrimitiveArrayRegion<jbooleanArray, jboolean, mirror::BooleanArray>(soa, array, start,
+ SetPrimitiveArrayRegion<jbooleanArray, jboolean, mirror::BooleanArray>(env, array, start,
length, buf);
}
static void SetByteArrayRegion(JNIEnv* env, jbyteArray array, jsize start, jsize length,
const jbyte* buf) {
- ScopedObjectAccess soa(env);
- SetPrimitiveArrayRegion<jbyteArray, jbyte, mirror::ByteArray>(soa, array, start, length, buf);
+ SetPrimitiveArrayRegion<jbyteArray, jbyte, mirror::ByteArray>(env, array, start, length, buf);
}
static void SetCharArrayRegion(JNIEnv* env, jcharArray array, jsize start, jsize length,
const jchar* buf) {
- ScopedObjectAccess soa(env);
- SetPrimitiveArrayRegion<jcharArray, jchar, mirror::CharArray>(soa, array, start, length, buf);
+ SetPrimitiveArrayRegion<jcharArray, jchar, mirror::CharArray>(env, array, start, length, buf);
}
static void SetDoubleArrayRegion(JNIEnv* env, jdoubleArray array, jsize start, jsize length,
const jdouble* buf) {
- ScopedObjectAccess soa(env);
- SetPrimitiveArrayRegion<jdoubleArray, jdouble, mirror::DoubleArray>(soa, array, start, length,
+ SetPrimitiveArrayRegion<jdoubleArray, jdouble, mirror::DoubleArray>(env, array, start, length,
buf);
}
static void SetFloatArrayRegion(JNIEnv* env, jfloatArray array, jsize start, jsize length,
const jfloat* buf) {
- ScopedObjectAccess soa(env);
- SetPrimitiveArrayRegion<jfloatArray, jfloat, mirror::FloatArray>(soa, array, start, length,
+ SetPrimitiveArrayRegion<jfloatArray, jfloat, mirror::FloatArray>(env, array, start, length,
buf);
}
static void SetIntArrayRegion(JNIEnv* env, jintArray array, jsize start, jsize length,
const jint* buf) {
- ScopedObjectAccess soa(env);
- SetPrimitiveArrayRegion<jintArray, jint, mirror::IntArray>(soa, array, start, length, buf);
+ SetPrimitiveArrayRegion<jintArray, jint, mirror::IntArray>(env, array, start, length, buf);
}
static void SetLongArrayRegion(JNIEnv* env, jlongArray array, jsize start, jsize length,
const jlong* buf) {
- ScopedObjectAccess soa(env);
- SetPrimitiveArrayRegion<jlongArray, jlong, mirror::LongArray>(soa, array, start, length, buf);
+ SetPrimitiveArrayRegion<jlongArray, jlong, mirror::LongArray>(env, array, start, length, buf);
}
static void SetShortArrayRegion(JNIEnv* env, jshortArray array, jsize start, jsize length,
const jshort* buf) {
- ScopedObjectAccess soa(env);
- SetPrimitiveArrayRegion<jshortArray, jshort, mirror::ShortArray>(soa, array, start, length,
+ SetPrimitiveArrayRegion<jshortArray, jshort, mirror::ShortArray>(env, array, start, length,
buf);
}
@@ -2294,7 +2288,7 @@
JniAbortF("RegisterNatives", "negative method count: %d", method_count);
return JNI_ERR; // Not reached.
}
- CHECK_NON_NULL_ARGUMENT_FN_NAME("RegisterNatives", java_class);
+ CHECK_NON_NULL_ARGUMENT_FN_NAME("RegisterNatives", java_class, JNI_ERR);
ScopedObjectAccess soa(env);
mirror::Class* c = soa.Decode<mirror::Class*>(java_class);
if (UNLIKELY(method_count == 0)) {
@@ -2302,7 +2296,7 @@
<< PrettyDescriptor(c);
return JNI_OK;
}
- CHECK_NON_NULL_ARGUMENT_FN_NAME("RegisterNatives", methods);
+ CHECK_NON_NULL_ARGUMENT_FN_NAME("RegisterNatives", methods, JNI_ERR);
for (jint i = 0; i < method_count; ++i) {
const char* name = methods[i].name;
const char* sig = methods[i].signature;
@@ -2339,30 +2333,37 @@
}
static jint UnregisterNatives(JNIEnv* env, jclass java_class) {
- CHECK_NON_NULL_ARGUMENT(java_class);
+ CHECK_NON_NULL_ARGUMENT_RETURN(java_class, JNI_ERR);
ScopedObjectAccess soa(env);
mirror::Class* c = soa.Decode<mirror::Class*>(java_class);
VLOG(jni) << "[Unregistering JNI native methods for " << PrettyClass(c) << "]";
+ size_t unregistered_count = 0;
for (size_t i = 0; i < c->NumDirectMethods(); ++i) {
mirror::ArtMethod* m = c->GetDirectMethod(i);
if (m->IsNative()) {
m->UnregisterNative(soa.Self());
+ unregistered_count++;
}
}
for (size_t i = 0; i < c->NumVirtualMethods(); ++i) {
mirror::ArtMethod* m = c->GetVirtualMethod(i);
if (m->IsNative()) {
m->UnregisterNative(soa.Self());
+ unregistered_count++;
}
}
+ if (unregistered_count == 0) {
+ LOG(WARNING) << "JNI UnregisterNatives: attempt to unregister native methods of class '"
+ << PrettyDescriptor(c) << "' that contains no native methods";
+ }
return JNI_OK;
}
static jint MonitorEnter(JNIEnv* env, jobject java_object) NO_THREAD_SAFETY_ANALYSIS {
- CHECK_NON_NULL_ARGUMENT(java_object);
+ CHECK_NON_NULL_ARGUMENT_RETURN(java_object, JNI_ERR);
ScopedObjectAccess soa(env);
mirror::Object* o = soa.Decode<mirror::Object*>(java_object);
o = o->MonitorEnter(soa.Self());
@@ -2374,7 +2375,7 @@
}
static jint MonitorExit(JNIEnv* env, jobject java_object) NO_THREAD_SAFETY_ANALYSIS {
- CHECK_NON_NULL_ARGUMENT(java_object);
+ CHECK_NON_NULL_ARGUMENT_RETURN(java_object, JNI_ERR);
ScopedObjectAccess soa(env);
mirror::Object* o = soa.Decode<mirror::Object*>(java_object);
o->MonitorExit(soa.Self());
@@ -2386,7 +2387,7 @@
}
static jint GetJavaVM(JNIEnv* env, JavaVM** vm) {
- CHECK_NON_NULL_ARGUMENT(vm);
+ CHECK_NON_NULL_ARGUMENT_RETURN(vm, JNI_ERR);
Runtime* runtime = Runtime::Current();
if (runtime != nullptr) {
*vm = runtime->GetJavaVM();
@@ -2426,7 +2427,7 @@
}
static jobjectRefType GetObjectRefType(JNIEnv* env, jobject java_object) {
- CHECK_NON_NULL_ARGUMENT(java_object);
+ CHECK_NON_NULL_ARGUMENT_RETURN(java_object, JNIInvalidRefType);
// Do we definitely know what kind of reference this is?
IndirectRef ref = reinterpret_cast<IndirectRef>(java_object);
@@ -2443,9 +2444,9 @@
return JNIGlobalRefType;
case kWeakGlobal:
return JNIWeakGlobalRefType;
- case kSirtOrInvalid:
+ case kHandleScopeOrInvalid:
// Is it in a stack IRT?
- if (static_cast<JNIEnvExt*>(env)->self->SirtContains(java_object)) {
+ if (static_cast<JNIEnvExt*>(env)->self->HandleScopeContains(java_object)) {
return JNILocalRefType;
}
return JNIInvalidRefType;
@@ -2473,51 +2474,82 @@
}
template<typename JniT, typename ArtT>
- static JniT NewPrimitiveArray(const ScopedObjectAccess& soa, jsize length)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static JniT NewPrimitiveArray(JNIEnv* env, jsize length) {
if (UNLIKELY(length < 0)) {
JniAbortF("NewPrimitiveArray", "negative array length: %d", length);
return nullptr;
}
+ ScopedObjectAccess soa(env);
ArtT* result = ArtT::Alloc(soa.Self(), length);
return soa.AddLocalReference<JniT>(result);
}
- template <typename ArrayT, typename CArrayT, typename ArtArrayT>
- static CArrayT GetPrimitiveArray(ScopedObjectAccess& soa, ArrayT java_array,
- jboolean* is_copy)
+ template <typename JArrayT, typename ElementT, typename ArtArrayT>
+ static ArtArrayT* DecodeAndCheckArrayType(ScopedObjectAccess& soa, JArrayT java_array,
+ const char* fn_name, const char* operation)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ArtArrayT* array = soa.Decode<ArtArrayT*>(java_array);
+ if (UNLIKELY(ArtArrayT::GetArrayClass() != array->GetClass())) {
+ JniAbortF(fn_name, "attempt to %s %s primitive array elements with an object of type %s",
+ operation, PrettyDescriptor(ArtArrayT::GetArrayClass()->GetComponentType()).c_str(),
+ PrettyDescriptor(array->GetClass()).c_str());
+ return nullptr;
+ }
+ DCHECK_EQ(sizeof(ElementT), array->GetClass()->GetComponentSize());
+ return array;
+ }
+
+ template <typename ArrayT, typename ElementT, typename ArtArrayT>
+ static ElementT* GetPrimitiveArray(JNIEnv* env, ArrayT java_array, jboolean* is_copy) {
+ CHECK_NON_NULL_ARGUMENT(java_array);
+ ScopedObjectAccess soa(env);
+ ArtArrayT* array = DecodeAndCheckArrayType<ArrayT, ElementT, ArtArrayT>(soa, java_array,
+ "GetArrayElements",
+ "get");
+ if (UNLIKELY(array == nullptr)) {
+ return nullptr;
+ }
PinPrimitiveArray(soa, array);
// Only make a copy if necessary.
if (Runtime::Current()->GetHeap()->IsMovableObject(array)) {
if (is_copy != nullptr) {
*is_copy = JNI_TRUE;
}
- static const size_t component_size = array->GetClass()->GetComponentSize();
+ const size_t component_size = sizeof(ElementT);
size_t size = array->GetLength() * component_size;
void* data = new uint64_t[RoundUp(size, 8) / 8];
memcpy(data, array->GetData(), size);
- return reinterpret_cast<CArrayT>(data);
+ return reinterpret_cast<ElementT*>(data);
} else {
if (is_copy != nullptr) {
*is_copy = JNI_FALSE;
}
- return reinterpret_cast<CArrayT>(array->GetData());
+ return reinterpret_cast<ElementT*>(array->GetData());
}
}
- template <typename ArrayT, typename ElementT>
+ template <typename ArrayT, typename ElementT, typename ArtArrayT>
static void ReleasePrimitiveArray(JNIEnv* env, ArrayT java_array, ElementT* elements, jint mode) {
+ CHECK_NON_NULL_ARGUMENT_RETURN_VOID(java_array);
ScopedObjectAccess soa(env);
- mirror::Array* array = soa.Decode<mirror::Array*>(java_array);
- size_t component_size = array->GetClass()->GetComponentSize();
+ ArtArrayT* array = DecodeAndCheckArrayType<ArrayT, ElementT, ArtArrayT>(soa, java_array,
+ "ReleaseArrayElements",
+ "release");
+ if (array == nullptr) {
+ return;
+ }
+ ReleasePrimitiveArray(soa, array, sizeof(ElementT), elements, mode);
+ }
+
+ static void ReleasePrimitiveArray(ScopedObjectAccess& soa, mirror::Array* array,
+ size_t component_size, void* elements, jint mode)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
void* array_data = array->GetRawData(component_size, 0);
gc::Heap* heap = Runtime::Current()->GetHeap();
- bool is_copy = array_data != reinterpret_cast<void*>(elements);
+ bool is_copy = array_data != elements;
size_t bytes = array->GetLength() * component_size;
- VLOG(heap) << "Release primitive array " << env << " array_data " << array_data
- << " elements " << reinterpret_cast<void*>(elements);
+ VLOG(heap) << "Release primitive array " << soa.Env() << " array_data " << array_data
+ << " elements " << elements;
if (is_copy) {
// Sanity check: If elements is not the same as the java array's data, it better not be a
// heap address. TODO: This might be slow to check, may be worth keeping track of which
@@ -2543,33 +2575,43 @@
}
}
- template <typename JavaArrayT, typename JavaT, typename ArrayT>
- static void GetPrimitiveArrayRegion(ScopedObjectAccess& soa, JavaArrayT java_array,
- jsize start, jsize length, JavaT* buf)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- CHECK_NON_NULL_ARGUMENT(java_array);
- ArrayT* array = soa.Decode<ArrayT*>(java_array);
- if (start < 0 || length < 0 || start + length > array->GetLength()) {
- ThrowAIOOBE(soa, array, start, length, "src");
- } else {
- CHECK_NON_NULL_MEMCPY_ARGUMENT(length, buf);
- JavaT* data = array->GetData();
- memcpy(buf, data + start, length * sizeof(JavaT));
+ template <typename JArrayT, typename ElementT, typename ArtArrayT>
+ static void GetPrimitiveArrayRegion(JNIEnv* env, JArrayT java_array,
+ jsize start, jsize length, ElementT* buf) {
+ CHECK_NON_NULL_ARGUMENT_RETURN_VOID(java_array);
+ ScopedObjectAccess soa(env);
+ ArtArrayT* array =
+ DecodeAndCheckArrayType<JArrayT, ElementT, ArtArrayT>(soa, java_array,
+ "GetPrimitiveArrayRegion",
+ "get region of");
+ if (array != nullptr) {
+ if (start < 0 || length < 0 || start + length > array->GetLength()) {
+ ThrowAIOOBE(soa, array, start, length, "src");
+ } else {
+ CHECK_NON_NULL_MEMCPY_ARGUMENT(length, buf);
+ ElementT* data = array->GetData();
+ memcpy(buf, data + start, length * sizeof(ElementT));
+ }
}
}
- template <typename JavaArrayT, typename JavaT, typename ArrayT>
- static void SetPrimitiveArrayRegion(ScopedObjectAccess& soa, JavaArrayT java_array,
- jsize start, jsize length, const JavaT* buf)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- CHECK_NON_NULL_ARGUMENT(java_array);
- ArrayT* array = soa.Decode<ArrayT*>(java_array);
- if (start < 0 || length < 0 || start + length > array->GetLength()) {
- ThrowAIOOBE(soa, array, start, length, "dst");
- } else {
- CHECK_NON_NULL_MEMCPY_ARGUMENT(length, buf);
- JavaT* data = array->GetData();
- memcpy(data + start, buf, length * sizeof(JavaT));
+ template <typename JArrayT, typename ElementT, typename ArtArrayT>
+ static void SetPrimitiveArrayRegion(JNIEnv* env, JArrayT java_array,
+ jsize start, jsize length, const ElementT* buf) {
+ CHECK_NON_NULL_ARGUMENT_RETURN_VOID(java_array);
+ ScopedObjectAccess soa(env);
+ ArtArrayT* array =
+ DecodeAndCheckArrayType<JArrayT, ElementT, ArtArrayT>(soa, java_array,
+ "SetPrimitiveArrayRegion",
+ "set region of");
+ if (array != nullptr) {
+ if (start < 0 || length < 0 || start + length > array->GetLength()) {
+ ThrowAIOOBE(soa, array, start, length, "dst");
+ } else {
+ CHECK_NON_NULL_MEMCPY_ARGUMENT(length, buf);
+ ElementT* data = array->GetData();
+ memcpy(data + start, buf, length * sizeof(ElementT));
+ }
}
}
};
@@ -3090,7 +3132,7 @@
}
bool JavaVMExt::LoadNativeLibrary(const std::string& path,
- const SirtRef<mirror::ClassLoader>& class_loader,
+ const Handle<mirror::ClassLoader>& class_loader,
std::string* detail) {
detail->clear();
@@ -3106,18 +3148,18 @@
library = libraries->Get(path);
}
if (library != nullptr) {
- if (library->GetClassLoader() != class_loader.get()) {
+ if (library->GetClassLoader() != class_loader.Get()) {
// The library will be associated with class_loader. The JNI
// spec says we can't load the same library into more than one
// class loader.
StringAppendF(detail, "Shared library \"%s\" already opened by "
"ClassLoader %p; can't open in ClassLoader %p",
- path.c_str(), library->GetClassLoader(), class_loader.get());
+ path.c_str(), library->GetClassLoader(), class_loader.Get());
LOG(WARNING) << detail;
return false;
}
VLOG(jni) << "[Shared library \"" << path << "\" already loaded in "
- << "ClassLoader " << class_loader.get() << "]";
+ << "ClassLoader " << class_loader.Get() << "]";
if (!library->CheckOnLoadResult()) {
StringAppendF(detail, "JNI_OnLoad failed on a previous attempt "
"to load \"%s\"", path.c_str());
@@ -3158,18 +3200,18 @@
MutexLock mu(self, libraries_lock);
library = libraries->Get(path);
if (library == nullptr) { // We won race to get libraries_lock
- library = new SharedLibrary(path, handle, class_loader.get());
+ library = new SharedLibrary(path, handle, class_loader.Get());
libraries->Put(path, library);
created_library = true;
}
}
if (!created_library) {
LOG(INFO) << "WOW: we lost a race to add shared library: "
- << "\"" << path << "\" ClassLoader=" << class_loader.get();
+ << "\"" << path << "\" ClassLoader=" << class_loader.Get();
return library->CheckOnLoadResult();
}
- VLOG(jni) << "[Added shared library \"" << path << "\" for ClassLoader " << class_loader.get()
+ VLOG(jni) << "[Added shared library \"" << path << "\" for ClassLoader " << class_loader.Get()
<< "]";
bool was_successful = false;
@@ -3184,8 +3226,9 @@
// the comments in the JNI FindClass function.)
typedef int (*JNI_OnLoadFn)(JavaVM*, void*);
JNI_OnLoadFn jni_on_load = reinterpret_cast<JNI_OnLoadFn>(sym);
- SirtRef<mirror::ClassLoader> old_class_loader(self, self->GetClassLoaderOverride());
- self->SetClassLoaderOverride(class_loader.get());
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ClassLoader> old_class_loader(hs.NewHandle(self->GetClassLoaderOverride()));
+ self->SetClassLoaderOverride(class_loader.Get());
int version = 0;
{
@@ -3194,7 +3237,7 @@
version = (*jni_on_load)(this, nullptr);
}
- self->SetClassLoaderOverride(old_class_loader.get());
+ self->SetClassLoaderOverride(old_class_loader.Get());
if (version == JNI_ERR) {
StringAppendF(detail, "JNI_ERR returned from JNI_OnLoad in \"%s\"", path.c_str());
diff --git a/runtime/jni_internal.h b/runtime/jni_internal.h
index cdf3c47..5964947 100644
--- a/runtime/jni_internal.h
+++ b/runtime/jni_internal.h
@@ -46,7 +46,7 @@
class Libraries;
class ParsedOptions;
class ScopedObjectAccess;
-template<class T> class SirtRef;
+template<class T> class Handle;
class Thread;
void JniAbortF(const char* jni_function_name, const char* fmt, ...)
@@ -67,7 +67,7 @@
* Returns 'true' on success. On failure, sets 'detail' to a
* human-readable description of the error.
*/
- bool LoadNativeLibrary(const std::string& path, const SirtRef<mirror::ClassLoader>& class_loader,
+ bool LoadNativeLibrary(const std::string& path, const Handle<mirror::ClassLoader>& class_loader,
std::string* detail)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index 778b9e5..83e9b10 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -33,35 +33,54 @@
// Turn on -verbose:jni for the JNI tests.
// gLogVerbosity.jni = true;
- vm_->AttachCurrentThread(&env_, NULL);
+ vm_->AttachCurrentThread(&env_, nullptr);
ScopedLocalRef<jclass> aioobe(env_,
env_->FindClass("java/lang/ArrayIndexOutOfBoundsException"));
- CHECK(aioobe.get() != NULL);
+ CHECK(aioobe.get() != nullptr);
aioobe_ = reinterpret_cast<jclass>(env_->NewGlobalRef(aioobe.get()));
ScopedLocalRef<jclass> ase(env_, env_->FindClass("java/lang/ArrayStoreException"));
- CHECK(ase.get() != NULL);
+ CHECK(ase.get() != nullptr);
ase_ = reinterpret_cast<jclass>(env_->NewGlobalRef(ase.get()));
ScopedLocalRef<jclass> sioobe(env_,
env_->FindClass("java/lang/StringIndexOutOfBoundsException"));
- CHECK(sioobe.get() != NULL);
+ CHECK(sioobe.get() != nullptr);
sioobe_ = reinterpret_cast<jclass>(env_->NewGlobalRef(sioobe.get()));
}
+ void ExpectException(jclass exception_class) {
+ EXPECT_TRUE(env_->ExceptionCheck());
+ jthrowable exception = env_->ExceptionOccurred();
+ EXPECT_NE(nullptr, exception);
+ env_->ExceptionClear();
+ EXPECT_TRUE(env_->IsInstanceOf(exception, exception_class));
+ }
+
+ void ExpectClassFound(const char* name) {
+ EXPECT_NE(env_->FindClass(name), nullptr) << name;
+ EXPECT_FALSE(env_->ExceptionCheck()) << name;
+ }
+
+ void ExpectClassNotFound(const char* name) {
+ EXPECT_EQ(env_->FindClass(name), nullptr) << name;
+ EXPECT_TRUE(env_->ExceptionCheck()) << name;
+ env_->ExceptionClear();
+ }
+
void CleanUpJniEnv() {
- if (aioobe_ != NULL) {
+ if (aioobe_ != nullptr) {
env_->DeleteGlobalRef(aioobe_);
- aioobe_ = NULL;
+ aioobe_ = nullptr;
}
- if (ase_ != NULL) {
+ if (ase_ != nullptr) {
env_->DeleteGlobalRef(ase_);
- ase_ = NULL;
+ ase_ = nullptr;
}
- if (sioobe_ != NULL) {
+ if (sioobe_ != nullptr) {
env_->DeleteGlobalRef(sioobe_);
- sioobe_ = NULL;
+ sioobe_ = nullptr;
}
}
@@ -86,9 +105,9 @@
TEST_F(JniInternalTest, AllocObject) {
jclass c = env_->FindClass("java/lang/String");
- ASSERT_TRUE(c != NULL);
+ ASSERT_NE(c, nullptr);
jobject o = env_->AllocObject(c);
- ASSERT_TRUE(o != NULL);
+ ASSERT_NE(o, nullptr);
// We have an instance of the class we asked for...
ASSERT_TRUE(env_->IsInstanceOf(o, c));
@@ -96,139 +115,152 @@
// we didn't call a constructor.
ASSERT_EQ(0, env_->GetIntField(o, env_->GetFieldID(c, "count", "I")));
ASSERT_EQ(0, env_->GetIntField(o, env_->GetFieldID(c, "offset", "I")));
- ASSERT_TRUE(env_->GetObjectField(o, env_->GetFieldID(c, "value", "[C")) == NULL);
+ ASSERT_TRUE(env_->GetObjectField(o, env_->GetFieldID(c, "value", "[C")) == nullptr);
}
TEST_F(JniInternalTest, GetVersion) {
ASSERT_EQ(JNI_VERSION_1_6, env_->GetVersion());
}
-#define EXPECT_CLASS_FOUND(NAME) \
- EXPECT_TRUE(env_->FindClass(NAME) != NULL); \
- EXPECT_FALSE(env_->ExceptionCheck())
-
-#define EXPECT_CLASS_NOT_FOUND(NAME) \
- EXPECT_TRUE(env_->FindClass(NAME) == NULL); \
- EXPECT_TRUE(env_->ExceptionCheck()); \
- env_->ExceptionClear()
-
TEST_F(JniInternalTest, FindClass) {
// Reference types...
- EXPECT_CLASS_FOUND("java/lang/String");
+ ExpectClassFound("java/lang/String");
// ...for arrays too, where you must include "L;".
- EXPECT_CLASS_FOUND("[Ljava/lang/String;");
+ ExpectClassFound("[Ljava/lang/String;");
// Primitive arrays are okay too, if the primitive type is valid.
- EXPECT_CLASS_FOUND("[C");
+ ExpectClassFound("[C");
{
- // We support . as well as / for compatibility, if -Xcheck:jni is off.
CheckJniAbortCatcher check_jni_abort_catcher;
- EXPECT_CLASS_FOUND("java.lang.String");
+ env_->FindClass(nullptr);
+ check_jni_abort_catcher.Check("name == null");
+
+ // We support . as well as / for compatibility, if -Xcheck:jni is off.
+ ExpectClassFound("java.lang.String");
check_jni_abort_catcher.Check("illegal class name 'java.lang.String'");
- EXPECT_CLASS_NOT_FOUND("Ljava.lang.String;");
+ ExpectClassNotFound("Ljava.lang.String;");
check_jni_abort_catcher.Check("illegal class name 'Ljava.lang.String;'");
- EXPECT_CLASS_FOUND("[Ljava.lang.String;");
+ ExpectClassFound("[Ljava.lang.String;");
check_jni_abort_catcher.Check("illegal class name '[Ljava.lang.String;'");
- EXPECT_CLASS_NOT_FOUND("[java.lang.String");
+ ExpectClassNotFound("[java.lang.String");
check_jni_abort_catcher.Check("illegal class name '[java.lang.String'");
// You can't include the "L;" in a JNI class descriptor.
- EXPECT_CLASS_NOT_FOUND("Ljava/lang/String;");
+ ExpectClassNotFound("Ljava/lang/String;");
check_jni_abort_catcher.Check("illegal class name 'Ljava/lang/String;'");
// But you must include it for an array of any reference type.
- EXPECT_CLASS_NOT_FOUND("[java/lang/String");
+ ExpectClassNotFound("[java/lang/String");
check_jni_abort_catcher.Check("illegal class name '[java/lang/String'");
- EXPECT_CLASS_NOT_FOUND("[K");
+ ExpectClassNotFound("[K");
check_jni_abort_catcher.Check("illegal class name '[K'");
+
+ // Void arrays aren't allowed.
+ ExpectClassNotFound("[V");
+ check_jni_abort_catcher.Check("illegal class name '[V'");
}
// But primitive types aren't allowed...
- EXPECT_CLASS_NOT_FOUND("C");
- EXPECT_CLASS_NOT_FOUND("K");
+ ExpectClassNotFound("C");
+ ExpectClassNotFound("V");
+ ExpectClassNotFound("K");
}
-#define EXPECT_EXCEPTION(exception_class) \
- do { \
- EXPECT_TRUE(env_->ExceptionCheck()); \
- jthrowable exception = env_->ExceptionOccurred(); \
- EXPECT_NE(static_cast<jthrowable>(NULL), exception); \
- env_->ExceptionClear(); \
- EXPECT_TRUE(env_->IsInstanceOf(exception, exception_class)); \
- } while (false)
-
TEST_F(JniInternalTest, GetFieldID) {
jclass jlnsfe = env_->FindClass("java/lang/NoSuchFieldError");
- ASSERT_TRUE(jlnsfe != NULL);
+ ASSERT_NE(jlnsfe, nullptr);
jclass c = env_->FindClass("java/lang/String");
- ASSERT_TRUE(c != NULL);
+ ASSERT_NE(c, nullptr);
// Wrong type.
jfieldID fid = env_->GetFieldID(c, "count", "J");
- EXPECT_EQ(static_cast<jfieldID>(NULL), fid);
- EXPECT_EXCEPTION(jlnsfe);
+ EXPECT_EQ(nullptr, fid);
+ ExpectException(jlnsfe);
// Wrong type where type doesn't exist.
fid = env_->GetFieldID(c, "count", "Lrod/jane/freddy;");
- EXPECT_EQ(static_cast<jfieldID>(NULL), fid);
- EXPECT_EXCEPTION(jlnsfe);
+ EXPECT_EQ(nullptr, fid);
+ ExpectException(jlnsfe);
// Wrong name.
fid = env_->GetFieldID(c, "Count", "I");
- EXPECT_EQ(static_cast<jfieldID>(NULL), fid);
- EXPECT_EXCEPTION(jlnsfe);
+ EXPECT_EQ(nullptr, fid);
+ ExpectException(jlnsfe);
// Good declared field lookup.
fid = env_->GetFieldID(c, "count", "I");
- EXPECT_NE(static_cast<jfieldID>(NULL), fid);
- EXPECT_TRUE(fid != NULL);
+ EXPECT_NE(nullptr, fid);
EXPECT_FALSE(env_->ExceptionCheck());
// Good superclass field lookup.
c = env_->FindClass("java/lang/StringBuilder");
fid = env_->GetFieldID(c, "count", "I");
- EXPECT_NE(static_cast<jfieldID>(NULL), fid);
- EXPECT_TRUE(fid != NULL);
+ EXPECT_NE(nullptr, fid);
+ EXPECT_NE(fid, nullptr);
EXPECT_FALSE(env_->ExceptionCheck());
// Not instance.
fid = env_->GetFieldID(c, "CASE_INSENSITIVE_ORDER", "Ljava/util/Comparator;");
- EXPECT_EQ(static_cast<jfieldID>(NULL), fid);
- EXPECT_EXCEPTION(jlnsfe);
+ EXPECT_EQ(nullptr, fid);
+ ExpectException(jlnsfe);
+
+ // Bad arguments.
+ CheckJniAbortCatcher check_jni_abort_catcher;
+ fid = env_->GetFieldID(nullptr, "count", "I");
+ EXPECT_EQ(nullptr, fid);
+ check_jni_abort_catcher.Check("java_class == null");
+ fid = env_->GetFieldID(c, nullptr, "I");
+ EXPECT_EQ(nullptr, fid);
+ check_jni_abort_catcher.Check("name == null");
+ fid = env_->GetFieldID(c, "count", nullptr);
+ EXPECT_EQ(nullptr, fid);
+ check_jni_abort_catcher.Check("sig == null");
}
TEST_F(JniInternalTest, GetStaticFieldID) {
jclass jlnsfe = env_->FindClass("java/lang/NoSuchFieldError");
- ASSERT_TRUE(jlnsfe != NULL);
+ ASSERT_NE(jlnsfe, nullptr);
jclass c = env_->FindClass("java/lang/String");
- ASSERT_TRUE(c != NULL);
+ ASSERT_NE(c, nullptr);
// Wrong type.
jfieldID fid = env_->GetStaticFieldID(c, "CASE_INSENSITIVE_ORDER", "J");
- EXPECT_EQ(static_cast<jfieldID>(NULL), fid);
- EXPECT_EXCEPTION(jlnsfe);
+ EXPECT_EQ(nullptr, fid);
+ ExpectException(jlnsfe);
// Wrong type where type doesn't exist.
fid = env_->GetStaticFieldID(c, "CASE_INSENSITIVE_ORDER", "Lrod/jane/freddy;");
- EXPECT_EQ(static_cast<jfieldID>(NULL), fid);
- EXPECT_EXCEPTION(jlnsfe);
+ EXPECT_EQ(nullptr, fid);
+ ExpectException(jlnsfe);
// Wrong name.
fid = env_->GetStaticFieldID(c, "cASE_INSENSITIVE_ORDER", "Ljava/util/Comparator;");
- EXPECT_EQ(static_cast<jfieldID>(NULL), fid);
- EXPECT_EXCEPTION(jlnsfe);
+ EXPECT_EQ(nullptr, fid);
+ ExpectException(jlnsfe);
// Good declared field lookup.
fid = env_->GetStaticFieldID(c, "CASE_INSENSITIVE_ORDER", "Ljava/util/Comparator;");
- EXPECT_NE(static_cast<jfieldID>(NULL), fid);
- EXPECT_TRUE(fid != NULL);
+ EXPECT_NE(nullptr, fid);
+ EXPECT_NE(fid, nullptr);
EXPECT_FALSE(env_->ExceptionCheck());
// Not static.
fid = env_->GetStaticFieldID(c, "count", "I");
- EXPECT_EQ(static_cast<jfieldID>(NULL), fid);
- EXPECT_EXCEPTION(jlnsfe);
+ EXPECT_EQ(nullptr, fid);
+ ExpectException(jlnsfe);
+
+ // Bad arguments.
+ CheckJniAbortCatcher check_jni_abort_catcher;
+ fid = env_->GetStaticFieldID(nullptr, "CASE_INSENSITIVE_ORDER", "Ljava/util/Comparator;");
+ EXPECT_EQ(nullptr, fid);
+ check_jni_abort_catcher.Check("java_class == null");
+ fid = env_->GetStaticFieldID(c, nullptr, "Ljava/util/Comparator;");
+ EXPECT_EQ(nullptr, fid);
+ check_jni_abort_catcher.Check("name == null");
+ fid = env_->GetStaticFieldID(c, "CASE_INSENSITIVE_ORDER", nullptr);
+ EXPECT_EQ(nullptr, fid);
+ check_jni_abort_catcher.Check("sig == null");
}
TEST_F(JniInternalTest, GetMethodID) {
@@ -242,24 +274,36 @@
// Check that java.lang.Object.foo() doesn't exist and NoSuchMethodError is
// a pending exception
jmethodID method = env_->GetMethodID(jlobject, "foo", "()V");
- EXPECT_EQ(static_cast<jmethodID>(NULL), method);
- EXPECT_EXCEPTION(jlnsme);
+ EXPECT_EQ(nullptr, method);
+ ExpectException(jlnsme);
// Check that java.lang.Object.equals() does exist
method = env_->GetMethodID(jlobject, "equals", "(Ljava/lang/Object;)Z");
- EXPECT_NE(static_cast<jmethodID>(NULL), method);
+ EXPECT_NE(nullptr, method);
EXPECT_FALSE(env_->ExceptionCheck());
// Check that GetMethodID for java.lang.String.valueOf(int) fails as the
// method is static
method = env_->GetMethodID(jlstring, "valueOf", "(I)Ljava/lang/String;");
- EXPECT_EQ(static_cast<jmethodID>(NULL), method);
- EXPECT_EXCEPTION(jlnsme);
+ EXPECT_EQ(nullptr, method);
+ ExpectException(jlnsme);
// Check that GetMethodID for java.lang.NoSuchMethodError.<init>(String) finds the constructor
method = env_->GetMethodID(jlnsme, "<init>", "(Ljava/lang/String;)V");
- EXPECT_NE(static_cast<jmethodID>(NULL), method);
+ EXPECT_NE(nullptr, method);
EXPECT_FALSE(env_->ExceptionCheck());
+
+ // Bad arguments.
+ CheckJniAbortCatcher check_jni_abort_catcher;
+ method = env_->GetMethodID(nullptr, "<init>", "(Ljava/lang/String;)V");
+ EXPECT_EQ(nullptr, method);
+ check_jni_abort_catcher.Check("java_class == null");
+ method = env_->GetMethodID(jlnsme, nullptr, "(Ljava/lang/String;)V");
+ EXPECT_EQ(nullptr, method);
+ check_jni_abort_catcher.Check("name == null");
+ method = env_->GetMethodID(jlnsme, "<init>", nullptr);
+ EXPECT_EQ(nullptr, method);
+ check_jni_abort_catcher.Check("sig == null");
}
TEST_F(JniInternalTest, GetStaticMethodID) {
@@ -272,93 +316,155 @@
// Check that java.lang.Object.foo() doesn't exist and NoSuchMethodError is
// a pending exception
jmethodID method = env_->GetStaticMethodID(jlobject, "foo", "()V");
- EXPECT_EQ(static_cast<jmethodID>(NULL), method);
- EXPECT_EXCEPTION(jlnsme);
+ EXPECT_EQ(nullptr, method);
+ ExpectException(jlnsme);
// Check that GetStaticMethodID for java.lang.Object.equals(Object) fails as
// the method is not static
method = env_->GetStaticMethodID(jlobject, "equals", "(Ljava/lang/Object;)Z");
- EXPECT_EQ(static_cast<jmethodID>(NULL), method);
- EXPECT_EXCEPTION(jlnsme);
+ EXPECT_EQ(nullptr, method);
+ ExpectException(jlnsme);
// Check that java.lang.String.valueOf(int) does exist
jclass jlstring = env_->FindClass("java/lang/String");
- method = env_->GetStaticMethodID(jlstring, "valueOf",
- "(I)Ljava/lang/String;");
- EXPECT_NE(static_cast<jmethodID>(NULL), method);
+ method = env_->GetStaticMethodID(jlstring, "valueOf", "(I)Ljava/lang/String;");
+ EXPECT_NE(nullptr, method);
EXPECT_FALSE(env_->ExceptionCheck());
+
+ // Bad arguments.
+ CheckJniAbortCatcher check_jni_abort_catcher;
+ method = env_->GetStaticMethodID(nullptr, "valueOf", "(I)Ljava/lang/String;");
+ EXPECT_EQ(nullptr, method);
+ check_jni_abort_catcher.Check("java_class == null");
+ method = env_->GetStaticMethodID(jlstring, nullptr, "(I)Ljava/lang/String;");
+ EXPECT_EQ(nullptr, method);
+ check_jni_abort_catcher.Check("name == null");
+ method = env_->GetStaticMethodID(jlstring, "valueOf", nullptr);
+ EXPECT_EQ(nullptr, method);
+ check_jni_abort_catcher.Check("sig == null");
}
TEST_F(JniInternalTest, FromReflectedField_ToReflectedField) {
jclass jlrField = env_->FindClass("java/lang/reflect/Field");
jclass c = env_->FindClass("java/lang/String");
- ASSERT_TRUE(c != NULL);
+ ASSERT_NE(c, nullptr);
jfieldID fid = env_->GetFieldID(c, "count", "I");
- ASSERT_TRUE(fid != NULL);
+ ASSERT_NE(fid, nullptr);
// Turn the fid into a java.lang.reflect.Field...
jobject field = env_->ToReflectedField(c, fid, JNI_FALSE);
- ASSERT_TRUE(c != NULL);
+ ASSERT_NE(c, nullptr);
ASSERT_TRUE(env_->IsInstanceOf(field, jlrField));
// ...and back again.
jfieldID fid2 = env_->FromReflectedField(field);
- ASSERT_TRUE(fid2 != NULL);
+ ASSERT_NE(fid2, nullptr);
// Make sure we can actually use it.
jstring s = env_->NewStringUTF("poop");
ASSERT_EQ(4, env_->GetIntField(s, fid2));
+
+ // Bad arguments.
+ CheckJniAbortCatcher check_jni_abort_catcher;
+ field = env_->ToReflectedField(c, nullptr, JNI_FALSE);
+ EXPECT_EQ(field, nullptr);
+ check_jni_abort_catcher.Check("fid == null");
+ fid2 = env_->FromReflectedField(nullptr);
+ ASSERT_EQ(fid2, nullptr);
+ check_jni_abort_catcher.Check("jlr_field == null");
}
TEST_F(JniInternalTest, FromReflectedMethod_ToReflectedMethod) {
jclass jlrMethod = env_->FindClass("java/lang/reflect/Method");
jclass c = env_->FindClass("java/lang/String");
- ASSERT_TRUE(c != NULL);
+ ASSERT_NE(c, nullptr);
jmethodID mid = env_->GetMethodID(c, "length", "()I");
- ASSERT_TRUE(mid != NULL);
+ ASSERT_NE(mid, nullptr);
// Turn the mid into a java.lang.reflect.Method...
jobject method = env_->ToReflectedMethod(c, mid, JNI_FALSE);
- ASSERT_TRUE(c != NULL);
+ ASSERT_NE(c, nullptr);
ASSERT_TRUE(env_->IsInstanceOf(method, jlrMethod));
// ...and back again.
jmethodID mid2 = env_->FromReflectedMethod(method);
- ASSERT_TRUE(mid2 != NULL);
+ ASSERT_NE(mid2, nullptr);
// Make sure we can actually use it.
jstring s = env_->NewStringUTF("poop");
ASSERT_EQ(4, env_->CallIntMethod(s, mid2));
+
+ // Bad arguments.
+ CheckJniAbortCatcher check_jni_abort_catcher;
+ method = env_->ToReflectedMethod(c, nullptr, JNI_FALSE);
+ EXPECT_EQ(method, nullptr);
+ check_jni_abort_catcher.Check("mid == null");
+ mid2 = env_->FromReflectedMethod(method);
+ ASSERT_EQ(mid2, nullptr);
+ check_jni_abort_catcher.Check("jlr_method == null");
}
-void BogusMethod() {
- // You can't pass NULL function pointers to RegisterNatives.
+static void BogusMethod() {
+ // You can't pass nullptr function pointers to RegisterNatives.
}
-TEST_F(JniInternalTest, RegisterNatives) {
+TEST_F(JniInternalTest, RegisterAndUnregisterNatives) {
jclass jlobject = env_->FindClass("java/lang/Object");
jclass jlnsme = env_->FindClass("java/lang/NoSuchMethodError");
- // Sanity check that no exceptions are pending
+ // Sanity check that no exceptions are pending.
ASSERT_FALSE(env_->ExceptionCheck());
- // Check that registering to a non-existent java.lang.Object.foo() causes a
- // NoSuchMethodError
+ // Check that registering to a non-existent java.lang.Object.foo() causes a NoSuchMethodError.
{
- JNINativeMethod methods[] = { { "foo", "()V", NULL } };
- env_->RegisterNatives(jlobject, methods, 1);
+ JNINativeMethod methods[] = { { "foo", "()V", nullptr } };
+ EXPECT_EQ(env_->RegisterNatives(jlobject, methods, 1), JNI_ERR);
}
- EXPECT_EXCEPTION(jlnsme);
+ ExpectException(jlnsme);
- // Check that registering non-native methods causes a NoSuchMethodError
+ // Check that registering non-native methods causes a NoSuchMethodError.
{
- JNINativeMethod methods[] = { { "equals", "(Ljava/lang/Object;)Z", NULL } };
- env_->RegisterNatives(jlobject, methods, 1);
+ JNINativeMethod methods[] = { { "equals", "(Ljava/lang/Object;)Z", nullptr } };
+ EXPECT_EQ(env_->RegisterNatives(jlobject, methods, 1), JNI_ERR);
}
- EXPECT_EXCEPTION(jlnsme);
+ ExpectException(jlnsme);
- // Check that registering native methods is successful
+ // Check that registering native methods is successful.
{
JNINativeMethod methods[] = { { "notify", "()V", reinterpret_cast<void*>(BogusMethod) } };
- env_->RegisterNatives(jlobject, methods, 1);
+ EXPECT_EQ(env_->RegisterNatives(jlobject, methods, 1), JNI_OK);
+ }
+ EXPECT_FALSE(env_->ExceptionCheck());
+ EXPECT_EQ(env_->UnregisterNatives(jlobject), JNI_OK);
+
+ // Check that registering no methods isn't a failure.
+ {
+ JNINativeMethod methods[] = { };
+ EXPECT_EQ(env_->RegisterNatives(jlobject, methods, 0), JNI_OK);
+ }
+ EXPECT_FALSE(env_->ExceptionCheck());
+ EXPECT_EQ(env_->UnregisterNatives(jlobject), JNI_OK);
+
+ // Check that registering a -ve number of methods is a failure.
+ CheckJniAbortCatcher check_jni_abort_catcher;
+ for (int i = -10; i < 0; ++i) {
+ JNINativeMethod methods[] = { };
+ EXPECT_EQ(env_->RegisterNatives(jlobject, methods, i), JNI_ERR);
+ check_jni_abort_catcher.Check("negative method count: ");
}
EXPECT_FALSE(env_->ExceptionCheck());
- env_->UnregisterNatives(jlobject);
+ // Passing a class of null is a failure.
+ {
+ JNINativeMethod methods[] = { };
+ EXPECT_EQ(env_->RegisterNatives(nullptr, methods, 0), JNI_ERR);
+ check_jni_abort_catcher.Check("java_class == null");
+ }
+
+ // Passing methods as null is a failure.
+ EXPECT_EQ(env_->RegisterNatives(jlobject, nullptr, 1), JNI_ERR);
+ check_jni_abort_catcher.Check("methods == null");
+
+ // Unregisters null is a failure.
+ EXPECT_EQ(env_->UnregisterNatives(nullptr), JNI_ERR);
+ check_jni_abort_catcher.Check("java_class == null");
+
+ // Unregistering a class with no natives is a warning.
+ EXPECT_EQ(env_->UnregisterNatives(jlnsme), JNI_OK);
}
#define EXPECT_PRIMITIVE_ARRAY(new_fn, \
@@ -368,52 +474,69 @@
release_elements_fn, \
scalar_type, \
expected_class_descriptor) \
+ jsize size = 4; \
+ \
{ \
CheckJniAbortCatcher jni_abort_catcher; \
/* Allocate an negative sized array and check it has the right failure type. */ \
- env_->new_fn(-1); \
+ EXPECT_EQ(env_->new_fn(-1), nullptr); \
jni_abort_catcher.Check("negative array length: -1"); \
- env_->new_fn(std::numeric_limits<jint>::min()); \
+ EXPECT_EQ(env_->new_fn(std::numeric_limits<jint>::min()), nullptr); \
jni_abort_catcher.Check("negative array length: -2147483648"); \
+ /* Pass the array as null. */ \
+ EXPECT_EQ(0, env_->GetArrayLength(nullptr)); \
+ jni_abort_catcher.Check("java_array == null"); \
+ env_->get_region_fn(nullptr, 0, 0, nullptr); \
+ jni_abort_catcher.Check("java_array == null"); \
+ env_->set_region_fn(nullptr, 0, 0, nullptr); \
+ jni_abort_catcher.Check("java_array == null"); \
+ env_->get_elements_fn(nullptr, nullptr); \
+ jni_abort_catcher.Check("java_array == null"); \
+ env_->release_elements_fn(nullptr, nullptr, 0); \
+ jni_abort_catcher.Check("java_array == null"); \
+ /* Pass the elements for region as null. */ \
+ scalar_type ## Array a = env_->new_fn(size); \
+ env_->get_region_fn(a, 0, size, nullptr); \
+ jni_abort_catcher.Check("buf == null"); \
+ env_->set_region_fn(a, 0, size, nullptr); \
+ jni_abort_catcher.Check("buf == null"); \
} \
- jsize size = 4; \
- \
/* Allocate an array and check it has the right type and length. */ \
scalar_type ## Array a = env_->new_fn(size); \
- EXPECT_TRUE(a != NULL); \
+ EXPECT_NE(a, nullptr); \
EXPECT_TRUE(env_->IsInstanceOf(a, env_->FindClass(expected_class_descriptor))); \
EXPECT_EQ(size, env_->GetArrayLength(a)); \
\
/* GetPrimitiveArrayRegion/SetPrimitiveArrayRegion */ \
/* AIOOBE for negative start offset. */ \
- env_->get_region_fn(a, -1, 1, NULL); \
- EXPECT_EXCEPTION(aioobe_); \
- env_->set_region_fn(a, -1, 1, NULL); \
- EXPECT_EXCEPTION(aioobe_); \
+ env_->get_region_fn(a, -1, 1, nullptr); \
+ ExpectException(aioobe_); \
+ env_->set_region_fn(a, -1, 1, nullptr); \
+ ExpectException(aioobe_); \
\
/* AIOOBE for negative length. */ \
- env_->get_region_fn(a, 0, -1, NULL); \
- EXPECT_EXCEPTION(aioobe_); \
- env_->set_region_fn(a, 0, -1, NULL); \
- EXPECT_EXCEPTION(aioobe_); \
+ env_->get_region_fn(a, 0, -1, nullptr); \
+ ExpectException(aioobe_); \
+ env_->set_region_fn(a, 0, -1, nullptr); \
+ ExpectException(aioobe_); \
\
/* AIOOBE for buffer overrun. */ \
- env_->get_region_fn(a, size - 1, size, NULL); \
- EXPECT_EXCEPTION(aioobe_); \
- env_->set_region_fn(a, size - 1, size, NULL); \
- EXPECT_EXCEPTION(aioobe_); \
+ env_->get_region_fn(a, size - 1, size, nullptr); \
+ ExpectException(aioobe_); \
+ env_->set_region_fn(a, size - 1, size, nullptr); \
+ ExpectException(aioobe_); \
\
- /* It's okay for the buffer to be NULL as long as the length is 0. */ \
- env_->get_region_fn(a, 2, 0, NULL); \
+ /* It's okay for the buffer to be nullptr as long as the length is 0. */ \
+ env_->get_region_fn(a, 2, 0, nullptr); \
/* Even if the offset is invalid... */ \
- env_->get_region_fn(a, 123, 0, NULL); \
- EXPECT_EXCEPTION(aioobe_); \
+ env_->get_region_fn(a, 123, 0, nullptr); \
+ ExpectException(aioobe_); \
\
- /* It's okay for the buffer to be NULL as long as the length is 0. */ \
- env_->set_region_fn(a, 2, 0, NULL); \
+ /* It's okay for the buffer to be nullptr as long as the length is 0. */ \
+ env_->set_region_fn(a, 2, 0, nullptr); \
/* Even if the offset is invalid... */ \
- env_->set_region_fn(a, 123, 0, NULL); \
- EXPECT_EXCEPTION(aioobe_); \
+ env_->set_region_fn(a, 123, 0, nullptr); \
+ ExpectException(aioobe_); \
\
/* Prepare a couple of buffers. */ \
UniquePtr<scalar_type[]> src_buf(new scalar_type[size]); \
@@ -437,12 +560,12 @@
EXPECT_EQ(memcmp(&src_buf[0], &dst_buf[0], size * sizeof(scalar_type)), 0) \
<< "full copy not equal"; \
/* GetPrimitiveArrayCritical */ \
- void* v = env_->GetPrimitiveArrayCritical(a, NULL); \
+ void* v = env_->GetPrimitiveArrayCritical(a, nullptr); \
EXPECT_EQ(memcmp(&src_buf[0], v, size * sizeof(scalar_type)), 0) \
<< "GetPrimitiveArrayCritical not equal"; \
env_->ReleasePrimitiveArrayCritical(a, v, 0); \
/* GetXArrayElements */ \
- scalar_type* xs = env_->get_elements_fn(a, NULL); \
+ scalar_type* xs = env_->get_elements_fn(a, nullptr); \
EXPECT_EQ(memcmp(&src_buf[0], xs, size * sizeof(scalar_type)), 0) \
<< # get_elements_fn " not equal"; \
env_->release_elements_fn(a, xs, 0); \
@@ -480,31 +603,206 @@
GetShortArrayElements, ReleaseShortArrayElements, jshort, "[S");
}
+TEST_F(JniInternalTest, GetPrimitiveArrayElementsOfWrongType) {
+ CheckJniAbortCatcher jni_abort_catcher;
+ jbooleanArray array = env_->NewBooleanArray(10);
+ jboolean is_copy;
+ EXPECT_EQ(env_->GetByteArrayElements(reinterpret_cast<jbyteArray>(array), &is_copy), nullptr);
+ jni_abort_catcher.Check(
+ "attempt to get byte primitive array elements with an object of type boolean[]");
+ EXPECT_EQ(env_->GetShortArrayElements(reinterpret_cast<jshortArray>(array), &is_copy), nullptr);
+ jni_abort_catcher.Check(
+ "attempt to get short primitive array elements with an object of type boolean[]");
+ EXPECT_EQ(env_->GetCharArrayElements(reinterpret_cast<jcharArray>(array), &is_copy), nullptr);
+ jni_abort_catcher.Check(
+ "attempt to get char primitive array elements with an object of type boolean[]");
+ EXPECT_EQ(env_->GetIntArrayElements(reinterpret_cast<jintArray>(array), &is_copy), nullptr);
+ jni_abort_catcher.Check(
+ "attempt to get int primitive array elements with an object of type boolean[]");
+ EXPECT_EQ(env_->GetLongArrayElements(reinterpret_cast<jlongArray>(array), &is_copy), nullptr);
+ jni_abort_catcher.Check(
+ "attempt to get long primitive array elements with an object of type boolean[]");
+ EXPECT_EQ(env_->GetFloatArrayElements(reinterpret_cast<jfloatArray>(array), &is_copy), nullptr);
+ jni_abort_catcher.Check(
+ "attempt to get float primitive array elements with an object of type boolean[]");
+ EXPECT_EQ(env_->GetDoubleArrayElements(reinterpret_cast<jdoubleArray>(array), &is_copy), nullptr);
+ jni_abort_catcher.Check(
+ "attempt to get double primitive array elements with an object of type boolean[]");
+ jbyteArray array2 = env_->NewByteArray(10);
+ EXPECT_EQ(env_->GetBooleanArrayElements(reinterpret_cast<jbooleanArray>(array2), &is_copy), nullptr);
+ jni_abort_catcher.Check(
+ "attempt to get boolean primitive array elements with an object of type byte[]");
+ jobject object = env_->NewStringUTF("Test String");
+ EXPECT_EQ(env_->GetBooleanArrayElements(reinterpret_cast<jbooleanArray>(object), &is_copy), nullptr);
+ jni_abort_catcher.Check(
+ "attempt to get boolean primitive array elements with an object of type java.lang.String");
+}
+
+TEST_F(JniInternalTest, ReleasePrimitiveArrayElementsOfWrongType) {
+ CheckJniAbortCatcher jni_abort_catcher;
+ jbooleanArray array = env_->NewBooleanArray(10);
+ ASSERT_TRUE(array != nullptr);
+ jboolean is_copy;
+ jboolean* elements = env_->GetBooleanArrayElements(array, &is_copy);
+ ASSERT_TRUE(elements != nullptr);
+ env_->ReleaseByteArrayElements(reinterpret_cast<jbyteArray>(array),
+ reinterpret_cast<jbyte*>(elements), 0);
+ jni_abort_catcher.Check(
+ "attempt to release byte primitive array elements with an object of type boolean[]");
+ env_->ReleaseShortArrayElements(reinterpret_cast<jshortArray>(array),
+ reinterpret_cast<jshort*>(elements), 0);
+ jni_abort_catcher.Check(
+ "attempt to release short primitive array elements with an object of type boolean[]");
+ env_->ReleaseCharArrayElements(reinterpret_cast<jcharArray>(array),
+ reinterpret_cast<jchar*>(elements), 0);
+ jni_abort_catcher.Check(
+ "attempt to release char primitive array elements with an object of type boolean[]");
+ env_->ReleaseIntArrayElements(reinterpret_cast<jintArray>(array),
+ reinterpret_cast<jint*>(elements), 0);
+ jni_abort_catcher.Check(
+ "attempt to release int primitive array elements with an object of type boolean[]");
+ env_->ReleaseLongArrayElements(reinterpret_cast<jlongArray>(array),
+ reinterpret_cast<jlong*>(elements), 0);
+ jni_abort_catcher.Check(
+ "attempt to release long primitive array elements with an object of type boolean[]");
+ env_->ReleaseFloatArrayElements(reinterpret_cast<jfloatArray>(array),
+ reinterpret_cast<jfloat*>(elements), 0);
+ jni_abort_catcher.Check(
+ "attempt to release float primitive array elements with an object of type boolean[]");
+ env_->ReleaseDoubleArrayElements(reinterpret_cast<jdoubleArray>(array),
+ reinterpret_cast<jdouble*>(elements), 0);
+ jni_abort_catcher.Check(
+ "attempt to release double primitive array elements with an object of type boolean[]");
+ jbyteArray array2 = env_->NewByteArray(10);
+ env_->ReleaseBooleanArrayElements(reinterpret_cast<jbooleanArray>(array2), elements, 0);
+ jni_abort_catcher.Check(
+ "attempt to release boolean primitive array elements with an object of type byte[]");
+ jobject object = env_->NewStringUTF("Test String");
+ env_->ReleaseBooleanArrayElements(reinterpret_cast<jbooleanArray>(object), elements, 0);
+ jni_abort_catcher.Check(
+ "attempt to release boolean primitive array elements with an object of type java.lang.String");
+}
+TEST_F(JniInternalTest, GetReleasePrimitiveArrayCriticalOfWrongType) {
+ CheckJniAbortCatcher jni_abort_catcher;
+ jobject object = env_->NewStringUTF("Test String");
+ jboolean is_copy;
+ void* elements = env_->GetPrimitiveArrayCritical(reinterpret_cast<jarray>(object), &is_copy);
+ jni_abort_catcher.Check("expected primitive array, given java.lang.String");
+ env_->ReleasePrimitiveArrayCritical(reinterpret_cast<jarray>(object), elements, 0);
+ jni_abort_catcher.Check("expected primitive array, given java.lang.String");
+}
+
+TEST_F(JniInternalTest, GetPrimitiveArrayRegionElementsOfWrongType) {
+ CheckJniAbortCatcher jni_abort_catcher;
+ constexpr size_t kLength = 10;
+ jbooleanArray array = env_->NewBooleanArray(kLength);
+ ASSERT_TRUE(array != nullptr);
+ jboolean elements[kLength];
+ env_->GetByteArrayRegion(reinterpret_cast<jbyteArray>(array), 0, kLength,
+ reinterpret_cast<jbyte*>(elements));
+ jni_abort_catcher.Check(
+ "attempt to get region of byte primitive array elements with an object of type boolean[]");
+ env_->GetShortArrayRegion(reinterpret_cast<jshortArray>(array), 0, kLength,
+ reinterpret_cast<jshort*>(elements));
+ jni_abort_catcher.Check(
+ "attempt to get region of short primitive array elements with an object of type boolean[]");
+ env_->GetCharArrayRegion(reinterpret_cast<jcharArray>(array), 0, kLength,
+ reinterpret_cast<jchar*>(elements));
+ jni_abort_catcher.Check(
+ "attempt to get region of char primitive array elements with an object of type boolean[]");
+ env_->GetIntArrayRegion(reinterpret_cast<jintArray>(array), 0, kLength,
+ reinterpret_cast<jint*>(elements));
+ jni_abort_catcher.Check(
+ "attempt to get region of int primitive array elements with an object of type boolean[]");
+ env_->GetLongArrayRegion(reinterpret_cast<jlongArray>(array), 0, kLength,
+ reinterpret_cast<jlong*>(elements));
+ jni_abort_catcher.Check(
+ "attempt to get region of long primitive array elements with an object of type boolean[]");
+ env_->GetFloatArrayRegion(reinterpret_cast<jfloatArray>(array), 0, kLength,
+ reinterpret_cast<jfloat*>(elements));
+ jni_abort_catcher.Check(
+ "attempt to get region of float primitive array elements with an object of type boolean[]");
+ env_->GetDoubleArrayRegion(reinterpret_cast<jdoubleArray>(array), 0, kLength,
+ reinterpret_cast<jdouble*>(elements));
+ jni_abort_catcher.Check(
+ "attempt to get region of double primitive array elements with an object of type boolean[]");
+ jbyteArray array2 = env_->NewByteArray(10);
+ env_->GetBooleanArrayRegion(reinterpret_cast<jbooleanArray>(array2), 0, kLength,
+ reinterpret_cast<jboolean*>(elements));
+ jni_abort_catcher.Check(
+ "attempt to get region of boolean primitive array elements with an object of type byte[]");
+ jobject object = env_->NewStringUTF("Test String");
+ env_->GetBooleanArrayRegion(reinterpret_cast<jbooleanArray>(object), 0, kLength,
+ reinterpret_cast<jboolean*>(elements));
+ jni_abort_catcher.Check(
+ "attempt to get region of boolean primitive array elements with an object of type java.lang.String");
+}
+
+TEST_F(JniInternalTest, SetPrimitiveArrayRegionElementsOfWrongType) {
+ CheckJniAbortCatcher jni_abort_catcher;
+ constexpr size_t kLength = 10;
+ jbooleanArray array = env_->NewBooleanArray(kLength);
+ ASSERT_TRUE(array != nullptr);
+ jboolean elements[kLength];
+ env_->SetByteArrayRegion(reinterpret_cast<jbyteArray>(array), 0, kLength,
+ reinterpret_cast<jbyte*>(elements));
+ jni_abort_catcher.Check(
+ "attempt to set region of byte primitive array elements with an object of type boolean[]");
+ env_->SetShortArrayRegion(reinterpret_cast<jshortArray>(array), 0, kLength,
+ reinterpret_cast<jshort*>(elements));
+ jni_abort_catcher.Check(
+ "attempt to set region of short primitive array elements with an object of type boolean[]");
+ env_->SetCharArrayRegion(reinterpret_cast<jcharArray>(array), 0, kLength,
+ reinterpret_cast<jchar*>(elements));
+ jni_abort_catcher.Check(
+ "attempt to set region of char primitive array elements with an object of type boolean[]");
+ env_->SetIntArrayRegion(reinterpret_cast<jintArray>(array), 0, kLength,
+ reinterpret_cast<jint*>(elements));
+ jni_abort_catcher.Check(
+ "attempt to set region of int primitive array elements with an object of type boolean[]");
+ env_->SetLongArrayRegion(reinterpret_cast<jlongArray>(array), 0, kLength,
+ reinterpret_cast<jlong*>(elements));
+ jni_abort_catcher.Check(
+ "attempt to set region of long primitive array elements with an object of type boolean[]");
+ env_->SetFloatArrayRegion(reinterpret_cast<jfloatArray>(array), 0, kLength,
+ reinterpret_cast<jfloat*>(elements));
+ jni_abort_catcher.Check(
+ "attempt to set region of float primitive array elements with an object of type boolean[]");
+ env_->SetDoubleArrayRegion(reinterpret_cast<jdoubleArray>(array), 0, kLength,
+ reinterpret_cast<jdouble*>(elements));
+ jni_abort_catcher.Check(
+ "attempt to set region of double primitive array elements with an object of type boolean[]");
+ jbyteArray array2 = env_->NewByteArray(10);
+ env_->SetBooleanArrayRegion(reinterpret_cast<jbooleanArray>(array2), 0, kLength,
+ reinterpret_cast<jboolean*>(elements));
+ jni_abort_catcher.Check(
+ "attempt to set region of boolean primitive array elements with an object of type byte[]");
+ jobject object = env_->NewStringUTF("Test String");
+ env_->SetBooleanArrayRegion(reinterpret_cast<jbooleanArray>(object), 0, kLength,
+ reinterpret_cast<jboolean*>(elements));
+ jni_abort_catcher.Check(
+ "attempt to set region of boolean primitive array elements with an object of type java.lang.String");
+}
+
TEST_F(JniInternalTest, NewObjectArray) {
jclass element_class = env_->FindClass("java/lang/String");
- ASSERT_TRUE(element_class != nullptr);
+ ASSERT_NE(element_class, nullptr);
jclass array_class = env_->FindClass("[Ljava/lang/String;");
- ASSERT_TRUE(array_class != nullptr);
+ ASSERT_NE(array_class, nullptr);
jobjectArray a = env_->NewObjectArray(0, element_class, nullptr);
- EXPECT_TRUE(a != nullptr);
+ EXPECT_NE(a, nullptr);
EXPECT_TRUE(env_->IsInstanceOf(a, array_class));
EXPECT_EQ(0, env_->GetArrayLength(a));
a = env_->NewObjectArray(1, element_class, nullptr);
- EXPECT_TRUE(a != nullptr);
+ EXPECT_NE(a, nullptr);
EXPECT_TRUE(env_->IsInstanceOf(a, array_class));
EXPECT_EQ(1, env_->GetArrayLength(a));
EXPECT_TRUE(env_->IsSameObject(env_->GetObjectArrayElement(a, 0), nullptr));
-}
-TEST_F(JniInternalTest, NewObjectArrayWithNegativeLength) {
- jclass element_class = env_->FindClass("java/lang/String");
- ASSERT_TRUE(element_class != nullptr);
- jclass array_class = env_->FindClass("[Ljava/lang/String;");
- ASSERT_TRUE(array_class != nullptr);
+ // Negative array length checks.
CheckJniAbortCatcher jni_abort_catcher;
-
env_->NewObjectArray(-1, element_class, nullptr);
jni_abort_catcher.Check("negative array length: -1");
@@ -521,6 +819,8 @@
CheckJniAbortCatcher jni_abort_catcher;
for (size_t i = 0; i < strlen(primitive_descriptors); ++i) {
+ env_->NewObjectArray(0, nullptr, nullptr);
+ jni_abort_catcher.Check("element_jclass == null");
jclass primitive_class = GetPrimitiveClass(primitive_descriptors[i]);
env_->NewObjectArray(1, primitive_class, nullptr);
std::string error_msg(StringPrintf("not an object type: %s", primitive_names[i]));
@@ -530,13 +830,13 @@
TEST_F(JniInternalTest, NewObjectArrayWithInitialValue) {
jclass element_class = env_->FindClass("java/lang/String");
- ASSERT_TRUE(element_class != nullptr);
+ ASSERT_NE(element_class, nullptr);
jclass array_class = env_->FindClass("[Ljava/lang/String;");
- ASSERT_TRUE(array_class != nullptr);
+ ASSERT_NE(array_class, nullptr);
jstring s = env_->NewStringUTF("poop");
jobjectArray a = env_->NewObjectArray(2, element_class, s);
- EXPECT_TRUE(a != nullptr);
+ EXPECT_NE(a, nullptr);
EXPECT_TRUE(env_->IsInstanceOf(a, array_class));
EXPECT_EQ(2, env_->GetArrayLength(a));
EXPECT_TRUE(env_->IsSameObject(env_->GetObjectArrayElement(a, 0), s));
@@ -555,9 +855,9 @@
TEST_F(JniInternalTest, GetObjectClass) {
jclass string_class = env_->FindClass("java/lang/String");
- ASSERT_TRUE(string_class != NULL);
+ ASSERT_NE(string_class, nullptr);
jclass class_class = env_->FindClass("java/lang/Class");
- ASSERT_TRUE(class_class != NULL);
+ ASSERT_NE(class_class, nullptr);
jstring s = env_->NewStringUTF("poop");
jclass c = env_->GetObjectClass(s);
@@ -565,33 +865,50 @@
jclass c2 = env_->GetObjectClass(c);
ASSERT_TRUE(env_->IsSameObject(class_class, env_->GetObjectClass(c2)));
+
+ // Null as object should fail.
+ CheckJniAbortCatcher jni_abort_catcher;
+ EXPECT_EQ(env_->GetObjectClass(nullptr), nullptr);
+ jni_abort_catcher.Check("java_object == null");
}
TEST_F(JniInternalTest, GetSuperclass) {
jclass object_class = env_->FindClass("java/lang/Object");
- ASSERT_TRUE(object_class != NULL);
+ ASSERT_NE(object_class, nullptr);
jclass string_class = env_->FindClass("java/lang/String");
- ASSERT_TRUE(string_class != NULL);
+ ASSERT_NE(string_class, nullptr);
jclass runnable_interface = env_->FindClass("java/lang/Runnable");
- ASSERT_TRUE(runnable_interface != NULL);
+ ASSERT_NE(runnable_interface, nullptr);
ASSERT_TRUE(env_->IsSameObject(object_class, env_->GetSuperclass(string_class)));
- ASSERT_TRUE(env_->GetSuperclass(object_class) == NULL);
+ ASSERT_EQ(env_->GetSuperclass(object_class), nullptr);
ASSERT_TRUE(env_->IsSameObject(object_class, env_->GetSuperclass(runnable_interface)));
+
+ // Null as class should fail.
+ CheckJniAbortCatcher jni_abort_catcher;
+ EXPECT_EQ(env_->GetSuperclass(nullptr), nullptr);
+ jni_abort_catcher.Check("java_class == null");
}
TEST_F(JniInternalTest, IsAssignableFrom) {
jclass object_class = env_->FindClass("java/lang/Object");
- ASSERT_TRUE(object_class != NULL);
+ ASSERT_NE(object_class, nullptr);
jclass string_class = env_->FindClass("java/lang/String");
- ASSERT_TRUE(string_class != NULL);
+ ASSERT_NE(string_class, nullptr);
ASSERT_TRUE(env_->IsAssignableFrom(object_class, string_class));
ASSERT_FALSE(env_->IsAssignableFrom(string_class, object_class));
+
+ // Null as either class should fail.
+ CheckJniAbortCatcher jni_abort_catcher;
+ EXPECT_EQ(env_->IsAssignableFrom(nullptr, string_class), JNI_FALSE);
+ jni_abort_catcher.Check("java_class1 == null");
+ EXPECT_EQ(env_->IsAssignableFrom(object_class, nullptr), JNI_FALSE);
+ jni_abort_catcher.Check("java_class2 == null");
}
TEST_F(JniInternalTest, GetObjectRefType) {
jclass local = env_->FindClass("java/lang/Object");
- ASSERT_TRUE(local != NULL);
+ ASSERT_TRUE(local != nullptr);
EXPECT_EQ(JNILocalRefType, env_->GetObjectRefType(local));
jobject global = env_->NewGlobalRef(local);
@@ -604,33 +921,38 @@
EXPECT_EQ(JNIInvalidRefType, env_->GetObjectRefType(invalid));
// TODO: invoke a native method and test that its arguments are considered local references.
+
+ // Null as object should fail.
+ CheckJniAbortCatcher jni_abort_catcher;
+ EXPECT_EQ(JNIInvalidRefType, env_->GetObjectRefType(nullptr));
+ jni_abort_catcher.Check("java_object == null");
}
TEST_F(JniInternalTest, StaleWeakGlobal) {
jclass java_lang_Class = env_->FindClass("java/lang/Class");
- ASSERT_TRUE(java_lang_Class != NULL);
- jobjectArray local_ref = env_->NewObjectArray(1, java_lang_Class, NULL);
- ASSERT_TRUE(local_ref != NULL);
+ ASSERT_NE(java_lang_Class, nullptr);
+ jobjectArray local_ref = env_->NewObjectArray(1, java_lang_Class, nullptr);
+ ASSERT_NE(local_ref, nullptr);
jweak weak_global = env_->NewWeakGlobalRef(local_ref);
- ASSERT_TRUE(weak_global != NULL);
+ ASSERT_NE(weak_global, nullptr);
env_->DeleteLocalRef(local_ref);
Runtime::Current()->GetHeap()->CollectGarbage(false); // GC should clear the weak global.
jobject new_global_ref = env_->NewGlobalRef(weak_global);
- EXPECT_TRUE(new_global_ref == NULL);
+ EXPECT_EQ(new_global_ref, nullptr);
jobject new_local_ref = env_->NewLocalRef(weak_global);
- EXPECT_TRUE(new_local_ref == NULL);
+ EXPECT_EQ(new_local_ref, nullptr);
}
TEST_F(JniInternalTest, NewStringUTF) {
- EXPECT_TRUE(env_->NewStringUTF(NULL) == NULL);
+ EXPECT_EQ(env_->NewStringUTF(nullptr), nullptr);
jstring s;
s = env_->NewStringUTF("");
- EXPECT_TRUE(s != NULL);
+ EXPECT_NE(s, nullptr);
EXPECT_EQ(0, env_->GetStringLength(s));
EXPECT_EQ(0, env_->GetStringUTFLength(s));
s = env_->NewStringUTF("hello");
- EXPECT_TRUE(s != NULL);
+ EXPECT_NE(s, nullptr);
EXPECT_EQ(5, env_->GetStringLength(s));
EXPECT_EQ(5, env_->GetStringUTFLength(s));
@@ -641,11 +963,11 @@
jchar chars[] = { 'h', 'i' };
jstring s;
s = env_->NewString(chars, 0);
- EXPECT_TRUE(s != NULL);
+ EXPECT_NE(s, nullptr);
EXPECT_EQ(0, env_->GetStringLength(s));
EXPECT_EQ(0, env_->GetStringUTFLength(s));
s = env_->NewString(chars, 2);
- EXPECT_TRUE(s != NULL);
+ EXPECT_NE(s, nullptr);
EXPECT_EQ(2, env_->GetStringLength(s));
EXPECT_EQ(2, env_->GetStringUTFLength(s));
@@ -654,7 +976,7 @@
TEST_F(JniInternalTest, NewStringNullCharsZeroLength) {
jstring s = env_->NewString(nullptr, 0);
- EXPECT_TRUE(s != nullptr);
+ EXPECT_NE(s, nullptr);
EXPECT_EQ(0, env_->GetStringLength(s));
}
@@ -678,16 +1000,16 @@
TEST_F(JniInternalTest, GetStringRegion_GetStringUTFRegion) {
jstring s = env_->NewStringUTF("hello");
- ASSERT_TRUE(s != NULL);
+ ASSERT_TRUE(s != nullptr);
- env_->GetStringRegion(s, -1, 0, NULL);
- EXPECT_EXCEPTION(sioobe_);
- env_->GetStringRegion(s, 0, -1, NULL);
- EXPECT_EXCEPTION(sioobe_);
- env_->GetStringRegion(s, 0, 10, NULL);
- EXPECT_EXCEPTION(sioobe_);
- env_->GetStringRegion(s, 10, 1, NULL);
- EXPECT_EXCEPTION(sioobe_);
+ env_->GetStringRegion(s, -1, 0, nullptr);
+ ExpectException(sioobe_);
+ env_->GetStringRegion(s, 0, -1, nullptr);
+ ExpectException(sioobe_);
+ env_->GetStringRegion(s, 0, 10, nullptr);
+ ExpectException(sioobe_);
+ env_->GetStringRegion(s, 10, 1, nullptr);
+ ExpectException(sioobe_);
jchar chars[4] = { 'x', 'x', 'x', 'x' };
env_->GetStringRegion(s, 1, 2, &chars[1]);
@@ -696,20 +1018,20 @@
EXPECT_EQ('l', chars[2]);
EXPECT_EQ('x', chars[3]);
- // It's okay for the buffer to be NULL as long as the length is 0.
- env_->GetStringRegion(s, 2, 0, NULL);
+ // It's okay for the buffer to be nullptr as long as the length is 0.
+ env_->GetStringRegion(s, 2, 0, nullptr);
// Even if the offset is invalid...
- env_->GetStringRegion(s, 123, 0, NULL);
- EXPECT_EXCEPTION(sioobe_);
+ env_->GetStringRegion(s, 123, 0, nullptr);
+ ExpectException(sioobe_);
- env_->GetStringUTFRegion(s, -1, 0, NULL);
- EXPECT_EXCEPTION(sioobe_);
- env_->GetStringUTFRegion(s, 0, -1, NULL);
- EXPECT_EXCEPTION(sioobe_);
- env_->GetStringUTFRegion(s, 0, 10, NULL);
- EXPECT_EXCEPTION(sioobe_);
- env_->GetStringUTFRegion(s, 10, 1, NULL);
- EXPECT_EXCEPTION(sioobe_);
+ env_->GetStringUTFRegion(s, -1, 0, nullptr);
+ ExpectException(sioobe_);
+ env_->GetStringUTFRegion(s, 0, -1, nullptr);
+ ExpectException(sioobe_);
+ env_->GetStringUTFRegion(s, 0, 10, nullptr);
+ ExpectException(sioobe_);
+ env_->GetStringUTFRegion(s, 10, 1, nullptr);
+ ExpectException(sioobe_);
char bytes[4] = { 'x', 'x', 'x', 'x' };
env_->GetStringUTFRegion(s, 1, 2, &bytes[1]);
@@ -718,25 +1040,25 @@
EXPECT_EQ('l', bytes[2]);
EXPECT_EQ('x', bytes[3]);
- // It's okay for the buffer to be NULL as long as the length is 0.
- env_->GetStringUTFRegion(s, 2, 0, NULL);
+ // It's okay for the buffer to be nullptr as long as the length is 0.
+ env_->GetStringUTFRegion(s, 2, 0, nullptr);
// Even if the offset is invalid...
- env_->GetStringUTFRegion(s, 123, 0, NULL);
- EXPECT_EXCEPTION(sioobe_);
+ env_->GetStringUTFRegion(s, 123, 0, nullptr);
+ ExpectException(sioobe_);
}
TEST_F(JniInternalTest, GetStringUTFChars_ReleaseStringUTFChars) {
- // Passing in a NULL jstring is ignored normally, but caught by -Xcheck:jni.
+ // Passing in a nullptr jstring is ignored normally, but caught by -Xcheck:jni.
{
CheckJniAbortCatcher check_jni_abort_catcher;
- EXPECT_TRUE(env_->GetStringUTFChars(NULL, NULL) == NULL);
+ EXPECT_EQ(env_->GetStringUTFChars(nullptr, nullptr), nullptr);
check_jni_abort_catcher.Check("GetStringUTFChars received null jstring");
}
jstring s = env_->NewStringUTF("hello");
- ASSERT_TRUE(s != NULL);
+ ASSERT_TRUE(s != nullptr);
- const char* utf = env_->GetStringUTFChars(s, NULL);
+ const char* utf = env_->GetStringUTFChars(s, nullptr);
EXPECT_STREQ("hello", utf);
env_->ReleaseStringUTFChars(s, utf);
@@ -749,10 +1071,10 @@
TEST_F(JniInternalTest, GetStringChars_ReleaseStringChars) {
jstring s = env_->NewStringUTF("hello");
- ASSERT_TRUE(s != NULL);
+ ASSERT_TRUE(s != nullptr);
jchar expected[] = { 'h', 'e', 'l', 'l', 'o' };
- const jchar* chars = env_->GetStringChars(s, NULL);
+ const jchar* chars = env_->GetStringChars(s, nullptr);
EXPECT_EQ(expected[0], chars[0]);
EXPECT_EQ(expected[1], chars[1]);
EXPECT_EQ(expected[2], chars[2]);
@@ -773,10 +1095,10 @@
TEST_F(JniInternalTest, GetStringCritical_ReleaseStringCritical) {
jstring s = env_->NewStringUTF("hello");
- ASSERT_TRUE(s != NULL);
+ ASSERT_TRUE(s != nullptr);
jchar expected[] = { 'h', 'e', 'l', 'l', 'o' };
- const jchar* chars = env_->GetStringCritical(s, NULL);
+ const jchar* chars = env_->GetStringCritical(s, nullptr);
EXPECT_EQ(expected[0], chars[0]);
EXPECT_EQ(expected[1], chars[1]);
EXPECT_EQ(expected[2], chars[2]);
@@ -798,45 +1120,72 @@
TEST_F(JniInternalTest, GetObjectArrayElement_SetObjectArrayElement) {
jclass java_lang_Class = env_->FindClass("java/lang/Class");
- ASSERT_TRUE(java_lang_Class != NULL);
+ ASSERT_TRUE(java_lang_Class != nullptr);
- jobjectArray array = env_->NewObjectArray(1, java_lang_Class, NULL);
- EXPECT_TRUE(array != NULL);
- EXPECT_TRUE(env_->GetObjectArrayElement(array, 0) == NULL);
+ jobjectArray array = env_->NewObjectArray(1, java_lang_Class, nullptr);
+ EXPECT_NE(array, nullptr);
+ EXPECT_EQ(env_->GetObjectArrayElement(array, 0), nullptr);
env_->SetObjectArrayElement(array, 0, java_lang_Class);
EXPECT_TRUE(env_->IsSameObject(env_->GetObjectArrayElement(array, 0), java_lang_Class));
// ArrayIndexOutOfBounds for negative index.
env_->SetObjectArrayElement(array, -1, java_lang_Class);
- EXPECT_EXCEPTION(aioobe_);
+ ExpectException(aioobe_);
// ArrayIndexOutOfBounds for too-large index.
env_->SetObjectArrayElement(array, 1, java_lang_Class);
- EXPECT_EXCEPTION(aioobe_);
+ ExpectException(aioobe_);
// ArrayStoreException thrown for bad types.
env_->SetObjectArrayElement(array, 0, env_->NewStringUTF("not a jclass!"));
- EXPECT_EXCEPTION(ase_);
+ ExpectException(ase_);
+
+ // Null as array should fail.
+ CheckJniAbortCatcher jni_abort_catcher;
+ EXPECT_EQ(nullptr, env_->GetObjectArrayElement(nullptr, 0));
+ jni_abort_catcher.Check("java_array == null");
+ env_->SetObjectArrayElement(nullptr, 0, nullptr);
+ jni_abort_catcher.Check("java_array == null");
}
#define EXPECT_STATIC_PRIMITIVE_FIELD(type, field_name, sig, value1, value2) \
do { \
jfieldID fid = env_->GetStaticFieldID(c, field_name, sig); \
- EXPECT_TRUE(fid != NULL); \
+ EXPECT_NE(fid, nullptr); \
env_->SetStatic ## type ## Field(c, fid, value1); \
- EXPECT_TRUE(value1 == env_->GetStatic ## type ## Field(c, fid)); \
+ EXPECT_EQ(value1, env_->GetStatic ## type ## Field(c, fid)); \
env_->SetStatic ## type ## Field(c, fid, value2); \
- EXPECT_TRUE(value2 == env_->GetStatic ## type ## Field(c, fid)); \
+ EXPECT_EQ(value2, env_->GetStatic ## type ## Field(c, fid)); \
+ \
+ CheckJniAbortCatcher jni_abort_catcher; \
+ env_->GetStatic ## type ## Field(nullptr, fid); \
+ jni_abort_catcher.Check("received null jclass"); \
+ env_->SetStatic ## type ## Field(nullptr, fid, value1); \
+ jni_abort_catcher.Check("received null jclass"); \
+ env_->GetStatic ## type ## Field(c, nullptr); \
+ jni_abort_catcher.Check("fid == null"); \
+ env_->SetStatic ## type ## Field(c, nullptr, value1); \
+ jni_abort_catcher.Check("fid == null"); \
} while (false)
#define EXPECT_PRIMITIVE_FIELD(instance, type, field_name, sig, value1, value2) \
do { \
jfieldID fid = env_->GetFieldID(c, field_name, sig); \
- EXPECT_TRUE(fid != NULL); \
+ EXPECT_NE(fid, nullptr); \
env_->Set ## type ## Field(instance, fid, value1); \
- EXPECT_TRUE(value1 == env_->Get ## type ## Field(instance, fid)); \
+ EXPECT_EQ(value1, env_->Get ## type ## Field(instance, fid)); \
env_->Set ## type ## Field(instance, fid, value2); \
- EXPECT_TRUE(value2 == env_->Get ## type ## Field(instance, fid)); \
+ EXPECT_EQ(value2, env_->Get ## type ## Field(instance, fid)); \
+ \
+ CheckJniAbortCatcher jni_abort_catcher; \
+ env_->Get ## type ## Field(nullptr, fid); \
+ jni_abort_catcher.Check("obj == null"); \
+ env_->Set ## type ## Field(nullptr, fid, value1); \
+ jni_abort_catcher.Check("obj == null"); \
+ env_->Get ## type ## Field(instance, nullptr); \
+ jni_abort_catcher.Check("fid == null"); \
+ env_->Set ## type ## Field(instance, nullptr, value1); \
+ jni_abort_catcher.Check("fid == null"); \
} while (false)
@@ -845,14 +1194,14 @@
Thread::Current()->TransitionFromSuspendedToRunnable();
LoadDex("AllFields");
bool started = runtime_->Start();
- CHECK(started);
+ ASSERT_TRUE(started);
jclass c = env_->FindClass("AllFields");
- ASSERT_TRUE(c != NULL);
+ ASSERT_NE(c, nullptr);
jobject o = env_->AllocObject(c);
- ASSERT_TRUE(o != NULL);
+ ASSERT_NE(o, nullptr);
- EXPECT_STATIC_PRIMITIVE_FIELD(Boolean, "sZ", "Z", true, false);
+ EXPECT_STATIC_PRIMITIVE_FIELD(Boolean, "sZ", "Z", JNI_TRUE, JNI_FALSE);
EXPECT_STATIC_PRIMITIVE_FIELD(Byte, "sB", "B", 1, 2);
EXPECT_STATIC_PRIMITIVE_FIELD(Char, "sC", "C", 'a', 'b');
EXPECT_STATIC_PRIMITIVE_FIELD(Double, "sD", "D", 1.0, 2.0);
@@ -861,7 +1210,7 @@
EXPECT_STATIC_PRIMITIVE_FIELD(Long, "sJ", "J", 1, 2);
EXPECT_STATIC_PRIMITIVE_FIELD(Short, "sS", "S", 1, 2);
- EXPECT_PRIMITIVE_FIELD(o, Boolean, "iZ", "Z", true, false);
+ EXPECT_PRIMITIVE_FIELD(o, Boolean, "iZ", "Z", JNI_TRUE, JNI_FALSE);
EXPECT_PRIMITIVE_FIELD(o, Byte, "iB", "B", 1, 2);
EXPECT_PRIMITIVE_FIELD(o, Char, "iC", "C", 'a', 'b');
EXPECT_PRIMITIVE_FIELD(o, Double, "iD", "D", 1.0, 2.0);
@@ -878,19 +1227,19 @@
runtime_->Start();
jclass c = env_->FindClass("AllFields");
- ASSERT_TRUE(c != NULL);
+ ASSERT_NE(c, nullptr);
jobject o = env_->AllocObject(c);
- ASSERT_TRUE(o != NULL);
+ ASSERT_NE(o, nullptr);
jstring s1 = env_->NewStringUTF("hello");
- ASSERT_TRUE(s1 != NULL);
+ ASSERT_NE(s1, nullptr);
jstring s2 = env_->NewStringUTF("world");
- ASSERT_TRUE(s2 != NULL);
+ ASSERT_NE(s2, nullptr);
jfieldID s_fid = env_->GetStaticFieldID(c, "sObject", "Ljava/lang/Object;");
- ASSERT_TRUE(s_fid != NULL);
+ ASSERT_NE(s_fid, nullptr);
jfieldID i_fid = env_->GetFieldID(c, "iObject", "Ljava/lang/Object;");
- ASSERT_TRUE(i_fid != NULL);
+ ASSERT_NE(i_fid, nullptr);
env_->SetStaticObjectField(c, s_fid, s1);
ASSERT_TRUE(env_->IsSameObject(s1, env_->GetStaticObjectField(c, s_fid)));
@@ -903,27 +1252,27 @@
ASSERT_TRUE(env_->IsSameObject(s2, env_->GetObjectField(o, i_fid)));
}
-TEST_F(JniInternalTest, NewLocalRef_NULL) {
- EXPECT_TRUE(env_->NewLocalRef(NULL) == NULL);
+TEST_F(JniInternalTest, NewLocalRef_nullptr) {
+ EXPECT_EQ(env_->NewLocalRef(nullptr), nullptr);
}
TEST_F(JniInternalTest, NewLocalRef) {
jstring s = env_->NewStringUTF("");
- ASSERT_TRUE(s != NULL);
+ ASSERT_NE(s, nullptr);
jobject o = env_->NewLocalRef(s);
- EXPECT_TRUE(o != NULL);
- EXPECT_TRUE(o != s);
+ EXPECT_NE(o, nullptr);
+ EXPECT_NE(o, s);
EXPECT_EQ(JNILocalRefType, env_->GetObjectRefType(o));
}
-TEST_F(JniInternalTest, DeleteLocalRef_NULL) {
- env_->DeleteLocalRef(NULL);
+TEST_F(JniInternalTest, DeleteLocalRef_nullptr) {
+ env_->DeleteLocalRef(nullptr);
}
TEST_F(JniInternalTest, DeleteLocalRef) {
jstring s = env_->NewStringUTF("");
- ASSERT_TRUE(s != NULL);
+ ASSERT_NE(s, nullptr);
env_->DeleteLocalRef(s);
// Currently, deleting an already-deleted reference is just a CheckJNI warning.
@@ -937,9 +1286,9 @@
}
s = env_->NewStringUTF("");
- ASSERT_TRUE(s != NULL);
+ ASSERT_NE(s, nullptr);
jobject o = env_->NewLocalRef(s);
- ASSERT_TRUE(o != NULL);
+ ASSERT_NE(o, nullptr);
env_->DeleteLocalRef(s);
env_->DeleteLocalRef(o);
@@ -951,7 +1300,7 @@
// Android historically treated it, and it's how the RI treats it. It's also the more useful
// interpretation!
ASSERT_EQ(JNI_OK, env_->PushLocalFrame(0));
- env_->PopLocalFrame(NULL);
+ env_->PopLocalFrame(nullptr);
// Negative capacities are not allowed.
ASSERT_EQ(JNI_ERR, env_->PushLocalFrame(-1));
@@ -962,7 +1311,7 @@
TEST_F(JniInternalTest, PushLocalFrame_PopLocalFrame) {
jobject original = env_->NewStringUTF("");
- ASSERT_TRUE(original != NULL);
+ ASSERT_NE(original, nullptr);
jobject outer;
jobject inner1, inner2;
@@ -988,7 +1337,7 @@
// gets a new local reference...
EXPECT_EQ(JNIInvalidRefType, env_->GetObjectRefType(inner2));
- env_->PopLocalFrame(NULL);
+ env_->PopLocalFrame(nullptr);
}
EXPECT_EQ(JNILocalRefType, env_->GetObjectRefType(original));
EXPECT_EQ(JNIInvalidRefType, env_->GetObjectRefType(outer));
@@ -996,30 +1345,30 @@
EXPECT_EQ(JNIInvalidRefType, env_->GetObjectRefType(inner2));
}
-TEST_F(JniInternalTest, NewGlobalRef_NULL) {
- EXPECT_TRUE(env_->NewGlobalRef(NULL) == NULL);
+TEST_F(JniInternalTest, NewGlobalRef_nullptr) {
+ EXPECT_EQ(env_->NewGlobalRef(nullptr), nullptr);
}
TEST_F(JniInternalTest, NewGlobalRef) {
jstring s = env_->NewStringUTF("");
- ASSERT_TRUE(s != NULL);
+ ASSERT_NE(s, nullptr);
jobject o = env_->NewGlobalRef(s);
- EXPECT_TRUE(o != NULL);
- EXPECT_TRUE(o != s);
+ EXPECT_NE(o, nullptr);
+ EXPECT_NE(o, s);
- // TODO: check that o is a global reference.
+ EXPECT_EQ(env_->GetObjectRefType(o), JNIGlobalRefType);
}
-TEST_F(JniInternalTest, DeleteGlobalRef_NULL) {
- env_->DeleteGlobalRef(NULL);
+TEST_F(JniInternalTest, DeleteGlobalRef_nullptr) {
+ env_->DeleteGlobalRef(nullptr);
}
TEST_F(JniInternalTest, DeleteGlobalRef) {
jstring s = env_->NewStringUTF("");
- ASSERT_TRUE(s != NULL);
+ ASSERT_NE(s, nullptr);
jobject o = env_->NewGlobalRef(s);
- ASSERT_TRUE(o != NULL);
+ ASSERT_NE(o, nullptr);
env_->DeleteGlobalRef(o);
// Currently, deleting an already-deleted reference is just a CheckJNI warning.
@@ -1033,38 +1382,38 @@
}
jobject o1 = env_->NewGlobalRef(s);
- ASSERT_TRUE(o1 != NULL);
+ ASSERT_NE(o1, nullptr);
jobject o2 = env_->NewGlobalRef(s);
- ASSERT_TRUE(o2 != NULL);
+ ASSERT_NE(o2, nullptr);
env_->DeleteGlobalRef(o1);
env_->DeleteGlobalRef(o2);
}
-TEST_F(JniInternalTest, NewWeakGlobalRef_NULL) {
- EXPECT_TRUE(env_->NewWeakGlobalRef(NULL) == NULL);
+TEST_F(JniInternalTest, NewWeakGlobalRef_nullptr) {
+ EXPECT_EQ(env_->NewWeakGlobalRef(nullptr), nullptr);
}
TEST_F(JniInternalTest, NewWeakGlobalRef) {
jstring s = env_->NewStringUTF("");
- ASSERT_TRUE(s != NULL);
+ ASSERT_NE(s, nullptr);
jobject o = env_->NewWeakGlobalRef(s);
- EXPECT_TRUE(o != NULL);
- EXPECT_TRUE(o != s);
+ EXPECT_NE(o, nullptr);
+ EXPECT_NE(o, s);
- // TODO: check that o is a weak global reference.
+ EXPECT_EQ(env_->GetObjectRefType(o), JNIWeakGlobalRefType);
}
-TEST_F(JniInternalTest, DeleteWeakGlobalRef_NULL) {
- env_->DeleteWeakGlobalRef(NULL);
+TEST_F(JniInternalTest, DeleteWeakGlobalRef_nullptr) {
+ env_->DeleteWeakGlobalRef(nullptr);
}
TEST_F(JniInternalTest, DeleteWeakGlobalRef) {
jstring s = env_->NewStringUTF("");
- ASSERT_TRUE(s != NULL);
+ ASSERT_NE(s, nullptr);
jobject o = env_->NewWeakGlobalRef(s);
- ASSERT_TRUE(o != NULL);
+ ASSERT_NE(o, nullptr);
env_->DeleteWeakGlobalRef(o);
// Currently, deleting an already-deleted reference is just a CheckJNI warning.
@@ -1078,21 +1427,21 @@
}
jobject o1 = env_->NewWeakGlobalRef(s);
- ASSERT_TRUE(o1 != NULL);
+ ASSERT_NE(o1, nullptr);
jobject o2 = env_->NewWeakGlobalRef(s);
- ASSERT_TRUE(o2 != NULL);
+ ASSERT_NE(o2, nullptr);
env_->DeleteWeakGlobalRef(o1);
env_->DeleteWeakGlobalRef(o2);
}
TEST_F(JniInternalTest, Throw) {
- EXPECT_EQ(JNI_ERR, env_->Throw(NULL));
+ EXPECT_EQ(JNI_ERR, env_->Throw(nullptr));
jclass exception_class = env_->FindClass("java/lang/RuntimeException");
- ASSERT_TRUE(exception_class != NULL);
+ ASSERT_TRUE(exception_class != nullptr);
jthrowable exception = reinterpret_cast<jthrowable>(env_->AllocObject(exception_class));
- ASSERT_TRUE(exception != NULL);
+ ASSERT_TRUE(exception != nullptr);
EXPECT_EQ(JNI_OK, env_->Throw(exception));
EXPECT_TRUE(env_->ExceptionCheck());
@@ -1102,10 +1451,10 @@
}
TEST_F(JniInternalTest, ThrowNew) {
- EXPECT_EQ(JNI_ERR, env_->Throw(NULL));
+ EXPECT_EQ(JNI_ERR, env_->Throw(nullptr));
jclass exception_class = env_->FindClass("java/lang/RuntimeException");
- ASSERT_TRUE(exception_class != NULL);
+ ASSERT_TRUE(exception_class != nullptr);
jthrowable thrown_exception;
@@ -1115,7 +1464,7 @@
env_->ExceptionClear();
EXPECT_TRUE(env_->IsInstanceOf(thrown_exception, exception_class));
- EXPECT_EQ(JNI_OK, env_->ThrowNew(exception_class, NULL));
+ EXPECT_EQ(JNI_OK, env_->ThrowNew(exception_class, nullptr));
EXPECT_TRUE(env_->ExceptionCheck());
thrown_exception = env_->ExceptionOccurred();
env_->ExceptionClear();
@@ -1141,26 +1490,26 @@
ASSERT_TRUE(started);
jclass buffer_class = env_->FindClass("java/nio/Buffer");
- ASSERT_TRUE(buffer_class != NULL);
+ ASSERT_NE(buffer_class, nullptr);
char bytes[1024];
jobject buffer = env_->NewDirectByteBuffer(bytes, sizeof(bytes));
- ASSERT_TRUE(buffer != NULL);
+ ASSERT_NE(buffer, nullptr);
ASSERT_TRUE(env_->IsInstanceOf(buffer, buffer_class));
- ASSERT_TRUE(env_->GetDirectBufferAddress(buffer) == bytes);
- ASSERT_TRUE(env_->GetDirectBufferCapacity(buffer) == sizeof(bytes));
+ ASSERT_EQ(env_->GetDirectBufferAddress(buffer), bytes);
+ ASSERT_EQ(env_->GetDirectBufferCapacity(buffer), static_cast<jlong>(sizeof(bytes)));
}
TEST_F(JniInternalTest, MonitorEnterExit) {
- // Create an object to torture
+ // Create an object to torture.
jclass object_class = env_->FindClass("java/lang/Object");
- ASSERT_TRUE(object_class != NULL);
+ ASSERT_NE(object_class, nullptr);
jobject object = env_->AllocObject(object_class);
- ASSERT_TRUE(object != NULL);
+ ASSERT_NE(object, nullptr);
// Expected class of exceptions
jclass imse_class = env_->FindClass("java/lang/IllegalMonitorStateException");
- ASSERT_TRUE(imse_class != NULL);
+ ASSERT_NE(imse_class, nullptr);
jthrowable thrown_exception;
@@ -1197,13 +1546,13 @@
env_->ExceptionClear();
EXPECT_TRUE(env_->IsInstanceOf(thrown_exception, imse_class));
- // It's an error to call MonitorEnter or MonitorExit on NULL.
+ // It's an error to call MonitorEnter or MonitorExit on nullptr.
{
CheckJniAbortCatcher check_jni_abort_catcher;
- env_->MonitorEnter(NULL);
+ env_->MonitorEnter(nullptr);
check_jni_abort_catcher.Check("in call to MonitorEnter");
- env_->MonitorExit(NULL);
+ env_->MonitorExit(nullptr);
check_jni_abort_catcher.Check("in call to MonitorExit");
}
}
@@ -1215,7 +1564,7 @@
jint err = vm_->DetachCurrentThread();
EXPECT_EQ(JNI_ERR, err);
- vm_->AttachCurrentThread(&env_, NULL); // need attached thread for CommonRuntimeTest::TearDown
+ vm_->AttachCurrentThread(&env_, nullptr); // need attached thread for CommonRuntimeTest::TearDown
}
} // namespace art
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 1594338..5225919 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -19,7 +19,7 @@
#include <inttypes.h>
#include <backtrace/BacktraceMap.h>
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#include "base/stringprintf.h"
#include "ScopedFd.h"
#include "utils.h"
@@ -128,6 +128,20 @@
// We need to store and potentially set an error number for pretty printing of errors
int saved_errno = 0;
+#ifdef __LP64__
+ // When requesting low_4g memory and having an expectation, the requested range should fit into
+ // 4GB.
+ if (low_4gb && (
+ // Start out of bounds.
+ (reinterpret_cast<uintptr_t>(expected) >> 32) != 0 ||
+ // End out of bounds. For simplicity, this will fail for the last page of memory.
+ (reinterpret_cast<uintptr_t>(expected + page_aligned_byte_count) >> 32) != 0)) {
+ *error_msg = StringPrintf("The requested address space (%p, %p) cannot fit in low_4gb",
+ expected, expected + page_aligned_byte_count);
+ return nullptr;
+ }
+#endif
+
// TODO:
// A page allocator would be a useful abstraction here, as
// 1) It is doubtful that MAP_32BIT on x86_64 is doing the right job for us
@@ -192,7 +206,7 @@
#else
#ifdef __x86_64__
- if (low_4gb) {
+ if (low_4gb && expected == nullptr) {
flags |= MAP_32BIT;
}
#endif
diff --git a/runtime/mem_map_test.cc b/runtime/mem_map_test.cc
index eea3307..b26f563 100644
--- a/runtime/mem_map_test.cc
+++ b/runtime/mem_map_test.cc
@@ -16,7 +16,7 @@
#include "mem_map.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#include "gtest/gtest.h"
namespace art {
@@ -163,4 +163,57 @@
}
#endif
+TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
+ std::string error_msg;
+ UniquePtr<MemMap> map(MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
+ reinterpret_cast<byte*>(0x71000000),
+ 0x21000000,
+ PROT_READ | PROT_WRITE,
+ true,
+ &error_msg));
+ ASSERT_TRUE(map.get() != nullptr) << error_msg;
+ ASSERT_TRUE(error_msg.empty());
+ ASSERT_EQ(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), 0x71000000U);
+}
+
+TEST_F(MemMapTest, MapAnonymousOverflow) {
+ std::string error_msg;
+ uintptr_t ptr = 0;
+ ptr -= kPageSize; // Now it's close to the top.
+ UniquePtr<MemMap> map(MemMap::MapAnonymous("MapAnonymousOverflow",
+ reinterpret_cast<byte*>(ptr),
+ 2 * kPageSize, // brings it over the top.
+ PROT_READ | PROT_WRITE,
+ false,
+ &error_msg));
+ ASSERT_EQ(nullptr, map.get());
+ ASSERT_FALSE(error_msg.empty());
+}
+
+#ifdef __LP64__
+TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
+ std::string error_msg;
+ UniquePtr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBExpectedTooHigh",
+ reinterpret_cast<byte*>(UINT64_C(0x100000000)),
+ kPageSize,
+ PROT_READ | PROT_WRITE,
+ true,
+ &error_msg));
+ ASSERT_EQ(nullptr, map.get());
+ ASSERT_FALSE(error_msg.empty());
+}
+
+TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
+ std::string error_msg;
+ UniquePtr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
+ reinterpret_cast<byte*>(0xF0000000),
+ 0x20000000,
+ PROT_READ | PROT_WRITE,
+ true,
+ &error_msg));
+ ASSERT_EQ(nullptr, map.get());
+ ASSERT_FALSE(error_msg.empty());
+}
+#endif
+
} // namespace art
diff --git a/runtime/mirror/array.cc b/runtime/mirror/array.cc
index 139e2d0..552652c 100644
--- a/runtime/mirror/array.cc
+++ b/runtime/mirror/array.cc
@@ -26,7 +26,7 @@
#include "object_array.h"
#include "object_array-inl.h"
#include "object_utils.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
#include "thread.h"
#include "utils.h"
@@ -42,22 +42,25 @@
// Recursively create an array with multiple dimensions. Elements may be
// Objects or primitive types.
static Array* RecursiveCreateMultiArray(Thread* self,
- const SirtRef<Class>& array_class, int current_dimension,
- const SirtRef<mirror::IntArray>& dimensions)
+ const Handle<Class>& array_class, int current_dimension,
+ const Handle<mirror::IntArray>& dimensions)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
int32_t array_length = dimensions->Get(current_dimension);
- SirtRef<Array> new_array(self, Array::Alloc<true>(self, array_class.get(), array_length,
- array_class->GetComponentSize(),
- Runtime::Current()->GetHeap()->GetCurrentAllocator()));
- if (UNLIKELY(new_array.get() == nullptr)) {
+ StackHandleScope<1> hs(self);
+ Handle<Array> new_array(
+ hs.NewHandle(
+ Array::Alloc<true>(self, array_class.Get(), array_length, array_class->GetComponentSize(),
+ Runtime::Current()->GetHeap()->GetCurrentAllocator())));
+ if (UNLIKELY(new_array.Get() == nullptr)) {
CHECK(self->IsExceptionPending());
return nullptr;
}
if (current_dimension + 1 < dimensions->GetLength()) {
// Create a new sub-array in every element of the array.
for (int32_t i = 0; i < array_length; i++) {
- SirtRef<mirror::Class> sirt_component_type(self, array_class->GetComponentType());
- Array* sub_array = RecursiveCreateMultiArray(self, sirt_component_type,
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> h_component_type(hs.NewHandle(array_class->GetComponentType()));
+ Array* sub_array = RecursiveCreateMultiArray(self, h_component_type,
current_dimension + 1, dimensions);
if (UNLIKELY(sub_array == nullptr)) {
CHECK(self->IsExceptionPending());
@@ -67,11 +70,11 @@
new_array->AsObjectArray<Array>()->Set<false, false>(i, sub_array);
}
}
- return new_array.get();
+ return new_array.Get();
}
-Array* Array::CreateMultiArray(Thread* self, const SirtRef<Class>& element_class,
- const SirtRef<IntArray>& dimensions) {
+Array* Array::CreateMultiArray(Thread* self, const Handle<Class>& element_class,
+ const Handle<IntArray>& dimensions) {
// Verify dimensions.
//
// The caller is responsible for verifying that "dimArray" is non-null
@@ -90,15 +93,16 @@
// Find/generate the array class.
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- SirtRef<mirror::Class> array_class(self,
- class_linker->FindArrayClass(self, element_class.get()));
- if (UNLIKELY(array_class.get() == nullptr)) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> array_class(
+ hs.NewHandle(class_linker->FindArrayClass(self, element_class.Get())));
+ if (UNLIKELY(array_class.Get() == nullptr)) {
CHECK(self->IsExceptionPending());
return nullptr;
}
for (int32_t i = 1; i < dimensions->GetLength(); ++i) {
- array_class.reset(class_linker->FindArrayClass(self, array_class.get()));
- if (UNLIKELY(array_class.get() == nullptr)) {
+ array_class.Assign(class_linker->FindArrayClass(self, array_class.Get()));
+ if (UNLIKELY(array_class.Get() == nullptr)) {
CHECK(self->IsExceptionPending());
return nullptr;
}
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index eead4eb..1b8106e 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -23,7 +23,7 @@
namespace art {
-template<class T> class SirtRef;
+template<class T> class Handle;
namespace mirror {
@@ -38,8 +38,8 @@
bool fill_usable = false)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static Array* CreateMultiArray(Thread* self, const SirtRef<Class>& element_class,
- const SirtRef<IntArray>& dimensions)
+ static Array* CreateMultiArray(Thread* self, const Handle<Class>& element_class,
+ const Handle<IntArray>& dimensions)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
@@ -155,14 +155,19 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void SetArrayClass(Class* array_class) {
- CHECK(array_class_ == NULL);
- CHECK(array_class != NULL);
+ CHECK(array_class_ == nullptr);
+ CHECK(array_class != nullptr);
array_class_ = array_class;
}
+ static Class* GetArrayClass() {
+ DCHECK(array_class_ != nullptr);
+ return array_class_;
+ }
+
static void ResetArrayClass() {
- CHECK(array_class_ != NULL);
- array_class_ = NULL;
+ CHECK(array_class_ != nullptr);
+ array_class_ = nullptr;
}
static void VisitRoots(RootCallback* callback, void* arg)
diff --git a/runtime/mirror/art_field.cc b/runtime/mirror/art_field.cc
index 8eb30f9..86c5c3f 100644
--- a/runtime/mirror/art_field.cc
+++ b/runtime/mirror/art_field.cc
@@ -29,7 +29,7 @@
namespace art {
namespace mirror {
-// TODO: get global references for these
+// TODO: Get global references for these
Class* ArtField::java_lang_reflect_ArtField_ = NULL;
ArtField* ArtField::FromReflectedField(const ScopedObjectAccess& soa, jobject jlr_field) {
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index fb9a09a..c3e2d22 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -23,7 +23,8 @@
#include "entrypoints/entrypoint_utils.h"
#include "object_array.h"
#include "oat.h"
-#include "runtime.h"
+#include "quick/quick_method_frame_info.h"
+#include "runtime-inl.h"
namespace art {
namespace mirror {
@@ -81,7 +82,7 @@
if (code == nullptr) {
return 0u;
}
- return reinterpret_cast<const OatMethodHeader*>(code)[-1].code_size_;
+ return reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].code_size_;
}
inline bool ArtMethod::CheckIncompatibleClassChange(InvokeType type) {
@@ -201,6 +202,40 @@
OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_jni_), native_method);
}
+inline QuickMethodFrameInfo ArtMethod::GetQuickFrameInfo() {
+ if (UNLIKELY(IsPortableCompiled())) {
+ // Portable compiled dex bytecode or jni stub.
+ return QuickMethodFrameInfo(kStackAlignment, 0u, 0u);
+ }
+ Runtime* runtime = Runtime::Current();
+ if (UNLIKELY(IsAbstract()) || UNLIKELY(IsProxyMethod())) {
+ return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
+ }
+ if (UNLIKELY(IsRuntimeMethod())) {
+ return runtime->GetRuntimeMethodFrameInfo(this);
+ }
+
+ const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(this);
+ // On failure, instead of nullptr we get the quick-generic-jni-trampoline for native method
+ // indicating the generic JNI, or the quick-to-interpreter-bridge (but not the trampoline)
+ // for non-native methods. And we really shouldn't see a failure for non-native methods here.
+ DCHECK(entry_point != GetQuickToInterpreterBridgeTrampoline(runtime->GetClassLinker()));
+ CHECK(entry_point != GetQuickToInterpreterBridge());
+
+ if (UNLIKELY(entry_point == GetQuickGenericJniTrampoline())) {
+ // Generic JNI frame.
+ DCHECK(IsNative());
+ uint32_t handle_refs = MethodHelper(this).GetNumberOfReferenceArgsWithoutReceiver() + 1;
+ size_t scope_size = HandleScope::GetAlignedHandleScopeSize(handle_refs);
+ QuickMethodFrameInfo callee_info = runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
+ return QuickMethodFrameInfo(callee_info.FrameSizeInBytes() + scope_size,
+ callee_info.CoreSpillMask(), callee_info.FpSpillMask());
+ }
+
+ const void* code_pointer = EntryPointToCodePointer(entry_point);
+ return reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].frame_info_;
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index 4275f25..495ae2d 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -230,14 +230,15 @@
return 0;
}
-uint32_t ArtMethod::FindCatchBlock(SirtRef<Class>& exception_type, uint32_t dex_pc,
- bool* has_no_move_exception) {
+uint32_t ArtMethod::FindCatchBlock(Handle<Class>& exception_type, uint32_t dex_pc,
+ bool* has_no_move_exception, bool* exc_changed) {
MethodHelper mh(this);
const DexFile::CodeItem* code_item = mh.GetCodeItem();
// Set aside the exception while we resolve its type.
Thread* self = Thread::Current();
ThrowLocation throw_location;
- SirtRef<mirror::Throwable> exception(self, self->GetException(&throw_location));
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Throwable> exception(hs.NewHandle(self->GetException(&throw_location)));
self->ClearException();
// Default to handler not found.
uint32_t found_dex_pc = DexFile::kDexNoIndex;
@@ -251,11 +252,18 @@
}
// Does this catch exception type apply?
Class* iter_exception_type = mh.GetClassFromTypeIdx(iter_type_idx);
- if (exception_type.get() == nullptr) {
- self->ClearException();
+ if (iter_exception_type == nullptr) {
+ // Now have a NoClassDefFoundError as exception.
+ // Note: this is not RI behavior. RI would have failed when loading the class.
+ *exc_changed = true;
+
+ // TODO: Add old exception as suppressed.
LOG(WARNING) << "Unresolved exception class when finding catch block: "
<< mh.GetTypeDescriptorFromTypeIdx(iter_type_idx);
- } else if (iter_exception_type->IsAssignableFrom(exception_type.get())) {
+
+ // Return immediately.
+ return DexFile::kDexNoIndex;
+ } else if (iter_exception_type->IsAssignableFrom(exception_type.Get())) {
found_dex_pc = it.GetHandlerAddress();
break;
}
@@ -266,8 +274,8 @@
*has_no_move_exception = (first_catch_instr->Opcode() != Instruction::MOVE_EXCEPTION);
}
// Put the exception back.
- if (exception.get() != nullptr) {
- self->SetException(throw_location, exception.get());
+ if (exception.Get() != nullptr) {
+ self->SetException(throw_location, exception.Get());
}
return found_dex_pc;
}
@@ -389,7 +397,7 @@
if (code == nullptr) {
return nullptr;
}
- uint32_t offset = reinterpret_cast<const OatMethodHeader*>(code)[-1].mapping_table_offset_;
+ uint32_t offset = reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].mapping_table_offset_;
if (UNLIKELY(offset == 0u)) {
return nullptr;
}
@@ -401,7 +409,7 @@
if (code == nullptr) {
return nullptr;
}
- uint32_t offset = reinterpret_cast<const OatMethodHeader*>(code)[-1].vmap_table_offset_;
+ uint32_t offset = reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].vmap_table_offset_;
if (UNLIKELY(offset == 0u)) {
return nullptr;
}
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index 71f0210..3950a98 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -23,6 +23,7 @@
#include "modifiers.h"
#include "object.h"
#include "object_callbacks.h"
+#include "quick/quick_method_frame_info.h"
namespace art {
@@ -318,25 +319,20 @@
template <bool kCheckFrameSize = true>
uint32_t GetFrameSizeInBytes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- uint32_t result = GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_frame_size_in_bytes_));
+ uint32_t result = GetQuickFrameInfo().FrameSizeInBytes();
if (kCheckFrameSize) {
DCHECK_LE(static_cast<size_t>(kStackAlignment), result);
}
return result;
}
- void SetFrameSizeInBytes(size_t new_frame_size_in_bytes)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // Not called within a transaction.
- SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_frame_size_in_bytes_),
- new_frame_size_in_bytes);
- }
+ QuickMethodFrameInfo GetQuickFrameInfo() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
size_t GetReturnPcOffsetInBytes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return GetFrameSizeInBytes() - kPointerSize;
}
- size_t GetSirtOffsetInBytes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ size_t GetHandleScopeOffsetInBytes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return kPointerSize;
}
@@ -362,26 +358,6 @@
return OFFSET_OF_OBJECT_MEMBER(ArtMethod, method_index_);
}
- uint32_t GetCoreSpillMask() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_core_spill_mask_));
- }
-
- void SetCoreSpillMask(uint32_t core_spill_mask) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // Computed during compilation.
- // Not called within a transaction.
- SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_core_spill_mask_), core_spill_mask);
- }
-
- uint32_t GetFpSpillMask() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_fp_spill_mask_));
- }
-
- void SetFpSpillMask(uint32_t fp_spill_mask) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // Computed during compilation.
- // Not called within a transaction.
- SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_fp_spill_mask_), fp_spill_mask);
- }
-
// Is this a CalleSaveMethod or ResolutionMethod and therefore doesn't adhere to normal
// conventions for a method of managed code. Returns false for Proxy methods.
bool IsRuntimeMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -405,8 +381,11 @@
// Find the catch block for the given exception type and dex_pc. When a catch block is found,
// indicates whether the found catch block is responsible for clearing the exception or whether
// a move-exception instruction is present.
- uint32_t FindCatchBlock(SirtRef<Class>& exception_type, uint32_t dex_pc,
- bool* has_no_move_exception)
+ // In the process of finding a catch block we might trigger resolution errors. This is flagged
+ // by exc_changed, which indicates that a different exception is now stored in the thread and
+ // should be reloaded.
+ uint32_t FindCatchBlock(Handle<Class>& exception_type, uint32_t dex_pc,
+ bool* has_no_move_exception, bool* exc_changed)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void SetClass(Class* java_lang_reflect_ArtMethod);
@@ -474,20 +453,6 @@
// ifTable.
uint32_t method_index_;
- // --- Quick compiler meta-data. ---
- // TODO: merge and place in native heap, such as done with the code size.
-
- // Bit map of spilled machine registers.
- uint32_t quick_core_spill_mask_;
-
- // Bit map of spilled floating point machine registers.
- uint32_t quick_fp_spill_mask_;
-
- // Fixed frame size for this method when executed.
- uint32_t quick_frame_size_in_bytes_;
-
- // --- End of quick compiler meta-data. ---
-
static Class* java_lang_reflect_ArtMethod_;
private:
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index ff63782..15b69f3 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -28,7 +28,7 @@
#include "object_array-inl.h"
#include "object_utils.h"
#include "runtime.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
#include "thread.h"
#include "throwable.h"
#include "utils.h"
@@ -77,20 +77,13 @@
<< "Attempt to set as erroneous an already erroneous class " << PrettyClass(this);
// Stash current exception.
- SirtRef<mirror::Object> old_throw_this_object(self, NULL);
- SirtRef<mirror::ArtMethod> old_throw_method(self, NULL);
- SirtRef<mirror::Throwable> old_exception(self, NULL);
- uint32_t old_throw_dex_pc;
- {
- ThrowLocation old_throw_location;
- mirror::Throwable* old_exception_obj = self->GetException(&old_throw_location);
- old_throw_this_object.reset(old_throw_location.GetThis());
- old_throw_method.reset(old_throw_location.GetMethod());
- old_exception.reset(old_exception_obj);
- old_throw_dex_pc = old_throw_location.GetDexPc();
- self->ClearException();
- }
- CHECK(old_exception.get() != NULL);
+ StackHandleScope<3> hs(self);
+ ThrowLocation old_throw_location;
+ Handle<mirror::Throwable> old_exception(hs.NewHandle(self->GetException(&old_throw_location)));
+ CHECK(old_exception.Get() != nullptr);
+ Handle<mirror::Object> old_throw_this_object(hs.NewHandle(old_throw_location.GetThis()));
+ Handle<mirror::ArtMethod> old_throw_method(hs.NewHandle(old_throw_location.GetMethod()));
+ uint32_t old_throw_dex_pc = old_throw_location.GetDexPc();
// clear exception to call FindSystemClass
self->ClearException();
@@ -107,10 +100,10 @@
}
// Restore exception.
- ThrowLocation gc_safe_throw_location(old_throw_this_object.get(), old_throw_method.get(),
+ ThrowLocation gc_safe_throw_location(old_throw_this_object.Get(), old_throw_method.Get(),
old_throw_dex_pc);
- self->SetException(gc_safe_throw_location, old_exception.get());
+ self->SetException(gc_safe_throw_location, old_exception.Get());
}
CHECK(sizeof(Status) == sizeof(uint32_t)) << PrettyClass(this);
if (Runtime::Current()->IsActiveTransaction()) {
@@ -149,7 +142,8 @@
return name;
}
Thread* self = Thread::Current();
- SirtRef<mirror::Class> sirt_c(self, this);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> handle_c(hs.NewHandle(this));
std::string descriptor(ClassHelper(this).GetDescriptor());
if ((descriptor[0] != 'L') && (descriptor[0] != '[')) {
// The descriptor indicates that this is the class for
@@ -179,7 +173,7 @@
std::replace(descriptor.begin(), descriptor.end(), '/', '.');
name = String::AllocFromModifiedUtf8(self, descriptor.c_str());
}
- sirt_c->SetName(name);
+ handle_c->SetName(name);
return name;
}
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 1f393db..92b999e 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -429,7 +429,7 @@
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
bool IsVariableSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Classes and arrays vary in size, and so the object_size_ field cannot
- // be used to get their instance size
+ // be used to Get their instance size
return IsClassClass<kVerifyFlags, kReadBarrierOption>() ||
IsArrayClass<kVerifyFlags, kReadBarrierOption>();
}
diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc
index fef1f9b..3d28dc6 100644
--- a/runtime/mirror/dex_cache_test.cc
+++ b/runtime/mirror/dex_cache_test.cc
@@ -23,7 +23,7 @@
#include "gc/heap.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
namespace art {
namespace mirror {
@@ -32,9 +32,10 @@
TEST_F(DexCacheTest, Open) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<DexCache> dex_cache(soa.Self(), class_linker_->AllocDexCache(soa.Self(),
- *java_lang_dex_file_));
- ASSERT_TRUE(dex_cache.get() != NULL);
+ StackHandleScope<1> hs(soa.Self());
+ Handle<DexCache> dex_cache(
+ hs.NewHandle(class_linker_->AllocDexCache(soa.Self(), *java_lang_dex_file_)));
+ ASSERT_TRUE(dex_cache.Get() != NULL);
EXPECT_EQ(java_lang_dex_file_->NumStringIds(), dex_cache->NumStrings());
EXPECT_EQ(java_lang_dex_file_->NumTypeIds(), dex_cache->NumResolvedTypes());
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 2f775bc..04905a5 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -32,7 +32,7 @@
#include "object_array-inl.h"
#include "object_utils.h"
#include "runtime.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
#include "throwable.h"
#include "well_known_classes.h"
@@ -100,19 +100,19 @@
// An allocation pre-fence visitor that copies the object.
class CopyObjectVisitor {
public:
- explicit CopyObjectVisitor(Thread* self, SirtRef<Object>* orig, size_t num_bytes)
+ explicit CopyObjectVisitor(Thread* self, Handle<Object>* orig, size_t num_bytes)
: self_(self), orig_(orig), num_bytes_(num_bytes) {
}
void operator()(Object* obj, size_t usable_size) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
UNUSED(usable_size);
- CopyObject(self_, obj, orig_->get(), num_bytes_);
+ CopyObject(self_, obj, orig_->Get(), num_bytes_);
}
private:
Thread* const self_;
- SirtRef<Object>* const orig_;
+ Handle<Object>* const orig_;
const size_t num_bytes_;
DISALLOW_COPY_AND_ASSIGN(CopyObjectVisitor);
};
@@ -123,7 +123,8 @@
// be wrong.
gc::Heap* heap = Runtime::Current()->GetHeap();
size_t num_bytes = SizeOf();
- SirtRef<Object> this_object(self, this);
+ StackHandleScope<1> hs(self);
+ Handle<Object> this_object(hs.NewHandle(this));
Object* copy;
CopyObjectVisitor visitor(self, &this_object, num_bytes);
if (heap->IsMovableObject(this)) {
@@ -163,10 +164,11 @@
case LockWord::kThinLocked: {
// Inflate the thin lock to a monitor and stick the hash code inside of the monitor.
Thread* self = Thread::Current();
- SirtRef<mirror::Object> sirt_this(self, current_this);
- Monitor::InflateThinLocked(self, sirt_this, lw, GenerateIdentityHashCode());
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Object> h_this(hs.NewHandle(current_this));
+ Monitor::InflateThinLocked(self, h_this, lw, GenerateIdentityHashCode());
// A GC may have occurred when we switched to kBlocked.
- current_this = sirt_this.get();
+ current_this = h_this.Get();
break;
}
case LockWord::kFatLocked: {
diff --git a/runtime/mirror/object_array-inl.h b/runtime/mirror/object_array-inl.h
index 203a6b2..942a271 100644
--- a/runtime/mirror/object_array-inl.h
+++ b/runtime/mirror/object_array-inl.h
@@ -23,7 +23,7 @@
#include "mirror/art_field.h"
#include "mirror/class.h"
#include "runtime.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
#include "thread.h"
#include <string>
@@ -118,7 +118,7 @@
int32_t src_pos, int32_t count) {
if (kIsDebugBuild) {
for (int i = 0; i < count; ++i) {
- // The Get will perform the VerifyObject.
+ // The get will perform the VerifyObject.
src->GetWithoutChecks(src_pos + i);
}
}
@@ -150,7 +150,7 @@
Runtime::Current()->GetHeap()->WriteBarrierArray(this, dst_pos, count);
if (kIsDebugBuild) {
for (int i = 0; i < count; ++i) {
- // The Get will perform the VerifyObject.
+ // The get will perform the VerifyObject.
GetWithoutChecks(dst_pos + i);
}
}
@@ -161,7 +161,7 @@
int32_t src_pos, int32_t count) {
if (kIsDebugBuild) {
for (int i = 0; i < count; ++i) {
- // The Get will perform the VerifyObject.
+ // The get will perform the VerifyObject.
src->GetWithoutChecks(src_pos + i);
}
}
@@ -182,7 +182,7 @@
Runtime::Current()->GetHeap()->WriteBarrierArray(this, dst_pos, count);
if (kIsDebugBuild) {
for (int i = 0; i < count; ++i) {
- // The Get will perform the VerifyObject.
+ // The get will perform the VerifyObject.
GetWithoutChecks(dst_pos + i);
}
}
@@ -244,13 +244,14 @@
inline ObjectArray<T>* ObjectArray<T>::CopyOf(Thread* self, int32_t new_length) {
DCHECK_GE(new_length, 0);
// We may get copied by a compacting GC.
- SirtRef<ObjectArray<T> > sirt_this(self, this);
+ StackHandleScope<1> hs(self);
+ Handle<ObjectArray<T> > h_this(hs.NewHandle(this));
gc::Heap* heap = Runtime::Current()->GetHeap();
gc::AllocatorType allocator_type = heap->IsMovableObject(this) ? heap->GetCurrentAllocator() :
heap->GetCurrentNonMovingAllocator();
ObjectArray<T>* new_array = Alloc(self, GetClass(), new_length, allocator_type);
if (LIKELY(new_array != nullptr)) {
- new_array->AssignableMemcpy(0, sirt_this.get(), 0, std::min(sirt_this->GetLength(), new_length));
+ new_array->AssignableMemcpy(0, h_this.Get(), 0, std::min(h_this->GetLength(), new_length));
}
return new_array;
}
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index c494f13..e0fd6a2 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -34,9 +34,9 @@
#include "art_method-inl.h"
#include "object-inl.h"
#include "object_array-inl.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
#include "string-inl.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
namespace art {
namespace mirror {
@@ -56,7 +56,9 @@
}
Thread* self = Thread::Current();
- SirtRef<String> string(self, String::AllocFromModifiedUtf8(self, expected_utf16_length, utf8_in));
+ StackHandleScope<1> hs(self);
+ Handle<String> string(
+ hs.NewHandle(String::AllocFromModifiedUtf8(self, expected_utf16_length, utf8_in)));
ASSERT_EQ(expected_utf16_length, string->GetLength());
ASSERT_TRUE(string->GetCharArray() != NULL);
ASSERT_TRUE(string->GetCharArray()->GetData() != NULL);
@@ -102,8 +104,9 @@
TEST_F(ObjectTest, Clone) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<ObjectArray<Object> > a1(soa.Self(),
- class_linker_->AllocObjectArray<Object>(soa.Self(), 256));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<ObjectArray<Object>> a1(
+ hs.NewHandle(class_linker_->AllocObjectArray<Object>(soa.Self(), 256)));
size_t s1 = a1->SizeOf();
Object* clone = a1->Clone(soa.Self());
EXPECT_EQ(s1, clone->SizeOf());
@@ -112,17 +115,18 @@
TEST_F(ObjectTest, AllocObjectArray) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<ObjectArray<Object> > oa(soa.Self(),
- class_linker_->AllocObjectArray<Object>(soa.Self(), 2));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<ObjectArray<Object> > oa(
+ hs.NewHandle(class_linker_->AllocObjectArray<Object>(soa.Self(), 2)));
EXPECT_EQ(2, oa->GetLength());
EXPECT_TRUE(oa->Get(0) == NULL);
EXPECT_TRUE(oa->Get(1) == NULL);
- oa->Set<false>(0, oa.get());
- EXPECT_TRUE(oa->Get(0) == oa.get());
+ oa->Set<false>(0, oa.Get());
+ EXPECT_TRUE(oa->Get(0) == oa.Get());
EXPECT_TRUE(oa->Get(1) == NULL);
- oa->Set<false>(1, oa.get());
- EXPECT_TRUE(oa->Get(0) == oa.get());
- EXPECT_TRUE(oa->Get(1) == oa.get());
+ oa->Set<false>(1, oa.Get());
+ EXPECT_TRUE(oa->Get(0) == oa.Get());
+ EXPECT_TRUE(oa->Get(1) == oa.Get());
Class* aioobe = class_linker_->FindSystemClass(soa.Self(),
"Ljava/lang/ArrayIndexOutOfBoundsException;");
@@ -149,20 +153,22 @@
TEST_F(ObjectTest, AllocArray) {
ScopedObjectAccess soa(Thread::Current());
Class* c = class_linker_->FindSystemClass(soa.Self(), "[I");
- SirtRef<Array> a(soa.Self(), Array::Alloc<true>(soa.Self(), c, 1, c->GetComponentSize(),
- Runtime::Current()->GetHeap()->GetCurrentAllocator()));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<Array> a(
+ hs.NewHandle(Array::Alloc<true>(soa.Self(), c, 1, c->GetComponentSize(),
+ Runtime::Current()->GetHeap()->GetCurrentAllocator())));
EXPECT_TRUE(c == a->GetClass());
EXPECT_EQ(1, a->GetLength());
c = class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;");
- a.reset(Array::Alloc<true>(soa.Self(), c, 1, c->GetComponentSize(),
- Runtime::Current()->GetHeap()->GetCurrentAllocator()));
+ a.Assign(Array::Alloc<true>(soa.Self(), c, 1, c->GetComponentSize(),
+ Runtime::Current()->GetHeap()->GetCurrentAllocator()));
EXPECT_TRUE(c == a->GetClass());
EXPECT_EQ(1, a->GetLength());
c = class_linker_->FindSystemClass(soa.Self(), "[[Ljava/lang/Object;");
- a.reset(Array::Alloc<true>(soa.Self(), c, 1, c->GetComponentSize(),
- Runtime::Current()->GetHeap()->GetCurrentAllocator()));
+ a.Assign(Array::Alloc<true>(soa.Self(), c, 1, c->GetComponentSize(),
+ Runtime::Current()->GetHeap()->GetCurrentAllocator()));
EXPECT_TRUE(c == a->GetClass());
EXPECT_EQ(1, a->GetLength());
}
@@ -170,28 +176,27 @@
TEST_F(ObjectTest, AllocArray_FillUsable) {
ScopedObjectAccess soa(Thread::Current());
Class* c = class_linker_->FindSystemClass(soa.Self(), "[B");
- SirtRef<Array> a(soa.Self(), Array::Alloc<true>(soa.Self(), c, 1, c->GetComponentSize(),
- Runtime::Current()->GetHeap()->GetCurrentAllocator(),
- true));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<Array> a(
+ hs.NewHandle(Array::Alloc<true>(soa.Self(), c, 1, c->GetComponentSize(),
+ Runtime::Current()->GetHeap()->GetCurrentAllocator(), true)));
EXPECT_TRUE(c == a->GetClass());
EXPECT_LE(1, a->GetLength());
c = class_linker_->FindSystemClass(soa.Self(), "[I");
- a.reset(Array::Alloc<true>(soa.Self(), c, 2, c->GetComponentSize(),
- Runtime::Current()->GetHeap()->GetCurrentAllocator(),
- true));
+ a.Assign(Array::Alloc<true>(soa.Self(), c, 2, c->GetComponentSize(),
+ Runtime::Current()->GetHeap()->GetCurrentAllocator(), true));
EXPECT_TRUE(c == a->GetClass());
EXPECT_LE(2, a->GetLength());
c = class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;");
- a.reset(Array::Alloc<true>(soa.Self(), c, 2, c->GetComponentSize(),
- Runtime::Current()->GetHeap()->GetCurrentAllocator(),
- true));
+ a.Assign(Array::Alloc<true>(soa.Self(), c, 2, c->GetComponentSize(),
+ Runtime::Current()->GetHeap()->GetCurrentAllocator(), true));
EXPECT_TRUE(c == a->GetClass());
EXPECT_LE(2, a->GetLength());
c = class_linker_->FindSystemClass(soa.Self(), "[[Ljava/lang/Object;");
- a.reset(Array::Alloc<true>(soa.Self(), c, 2, c->GetComponentSize(),
+ a.Assign(Array::Alloc<true>(soa.Self(), c, 2, c->GetComponentSize(),
Runtime::Current()->GetHeap()->GetCurrentAllocator(), true));
EXPECT_TRUE(c == a->GetClass());
EXPECT_LE(2, a->GetLength());
@@ -273,8 +278,9 @@
TEST_F(ObjectTest, CreateMultiArray) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<Class> c(soa.Self(), class_linker_->FindSystemClass(soa.Self(), "I"));
- SirtRef<IntArray> dims(soa.Self(), IntArray::Alloc(soa.Self(), 1));
+ StackHandleScope<2> hs(soa.Self());
+ Handle<Class> c(hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "I")));
+ Handle<IntArray> dims(hs.NewHandle(IntArray::Alloc(soa.Self(), 1)));
dims->Set<false>(0, 1);
Array* multi = Array::CreateMultiArray(soa.Self(), c, dims);
EXPECT_TRUE(multi->GetClass() == class_linker_->FindSystemClass(soa.Self(), "[I"));
@@ -287,7 +293,7 @@
"java.lang.NegativeArraySizeException");
soa.Self()->ClearException();
- dims.reset(IntArray::Alloc(soa.Self(), 2));
+ dims.Assign(IntArray::Alloc(soa.Self(), 2));
for (int i = 1; i < 20; ++i) {
for (int j = 0; j < 20; ++j) {
dims->Set<false>(0, i);
@@ -311,7 +317,8 @@
const DexFile* dex_file = Runtime::Current()->GetCompileTimeClassPath(class_loader)[0];
CHECK(dex_file != NULL);
- SirtRef<mirror::ClassLoader> loader(soa.Self(), soa.Decode<ClassLoader*>(class_loader));
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::ClassLoader> loader(hs.NewHandle(soa.Decode<ClassLoader*>(class_loader)));
Class* klass = class_linker_->FindClass(soa.Self(), "LStaticsFromCode;", loader);
ArtMethod* clinit = klass->FindClassInitializer();
const DexFile::StringId* klass_string_id = dex_file->FindStringId("LStaticsFromCode;");
@@ -339,9 +346,9 @@
Object* s0 = field->GetObj(klass);
EXPECT_TRUE(s0 != NULL);
- SirtRef<CharArray> char_array(soa.Self(), CharArray::Alloc(soa.Self(), 0));
- field->SetObj<false>(field->GetDeclaringClass(), char_array.get());
- EXPECT_EQ(char_array.get(), field->GetObj(klass));
+ Handle<CharArray> char_array(hs.NewHandle(CharArray::Alloc(soa.Self(), 0)));
+ field->SetObj<false>(field->GetDeclaringClass(), char_array.Get());
+ EXPECT_EQ(char_array.Get(), field->GetObj(klass));
field->SetObj<false>(field->GetDeclaringClass(), NULL);
EXPECT_EQ(NULL, field->GetObj(klass));
@@ -375,7 +382,8 @@
TEST_F(ObjectTest, StringEqualsUtf8) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<String> string(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "android"));
+ StackHandleScope<2> hs(soa.Self());
+ Handle<String> string(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "android")));
EXPECT_TRUE(string->Equals("android"));
EXPECT_FALSE(string->Equals("Android"));
EXPECT_FALSE(string->Equals("ANDROID"));
@@ -383,46 +391,49 @@
EXPECT_FALSE(string->Equals("and"));
EXPECT_FALSE(string->Equals("androids"));
- SirtRef<String> empty(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), ""));
+ Handle<String> empty(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "")));
EXPECT_TRUE(empty->Equals(""));
EXPECT_FALSE(empty->Equals("a"));
}
TEST_F(ObjectTest, StringEquals) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<String> string(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "android"));
- SirtRef<String> string_2(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "android"));
- EXPECT_TRUE(string->Equals(string_2.get()));
+ StackHandleScope<3> hs(soa.Self());
+ Handle<String> string(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "android")));
+ Handle<String> string_2(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "android")));
+ EXPECT_TRUE(string->Equals(string_2.Get()));
EXPECT_FALSE(string->Equals("Android"));
EXPECT_FALSE(string->Equals("ANDROID"));
EXPECT_FALSE(string->Equals(""));
EXPECT_FALSE(string->Equals("and"));
EXPECT_FALSE(string->Equals("androids"));
- SirtRef<String> empty(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), ""));
+ Handle<String> empty(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "")));
EXPECT_TRUE(empty->Equals(""));
EXPECT_FALSE(empty->Equals("a"));
}
TEST_F(ObjectTest, StringCompareTo) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<String> string(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "android"));
- SirtRef<String> string_2(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "android"));
- SirtRef<String> string_3(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "Android"));
- SirtRef<String> string_4(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "and"));
- SirtRef<String> string_5(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), ""));
- EXPECT_EQ(0, string->CompareTo(string_2.get()));
- EXPECT_LT(0, string->CompareTo(string_3.get()));
- EXPECT_GT(0, string_3->CompareTo(string.get()));
- EXPECT_LT(0, string->CompareTo(string_4.get()));
- EXPECT_GT(0, string_4->CompareTo(string.get()));
- EXPECT_LT(0, string->CompareTo(string_5.get()));
- EXPECT_GT(0, string_5->CompareTo(string.get()));
+ StackHandleScope<5> hs(soa.Self());
+ Handle<String> string(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "android")));
+ Handle<String> string_2(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "android")));
+ Handle<String> string_3(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "Android")));
+ Handle<String> string_4(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "and")));
+ Handle<String> string_5(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "")));
+ EXPECT_EQ(0, string->CompareTo(string_2.Get()));
+ EXPECT_LT(0, string->CompareTo(string_3.Get()));
+ EXPECT_GT(0, string_3->CompareTo(string.Get()));
+ EXPECT_LT(0, string->CompareTo(string_4.Get()));
+ EXPECT_GT(0, string_4->CompareTo(string.Get()));
+ EXPECT_LT(0, string->CompareTo(string_5.Get()));
+ EXPECT_GT(0, string_5->CompareTo(string.Get()));
}
TEST_F(ObjectTest, StringLength) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<String> string(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "android"));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<String> string(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "android")));
EXPECT_EQ(string->GetLength(), 7);
EXPECT_EQ(string->GetUtfLength(), 7);
@@ -440,8 +451,9 @@
jobject jclass_loader_1 = LoadDex("ProtoCompare");
jobject jclass_loader_2 = LoadDex("ProtoCompare2");
- SirtRef<ClassLoader> class_loader_1(soa.Self(), soa.Decode<ClassLoader*>(jclass_loader_1));
- SirtRef<ClassLoader> class_loader_2(soa.Self(), soa.Decode<ClassLoader*>(jclass_loader_2));
+ StackHandleScope<2> hs(soa.Self());
+ Handle<ClassLoader> class_loader_1(hs.NewHandle(soa.Decode<ClassLoader*>(jclass_loader_1)));
+ Handle<ClassLoader> class_loader_2(hs.NewHandle(soa.Decode<ClassLoader*>(jclass_loader_2)));
Class* klass1 = linker->FindClass(soa.Self(), "LProtoCompare;", class_loader_1);
ASSERT_TRUE(klass1 != NULL);
@@ -497,9 +509,10 @@
TEST_F(ObjectTest, StringHashCode) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<String> empty(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), ""));
- SirtRef<String> A(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "A"));
- SirtRef<String> ABC(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "ABC"));
+ StackHandleScope<3> hs(soa.Self());
+ Handle<String> empty(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "")));
+ Handle<String> A(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "A")));
+ Handle<String> ABC(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "ABC")));
EXPECT_EQ(0, empty->GetHashCode());
EXPECT_EQ(65, A->GetHashCode());
@@ -509,17 +522,18 @@
TEST_F(ObjectTest, InstanceOf) {
ScopedObjectAccess soa(Thread::Current());
jobject jclass_loader = LoadDex("XandY");
- SirtRef<ClassLoader> class_loader(soa.Self(), soa.Decode<ClassLoader*>(jclass_loader));
+ StackHandleScope<3> hs(soa.Self());
+ Handle<ClassLoader> class_loader(hs.NewHandle(soa.Decode<ClassLoader*>(jclass_loader)));
Class* X = class_linker_->FindClass(soa.Self(), "LX;", class_loader);
Class* Y = class_linker_->FindClass(soa.Self(), "LY;", class_loader);
ASSERT_TRUE(X != NULL);
ASSERT_TRUE(Y != NULL);
- SirtRef<Object> x(soa.Self(), X->AllocObject(soa.Self()));
- SirtRef<Object> y(soa.Self(), Y->AllocObject(soa.Self()));
- ASSERT_TRUE(x.get() != NULL);
- ASSERT_TRUE(y.get() != NULL);
+ Handle<Object> x(hs.NewHandle(X->AllocObject(soa.Self())));
+ Handle<Object> y(hs.NewHandle(Y->AllocObject(soa.Self())));
+ ASSERT_TRUE(x.Get() != NULL);
+ ASSERT_TRUE(y.Get() != NULL);
EXPECT_TRUE(x->InstanceOf(X));
EXPECT_FALSE(x->InstanceOf(Y));
@@ -543,7 +557,8 @@
TEST_F(ObjectTest, IsAssignableFrom) {
ScopedObjectAccess soa(Thread::Current());
jobject jclass_loader = LoadDex("XandY");
- SirtRef<ClassLoader> class_loader(soa.Self(), soa.Decode<ClassLoader*>(jclass_loader));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<ClassLoader> class_loader(hs.NewHandle(soa.Decode<ClassLoader*>(jclass_loader)));
Class* X = class_linker_->FindClass(soa.Self(), "LX;", class_loader);
Class* Y = class_linker_->FindClass(soa.Self(), "LY;", class_loader);
@@ -580,7 +595,8 @@
TEST_F(ObjectTest, IsAssignableFromArray) {
ScopedObjectAccess soa(Thread::Current());
jobject jclass_loader = LoadDex("XandY");
- SirtRef<ClassLoader> class_loader(soa.Self(), soa.Decode<ClassLoader*>(jclass_loader));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<ClassLoader> class_loader(hs.NewHandle(soa.Decode<ClassLoader*>(jclass_loader)));
Class* X = class_linker_->FindClass(soa.Self(), "LX;", class_loader);
Class* Y = class_linker_->FindClass(soa.Self(), "LY;", class_loader);
ASSERT_TRUE(X != NULL);
@@ -632,8 +648,9 @@
TEST_F(ObjectTest, FindInstanceField) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<String> s(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "ABC"));
- ASSERT_TRUE(s.get() != NULL);
+ StackHandleScope<1> hs(soa.Self());
+ Handle<String> s(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "ABC")));
+ ASSERT_TRUE(s.Get() != NULL);
Class* c = s->GetClass();
ASSERT_TRUE(c != NULL);
@@ -665,8 +682,9 @@
TEST_F(ObjectTest, FindStaticField) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<String> s(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "ABC"));
- ASSERT_TRUE(s.get() != NULL);
+ StackHandleScope<1> hs(soa.Self());
+ Handle<String> s(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "ABC")));
+ ASSERT_TRUE(s.Get() != NULL);
Class* c = s->GetClass();
ASSERT_TRUE(c != NULL);
diff --git a/runtime/mirror/stack_trace_element.cc b/runtime/mirror/stack_trace_element.cc
index f220039..d8591cc 100644
--- a/runtime/mirror/stack_trace_element.cc
+++ b/runtime/mirror/stack_trace_element.cc
@@ -20,7 +20,7 @@
#include "class-inl.h"
#include "gc/accounting/card_table-inl.h"
#include "object-inl.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
#include "string.h"
namespace art {
@@ -40,9 +40,9 @@
}
StackTraceElement* StackTraceElement::Alloc(Thread* self,
- SirtRef<String>& declaring_class,
- SirtRef<String>& method_name,
- SirtRef<String>& file_name,
+ Handle<String>& declaring_class,
+ Handle<String>& method_name,
+ Handle<String>& file_name,
int32_t line_number) {
StackTraceElement* trace =
down_cast<StackTraceElement*>(GetStackTraceElement()->AllocObject(self));
@@ -57,14 +57,14 @@
}
template<bool kTransactionActive>
-void StackTraceElement::Init(SirtRef<String>& declaring_class, SirtRef<String>& method_name,
- SirtRef<String>& file_name, int32_t line_number) {
+void StackTraceElement::Init(Handle<String>& declaring_class, Handle<String>& method_name,
+ Handle<String>& file_name, int32_t line_number) {
SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, declaring_class_),
- declaring_class.get());
+ declaring_class.Get());
SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, method_name_),
- method_name.get());
+ method_name.Get());
SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, file_name_),
- file_name.get());
+ file_name.Get());
SetField32<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, line_number_),
line_number);
}
diff --git a/runtime/mirror/stack_trace_element.h b/runtime/mirror/stack_trace_element.h
index c324d96..22d9b71 100644
--- a/runtime/mirror/stack_trace_element.h
+++ b/runtime/mirror/stack_trace_element.h
@@ -22,7 +22,7 @@
namespace art {
-template<class T> class SirtRef;
+template<class T> class Handle;
struct StackTraceElementOffsets;
namespace mirror {
@@ -47,9 +47,9 @@
}
static StackTraceElement* Alloc(Thread* self,
- SirtRef<String>& declaring_class,
- SirtRef<String>& method_name,
- SirtRef<String>& file_name,
+ Handle<String>& declaring_class,
+ Handle<String>& method_name,
+ Handle<String>& file_name,
int32_t line_number)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -70,8 +70,8 @@
int32_t line_number_;
template<bool kTransactionActive>
- void Init(SirtRef<String>& declaring_class, SirtRef<String>& method_name,
- SirtRef<String>& file_name, int32_t line_number)
+ void Init(Handle<String>& declaring_class, Handle<String>& method_name,
+ Handle<String>& file_name, int32_t line_number)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static Class* java_lang_StackTraceElement_;
diff --git a/runtime/mirror/string.cc b/runtime/mirror/string.cc
index 6a0c225..ee719b4 100644
--- a/runtime/mirror/string.cc
+++ b/runtime/mirror/string.cc
@@ -22,7 +22,7 @@
#include "intern_table.h"
#include "object-inl.h"
#include "runtime.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
#include "thread.h"
#include "utf-inl.h"
@@ -123,18 +123,19 @@
}
String* String::Alloc(Thread* self, int32_t utf16_length) {
- SirtRef<CharArray> array(self, CharArray::Alloc(self, utf16_length));
- if (UNLIKELY(array.get() == nullptr)) {
+ StackHandleScope<1> hs(self);
+ Handle<CharArray> array(hs.NewHandle(CharArray::Alloc(self, utf16_length)));
+ if (UNLIKELY(array.Get() == nullptr)) {
return nullptr;
}
return Alloc(self, array);
}
-String* String::Alloc(Thread* self, const SirtRef<CharArray>& array) {
+String* String::Alloc(Thread* self, const Handle<CharArray>& array) {
// Hold reference in case AllocObject causes GC.
String* string = down_cast<String*>(GetJavaLangString()->AllocObject(self));
if (LIKELY(string != nullptr)) {
- string->SetArray(array.get());
+ string->SetArray(array.Get());
string->SetCount(array->GetLength());
}
return string;
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index f97308e..169b671 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -24,7 +24,7 @@
namespace art {
-template<class T> class SirtRef;
+template<class T> class Handle;
struct StringClassOffsets;
struct StringOffsets;
class StringPiece;
@@ -137,7 +137,7 @@
static String* Alloc(Thread* self, int32_t utf16_length)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static String* Alloc(Thread* self, const SirtRef<CharArray>& array)
+ static String* Alloc(Thread* self, const Handle<CharArray>& array)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SetArray(CharArray* new_array) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 38b77d1..64edba8 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -632,31 +632,32 @@
}
}
-void Monitor::InflateThinLocked(Thread* self, SirtRef<mirror::Object>& obj, LockWord lock_word,
+void Monitor::InflateThinLocked(Thread* self, Handle<mirror::Object>& obj, LockWord lock_word,
uint32_t hash_code) {
DCHECK_EQ(lock_word.GetState(), LockWord::kThinLocked);
uint32_t owner_thread_id = lock_word.ThinLockOwner();
if (owner_thread_id == self->GetThreadId()) {
// We own the monitor, we can easily inflate it.
- Inflate(self, self, obj.get(), hash_code);
+ Inflate(self, self, obj.Get(), hash_code);
} else {
ThreadList* thread_list = Runtime::Current()->GetThreadList();
// Suspend the owner, inflate. First change to blocked and give up mutator_lock_.
- ScopedThreadStateChange tsc(self, kBlocked);
- self->SetMonitorEnterObject(obj.get());
- if (lock_word == obj->GetLockWord(true)) { // If lock word hasn't changed.
- bool timed_out;
- Thread* owner = thread_list->SuspendThreadByThreadId(owner_thread_id, false, &timed_out);
- if (owner != nullptr) {
- // We succeeded in suspending the thread, check the lock's status didn't change.
- lock_word = obj->GetLockWord(true);
- if (lock_word.GetState() == LockWord::kThinLocked &&
- lock_word.ThinLockOwner() == owner_thread_id) {
- // Go ahead and inflate the lock.
- Inflate(self, owner, obj.get(), hash_code);
- }
- thread_list->Resume(owner, false);
+ self->SetMonitorEnterObject(obj.Get());
+ bool timed_out;
+ Thread* owner;
+ {
+ ScopedThreadStateChange tsc(self, kBlocked);
+ owner = thread_list->SuspendThreadByThreadId(owner_thread_id, false, &timed_out);
+ }
+ if (owner != nullptr) {
+ // We succeeded in suspending the thread, check the lock's status didn't change.
+ lock_word = obj->GetLockWord(true);
+ if (lock_word.GetState() == LockWord::kThinLocked &&
+ lock_word.ThinLockOwner() == owner_thread_id) {
+ // Go ahead and inflate the lock.
+ Inflate(self, owner, obj.Get(), hash_code);
}
+ thread_list->Resume(owner, false);
}
self->SetMonitorEnterObject(nullptr);
}
@@ -680,15 +681,16 @@
obj = FakeLock(obj);
uint32_t thread_id = self->GetThreadId();
size_t contention_count = 0;
- SirtRef<mirror::Object> sirt_obj(self, obj);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Object> h_obj(hs.NewHandle(obj));
while (true) {
- LockWord lock_word = sirt_obj->GetLockWord(true);
+ LockWord lock_word = h_obj->GetLockWord(true);
switch (lock_word.GetState()) {
case LockWord::kUnlocked: {
LockWord thin_locked(LockWord::FromThinLockId(thread_id, 0));
- if (sirt_obj->CasLockWord(lock_word, thin_locked)) {
+ if (h_obj->CasLockWord(lock_word, thin_locked)) {
QuasiAtomic::MembarLoadLoad();
- return sirt_obj.get(); // Success!
+ return h_obj.Get(); // Success!
}
continue; // Go again.
}
@@ -699,11 +701,11 @@
uint32_t new_count = lock_word.ThinLockCount() + 1;
if (LIKELY(new_count <= LockWord::kThinLockMaxCount)) {
LockWord thin_locked(LockWord::FromThinLockId(thread_id, new_count));
- sirt_obj->SetLockWord(thin_locked, true);
- return sirt_obj.get(); // Success!
+ h_obj->SetLockWord(thin_locked, true);
+ return h_obj.Get(); // Success!
} else {
// We'd overflow the recursion count, so inflate the monitor.
- InflateThinLocked(self, sirt_obj, lock_word, 0);
+ InflateThinLocked(self, h_obj, lock_word, 0);
}
} else {
// Contention.
@@ -713,7 +715,7 @@
NanoSleep(1000); // Sleep for 1us and re-attempt.
} else {
contention_count = 0;
- InflateThinLocked(self, sirt_obj, lock_word, 0);
+ InflateThinLocked(self, h_obj, lock_word, 0);
}
}
continue; // Start from the beginning.
@@ -721,15 +723,15 @@
case LockWord::kFatLocked: {
Monitor* mon = lock_word.FatLockMonitor();
mon->Lock(self);
- return sirt_obj.get(); // Success!
+ return h_obj.Get(); // Success!
}
case LockWord::kHashCode:
// Inflate with the existing hashcode.
- Inflate(self, nullptr, sirt_obj.get(), lock_word.GetHashCode());
+ Inflate(self, nullptr, h_obj.Get(), lock_word.GetHashCode());
continue; // Start from the beginning.
default: {
LOG(FATAL) << "Invalid monitor state " << lock_word.GetState();
- return sirt_obj.get();
+ return h_obj.Get();
}
}
}
@@ -740,12 +742,13 @@
DCHECK(obj != NULL);
obj = FakeUnlock(obj);
LockWord lock_word = obj->GetLockWord(true);
- SirtRef<mirror::Object> sirt_obj(self, obj);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Object> h_obj(hs.NewHandle(obj));
switch (lock_word.GetState()) {
case LockWord::kHashCode:
// Fall-through.
case LockWord::kUnlocked:
- FailedUnlock(sirt_obj.get(), self, nullptr, nullptr);
+ FailedUnlock(h_obj.Get(), self, nullptr, nullptr);
return false; // Failure.
case LockWord::kThinLocked: {
uint32_t thread_id = self->GetThreadId();
@@ -754,16 +757,16 @@
// TODO: there's a race here with the owner dying while we unlock.
Thread* owner =
Runtime::Current()->GetThreadList()->FindThreadByThreadId(lock_word.ThinLockOwner());
- FailedUnlock(sirt_obj.get(), self, owner, nullptr);
+ FailedUnlock(h_obj.Get(), self, owner, nullptr);
return false; // Failure.
} else {
// We own the lock, decrease the recursion count.
if (lock_word.ThinLockCount() != 0) {
uint32_t new_count = lock_word.ThinLockCount() - 1;
LockWord thin_locked(LockWord::FromThinLockId(thread_id, new_count));
- sirt_obj->SetLockWord(thin_locked, true);
+ h_obj->SetLockWord(thin_locked, true);
} else {
- sirt_obj->SetLockWord(LockWord(), true);
+ h_obj->SetLockWord(LockWord(), true);
}
return true; // Success!
}
@@ -946,7 +949,7 @@
// TODO: use the JNI implementation's table of explicit MonitorEnter calls and dump those too.
if (m->IsNative()) {
if (m->IsSynchronized()) {
- mirror::Object* jni_this = stack_visitor->GetCurrentSirt()->GetReference(0);
+ mirror::Object* jni_this = stack_visitor->GetCurrentHandleScope()->GetReference(0);
callback(jni_this, callback_context);
}
return;
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 15620d5..bc5d2e4 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -32,7 +32,7 @@
namespace art {
class LockWord;
-template<class T> class SirtRef;
+template<class T> class Handle;
class Thread;
class StackVisitor;
typedef uint32_t MonitorId;
@@ -114,7 +114,7 @@
return monitor_id_;
}
- static void InflateThinLocked(Thread* self, SirtRef<mirror::Object>& obj, LockWord lock_word,
+ static void InflateThinLocked(Thread* self, Handle<mirror::Object>& obj, LockWord lock_word,
uint32_t hash_code) NO_THREAD_SAFETY_ANALYSIS;
static bool Deflate(Thread* self, mirror::Object* obj)
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index ed1ee7a..5353592 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -15,8 +15,8 @@
*/
#include <algorithm>
-#include <fcntl.h>
#include <set>
+#include <fcntl.h>
#include <unistd.h>
#include "base/logging.h"
@@ -37,7 +37,7 @@
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
#include "ScopedUtfChars.h"
-#include "toStringArray.h"
+#include "well_known_classes.h"
#include "zip_archive.h"
#ifdef HAVE_ANDROID_OS
@@ -186,7 +186,9 @@
ScopedObjectAccess soa(env);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
class_linker->RegisterDexFile(*dex_file);
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(javaLoader));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(javaLoader)));
mirror::Class* result = class_linker->DefineClass(descriptor.c_str(), class_loader, *dex_file,
*dex_class_def);
VLOG(class_linker) << "DexFile_defineClassNative returning " << result;
@@ -194,19 +196,24 @@
}
static jobjectArray DexFile_getClassNameList(JNIEnv* env, jclass, jlong cookie) {
- const DexFile* dex_file;
- dex_file = toDexFile(cookie, env);
+ jobjectArray result = nullptr;
+ const DexFile* dex_file = toDexFile(cookie, env);
  if (dex_file != nullptr) {
- return nullptr;
+ result = env->NewObjectArray(dex_file->NumClassDefs(), WellKnownClasses::java_lang_String,
+ nullptr);
+ if (result != nullptr) {
+ for (size_t i = 0; i < dex_file->NumClassDefs(); ++i) {
+ const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
+ const char* descriptor = dex_file->GetClassDescriptor(class_def);
+ ScopedLocalRef<jstring> jdescriptor(env, env->NewStringUTF(descriptor));
+ if (jdescriptor.get() == nullptr) {
+ return nullptr;
+ }
+ env->SetObjectArrayElement(result, i, jdescriptor.get());
+ }
+ }
}
-
- std::vector<std::string> class_names;
- for (size_t i = 0; i < dex_file->NumClassDefs(); ++i) {
- const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
- const char* descriptor = dex_file->GetClassDescriptor(class_def);
- class_names.push_back(DescriptorToDot(descriptor));
- }
- return toStringArray(env, class_names);
+ return result;
}
// Copy a profile file
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 0b58af4..ceff206 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -28,21 +28,35 @@
#include "hprof/hprof.h"
#include "jni_internal.h"
#include "mirror/class.h"
+#include "ScopedLocalRef.h"
#include "ScopedUtfChars.h"
#include "scoped_fast_native_object_access.h"
-#include "toStringArray.h"
#include "trace.h"
+#include "well_known_classes.h"
namespace art {
static jobjectArray VMDebug_getVmFeatureList(JNIEnv* env, jclass) {
- std::vector<std::string> features;
- features.push_back("method-trace-profiling");
- features.push_back("method-trace-profiling-streaming");
- features.push_back("method-sample-profiling");
- features.push_back("hprof-heap-dump");
- features.push_back("hprof-heap-dump-streaming");
- return toStringArray(env, features);
+ static const char* features[] = {
+ "method-trace-profiling",
+ "method-trace-profiling-streaming",
+ "method-sample-profiling",
+ "hprof-heap-dump",
+ "hprof-heap-dump-streaming",
+ };
+ jobjectArray result = env->NewObjectArray(arraysize(features),
+ WellKnownClasses::java_lang_String,
+ nullptr);
+ if (result != nullptr) {
+ for (size_t i = 0; i < arraysize(features); ++i) {
+ ScopedLocalRef<jstring> jfeature(env, env->NewStringUTF(features[i]));
+ if (jfeature.get() == nullptr) {
+ return nullptr;
+ }
+ env->SetObjectArrayElement(result, i, jfeature.get());
+ }
+ }
+ return result;
}
static void VMDebug_startAllocCounting(JNIEnv*, jclass) {
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 76c5866..8d183da 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -66,10 +66,6 @@
ThrowNullPointerException(NULL, "element class == null");
return nullptr;
}
- if (UNLIKELY(element_class->IsPrimitiveVoid())) {
- ThrowIllegalArgumentException(NULL, "Can't allocate an array of void");
- return nullptr;
- }
Runtime* runtime = Runtime::Current();
mirror::Class* array_class = runtime->GetClassLinker()->FindArrayClass(soa.Self(), element_class);
if (UNLIKELY(array_class == nullptr)) {
@@ -93,10 +89,6 @@
ThrowNullPointerException(NULL, "element class == null");
return nullptr;
}
- if (UNLIKELY(element_class->IsPrimitiveVoid())) {
- ThrowIllegalArgumentException(NULL, "Can't allocate an array of void");
- return nullptr;
- }
Runtime* runtime = Runtime::Current();
mirror::Class* array_class = runtime->GetClassLinker()->FindArrayClass(soa.Self(), element_class);
if (UNLIKELY(array_class == nullptr)) {
@@ -214,7 +206,7 @@
}
// Based on ClassLinker::ResolveString.
-static void PreloadDexCachesResolveString(SirtRef<mirror::DexCache>& dex_cache, uint32_t string_idx,
+static void PreloadDexCachesResolveString(Handle<mirror::DexCache>& dex_cache, uint32_t string_idx,
StringTable& strings)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::String* string = dex_cache->GetResolvedString(string_idx);
@@ -260,7 +252,7 @@
}
// Based on ClassLinker::ResolveField.
-static void PreloadDexCachesResolveField(SirtRef<mirror::DexCache>& dex_cache,
+static void PreloadDexCachesResolveField(Handle<mirror::DexCache>& dex_cache,
uint32_t field_idx,
bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -275,9 +267,9 @@
return;
}
if (is_static) {
- field = klass->FindStaticField(dex_cache.get(), field_idx);
+ field = klass->FindStaticField(dex_cache.Get(), field_idx);
} else {
- field = klass->FindInstanceField(dex_cache.get(), field_idx);
+ field = klass->FindInstanceField(dex_cache.Get(), field_idx);
}
if (field == NULL) {
return;
@@ -287,7 +279,7 @@
}
// Based on ClassLinker::ResolveMethod.
-static void PreloadDexCachesResolveMethod(SirtRef<mirror::DexCache>& dex_cache,
+static void PreloadDexCachesResolveMethod(Handle<mirror::DexCache>& dex_cache,
uint32_t method_idx,
InvokeType invoke_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -304,14 +296,14 @@
switch (invoke_type) {
case kDirect:
case kStatic:
- method = klass->FindDirectMethod(dex_cache.get(), method_idx);
+ method = klass->FindDirectMethod(dex_cache.Get(), method_idx);
break;
case kInterface:
- method = klass->FindInterfaceMethod(dex_cache.get(), method_idx);
+ method = klass->FindInterfaceMethod(dex_cache.Get(), method_idx);
break;
case kSuper:
case kVirtual:
- method = klass->FindVirtualMethod(dex_cache.get(), method_idx);
+ method = klass->FindVirtualMethod(dex_cache.Get(), method_idx);
break;
default:
LOG(FATAL) << "Unreachable - invocation type: " << invoke_type;
@@ -434,7 +426,8 @@
for (size_t i = 0; i< boot_class_path.size(); i++) {
const DexFile* dex_file = boot_class_path[i];
CHECK(dex_file != NULL);
- SirtRef<mirror::DexCache> dex_cache(self, linker->FindDexCache(*dex_file));
+ StackHandleScope<1> hs(self);
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(linker->FindDexCache(*dex_file)));
if (kPreloadDexCachesStrings) {
for (size_t i = 0; i < dex_cache->NumStrings(); i++) {
@@ -444,7 +437,7 @@
if (kPreloadDexCachesTypes) {
for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) {
- PreloadDexCachesResolveType(dex_cache.get(), i);
+ PreloadDexCachesResolveType(dex_cache.Get(), i);
}
}
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 6daf9a9..b6cf7d8 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -62,12 +62,12 @@
}
std::string descriptor(DotToDescriptor(name.c_str()));
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
- soa.Decode<mirror::ClassLoader*>(javaLoader));
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(soa.Decode<mirror::ClassLoader*>(javaLoader)));
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- SirtRef<mirror::Class> c(soa.Self(), class_linker->FindClass(soa.Self(), descriptor.c_str(),
- class_loader));
- if (c.get() == nullptr) {
+ Handle<mirror::Class> c(
+ hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor.c_str(), class_loader)));
+ if (c.Get() == nullptr) {
ScopedLocalRef<jthrowable> cause(env, env->ExceptionOccurred());
env->ExceptionClear();
jthrowable cnfe = reinterpret_cast<jthrowable>(env->NewObject(WellKnownClasses::java_lang_ClassNotFoundException,
@@ -79,7 +79,7 @@
if (initialize) {
class_linker->EnsureInitialized(c, true, true);
}
- return soa.AddLocalReference<jclass>(c.get());
+ return soa.AddLocalReference<jclass>(c.Get());
}
static jstring Class_getNameNative(JNIEnv* env, jobject javaThis) {
diff --git a/runtime/native/java_lang_Runtime.cc b/runtime/native/java_lang_Runtime.cc
index 636be5d..496a1b2 100644
--- a/runtime/native/java_lang_Runtime.cc
+++ b/runtime/native/java_lang_Runtime.cc
@@ -19,12 +19,13 @@
#include <unistd.h>
#include "gc/heap.h"
+#include "handle_scope-inl.h"
#include "jni_internal.h"
#include "mirror/class_loader.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "ScopedUtfChars.h"
-#include "sirt_ref-inl.h"
+#include "verify_object-inl.h"
namespace art {
@@ -65,8 +66,9 @@
std::string detail;
{
ScopedObjectAccess soa(env);
- SirtRef<mirror::ClassLoader> classLoader(soa.Self(),
- soa.Decode<mirror::ClassLoader*>(javaLoader));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> classLoader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(javaLoader)));
JavaVMExt* vm = Runtime::Current()->GetJavaVM();
bool success = vm->LoadNativeLibrary(filename.c_str(), classLoader, &detail);
if (success) {
diff --git a/runtime/native/java_lang_reflect_Array.cc b/runtime/native/java_lang_reflect_Array.cc
index a991818..7c6f2f3 100644
--- a/runtime/native/java_lang_reflect_Array.cc
+++ b/runtime/native/java_lang_reflect_Array.cc
@@ -22,21 +22,22 @@
#include "mirror/object-inl.h"
#include "object_utils.h"
#include "scoped_fast_native_object_access.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
namespace art {
static jobject Array_createMultiArray(JNIEnv* env, jclass, jclass javaElementClass, jobject javaDimArray) {
ScopedFastNativeObjectAccess soa(env);
DCHECK(javaElementClass != NULL);
- SirtRef<mirror::Class> element_class(soa.Self(), soa.Decode<mirror::Class*>(javaElementClass));
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::Class> element_class(hs.NewHandle(soa.Decode<mirror::Class*>(javaElementClass)));
DCHECK(element_class->IsClass());
DCHECK(javaDimArray != NULL);
mirror::Object* dimensions_obj = soa.Decode<mirror::Object*>(javaDimArray);
DCHECK(dimensions_obj->IsArrayInstance());
DCHECK_STREQ(ClassHelper(dimensions_obj->GetClass()).GetDescriptor(), "[I");
- SirtRef<mirror::IntArray> dimensions_array(soa.Self(),
- down_cast<mirror::IntArray*>(dimensions_obj));
+ Handle<mirror::IntArray> dimensions_array(
+ hs.NewHandle(down_cast<mirror::IntArray*>(dimensions_obj)));
mirror::Array* new_array = mirror::Array::CreateMultiArray(soa.Self(), element_class,
dimensions_array);
return soa.AddLocalReference<jobject>(new_array);
diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc
index 2445b53..1981bfd 100644
--- a/runtime/native/java_lang_reflect_Constructor.cc
+++ b/runtime/native/java_lang_reflect_Constructor.cc
@@ -38,13 +38,14 @@
jboolean accessible) {
ScopedFastNativeObjectAccess soa(env);
mirror::ArtMethod* m = mirror::ArtMethod::FromReflectedMethod(soa, javaMethod);
- SirtRef<mirror::Class> c(soa.Self(), m->GetDeclaringClass());
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::Class> c(hs.NewHandle(m->GetDeclaringClass()));
if (UNLIKELY(c->IsAbstract())) {
ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow();
soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/InstantiationException;",
"Can't instantiate %s %s",
c->IsInterface() ? "interface" : "abstract class",
- PrettyDescriptor(c.get()).c_str());
+ PrettyDescriptor(c.Get()).c_str());
return nullptr;
}
diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc
index ce622d9..0d54772 100644
--- a/runtime/native/java_lang_reflect_Field.cc
+++ b/runtime/native/java_lang_reflect_Field.cc
@@ -94,13 +94,14 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
soa.Self()->AssertThreadSuspensionIsAllowable();
if (f->IsStatic()) {
- SirtRef<mirror::Class> sirt_klass(soa.Self(), f->GetDeclaringClass());
- if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(sirt_klass, true, true))) {
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::Class> h_klass(hs.NewHandle(f->GetDeclaringClass()));
+ if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_klass, true, true))) {
DCHECK(soa.Self()->IsExceptionPending());
*class_or_rcvr = nullptr;
return false;
}
- *class_or_rcvr = sirt_klass.get();
+ *class_or_rcvr = h_klass.Get();
return true;
}
@@ -271,7 +272,8 @@
const char* field_type_desciptor = fh.GetTypeDescriptor();
field_prim_type = Primitive::GetType(field_type_desciptor[0]);
if (field_prim_type == Primitive::kPrimNot) {
- SirtRef<mirror::Object> sirt_obj(soa.Self(), o);
+ StackHandleScope<1> hs(soa.Self());
+ HandleWrapper<mirror::Object> h(hs.NewHandleWrapper(&o));
// May cause resolution.
CHECK(!kMovingFields) << "Resolution may trigger thread suspension";
field_type = fh.GetType(true);
diff --git a/runtime/oat.cc b/runtime/oat.cc
index a1f4fd0..cb9334a 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -22,7 +22,7 @@
namespace art {
const uint8_t OatHeader::kOatMagic[] = { 'o', 'a', 't', '\n' };
-const uint8_t OatHeader::kOatVersion[] = { '0', '2', '7', '\0' };
+const uint8_t OatHeader::kOatVersion[] = { '0', '2', '8', '\0' };
OatHeader::OatHeader() {
memset(this, 0, sizeof(*this));
@@ -345,40 +345,34 @@
OatMethodOffsets::OatMethodOffsets()
: code_offset_(0),
- frame_size_in_bytes_(0),
- core_spill_mask_(0),
- fp_spill_mask_(0),
gc_map_offset_(0)
{}
OatMethodOffsets::OatMethodOffsets(uint32_t code_offset,
- uint32_t frame_size_in_bytes,
- uint32_t core_spill_mask,
- uint32_t fp_spill_mask,
uint32_t gc_map_offset
)
: code_offset_(code_offset),
- frame_size_in_bytes_(frame_size_in_bytes),
- core_spill_mask_(core_spill_mask),
- fp_spill_mask_(fp_spill_mask),
gc_map_offset_(gc_map_offset)
{}
OatMethodOffsets::~OatMethodOffsets() {}
-OatMethodHeader::OatMethodHeader()
+OatQuickMethodHeader::OatQuickMethodHeader()
: mapping_table_offset_(0),
vmap_table_offset_(0),
+ frame_info_(0, 0, 0),
code_size_(0)
{}
-OatMethodHeader::OatMethodHeader(uint32_t vmap_table_offset, uint32_t mapping_table_offset,
- uint32_t code_size)
+OatQuickMethodHeader::OatQuickMethodHeader(
+ uint32_t mapping_table_offset, uint32_t vmap_table_offset, uint32_t frame_size_in_bytes,
+ uint32_t core_spill_mask, uint32_t fp_spill_mask, uint32_t code_size)
: mapping_table_offset_(mapping_table_offset),
vmap_table_offset_(vmap_table_offset),
+ frame_info_(frame_size_in_bytes, core_spill_mask, fp_spill_mask),
code_size_(code_size)
{}
-OatMethodHeader::~OatMethodHeader() {}
+OatQuickMethodHeader::~OatQuickMethodHeader() {}
} // namespace art
diff --git a/runtime/oat.h b/runtime/oat.h
index e9dfae9..7be768c 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -22,6 +22,7 @@
#include "base/macros.h"
#include "dex_file.h"
#include "instruction_set.h"
+#include "quick/quick_method_frame_info.h"
namespace art {
@@ -137,34 +138,31 @@
OatMethodOffsets();
OatMethodOffsets(uint32_t code_offset,
- uint32_t frame_size_in_bytes,
- uint32_t core_spill_mask,
- uint32_t fp_spill_mask,
uint32_t gc_map_offset);
~OatMethodOffsets();
uint32_t code_offset_;
- uint32_t frame_size_in_bytes_;
- uint32_t core_spill_mask_;
- uint32_t fp_spill_mask_;
uint32_t gc_map_offset_;
};
-// OatMethodHeader precedes the raw code chunk generated by the Quick compiler.
-class PACKED(4) OatMethodHeader {
+// OatQuickMethodHeader precedes the raw code chunk generated by the Quick compiler.
+class PACKED(4) OatQuickMethodHeader {
public:
- OatMethodHeader();
+ OatQuickMethodHeader();
- explicit OatMethodHeader(uint32_t mapping_table_offset, uint32_t vmap_table_offset,
- uint32_t code_size);
+ explicit OatQuickMethodHeader(uint32_t mapping_table_offset, uint32_t vmap_table_offset,
+ uint32_t frame_size_in_bytes, uint32_t core_spill_mask,
+ uint32_t fp_spill_mask, uint32_t code_size);
- ~OatMethodHeader();
+ ~OatQuickMethodHeader();
// The offset in bytes from the start of the mapping table to the end of the header.
uint32_t mapping_table_offset_;
// The offset in bytes from the start of the vmap table to the end of the header.
uint32_t vmap_table_offset_;
+ // The stack frame information.
+ QuickMethodFrameInfo frame_info_;
// The code size in bytes.
uint32_t code_size_;
};
diff --git a/runtime/oat_file-inl.h b/runtime/oat_file-inl.h
index 00ae797..97ca6b2 100644
--- a/runtime/oat_file-inl.h
+++ b/runtime/oat_file-inl.h
@@ -21,6 +21,30 @@
namespace art {
+inline size_t OatFile::OatMethod::GetFrameSizeInBytes() const {
+ const void* code = mirror::ArtMethod::EntryPointToCodePointer(GetQuickCode());
+ if (code == nullptr) {
+ return 0u;
+ }
+ return reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].frame_info_.FrameSizeInBytes();
+}
+
+inline uint32_t OatFile::OatMethod::GetCoreSpillMask() const {
+ const void* code = mirror::ArtMethod::EntryPointToCodePointer(GetQuickCode());
+ if (code == nullptr) {
+ return 0u;
+ }
+ return reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].frame_info_.CoreSpillMask();
+}
+
+inline uint32_t OatFile::OatMethod::GetFpSpillMask() const {
+ const void* code = mirror::ArtMethod::EntryPointToCodePointer(GetQuickCode());
+ if (code == nullptr) {
+ return 0u;
+ }
+ return reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].frame_info_.FpSpillMask();
+}
+
inline uint32_t OatFile::OatMethod::GetMappingTableOffset() const {
const uint8_t* mapping_table = GetMappingTable();
return static_cast<uint32_t>(mapping_table != nullptr ? mapping_table - begin_ : 0u);
@@ -36,7 +60,7 @@
if (code == nullptr) {
return nullptr;
}
- uint32_t offset = reinterpret_cast<const OatMethodHeader*>(code)[-1].mapping_table_offset_;
+ uint32_t offset = reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].mapping_table_offset_;
if (UNLIKELY(offset == 0u)) {
return nullptr;
}
@@ -48,7 +72,7 @@
if (code == nullptr) {
return nullptr;
}
- uint32_t offset = reinterpret_cast<const OatMethodHeader*>(code)[-1].vmap_table_offset_;
+ uint32_t offset = reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].vmap_table_offset_;
if (UNLIKELY(offset == 0u)) {
return nullptr;
}
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 56e1f05..7976f6a 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -464,7 +464,7 @@
// NOTE: We don't keep the number of methods and cannot do a bounds check for method_index.
if (methods_pointer_ == NULL) {
CHECK_EQ(kOatClassNoneCompiled, type_);
- return OatMethod(NULL, 0, 0, 0, 0, 0);
+ return OatMethod(NULL, 0, 0);
}
size_t methods_pointer_index;
if (bitmap_ == NULL) {
@@ -473,7 +473,7 @@
} else {
CHECK_EQ(kOatClassSomeCompiled, type_);
if (!BitVector::IsBitSet(bitmap_, method_index)) {
- return OatMethod(NULL, 0, 0, 0, 0, 0);
+ return OatMethod(NULL, 0, 0);
}
size_t num_set_bits = BitVector::NumSetBits(bitmap_, method_index);
methods_pointer_index = num_set_bits;
@@ -482,23 +482,14 @@
return OatMethod(
oat_file_->Begin(),
oat_method_offsets.code_offset_,
- oat_method_offsets.frame_size_in_bytes_,
- oat_method_offsets.core_spill_mask_,
- oat_method_offsets.fp_spill_mask_,
oat_method_offsets.gc_map_offset_);
}
OatFile::OatMethod::OatMethod(const byte* base,
const uint32_t code_offset,
- const size_t frame_size_in_bytes,
- const uint32_t core_spill_mask,
- const uint32_t fp_spill_mask,
const uint32_t gc_map_offset)
: begin_(base),
code_offset_(code_offset),
- frame_size_in_bytes_(frame_size_in_bytes),
- core_spill_mask_(core_spill_mask),
- fp_spill_mask_(fp_spill_mask),
native_gc_map_offset_(gc_map_offset) {
}
@@ -519,9 +510,6 @@
CHECK(method != NULL);
method->SetEntryPointFromPortableCompiledCode(GetPortableCode());
method->SetEntryPointFromQuickCompiledCode(GetQuickCode());
- method->SetFrameSizeInBytes(frame_size_in_bytes_);
- method->SetCoreSpillMask(core_spill_mask_);
- method->SetFpSpillMask(fp_spill_mask_);
method->SetNativeGcMap(GetNativeGcMap()); // Used by native methods in work around JNI mode.
}
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index b358a00..e5dc53c 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -78,15 +78,6 @@
uint32_t GetCodeOffset() const {
return code_offset_;
}
- size_t GetFrameSizeInBytes() const {
- return frame_size_in_bytes_;
- }
- uint32_t GetCoreSpillMask() const {
- return core_spill_mask_;
- }
- uint32_t GetFpSpillMask() const {
- return fp_spill_mask_;
- }
uint32_t GetNativeGcMapOffset() const {
return native_gc_map_offset_;
}
@@ -120,6 +111,9 @@
return GetOatPointer<const uint8_t*>(native_gc_map_offset_);
}
+ size_t GetFrameSizeInBytes() const;
+ uint32_t GetCoreSpillMask() const;
+ uint32_t GetFpSpillMask() const;
uint32_t GetMappingTableOffset() const;
uint32_t GetVmapTableOffset() const;
const uint8_t* GetMappingTable() const;
@@ -130,9 +124,6 @@
// Create an OatMethod with offsets relative to the given base address
OatMethod(const byte* base,
const uint32_t code_offset,
- const size_t frame_size_in_bytes,
- const uint32_t core_spill_mask,
- const uint32_t fp_spill_mask,
const uint32_t gc_map_offset);
private:
@@ -147,9 +138,6 @@
const byte* begin_;
uint32_t code_offset_;
- size_t frame_size_in_bytes_;
- uint32_t core_spill_mask_;
- uint32_t fp_spill_mask_;
uint32_t native_gc_map_offset_;
friend class OatClass;
@@ -165,7 +153,7 @@
return type_;
}
- // get the OatMethod entry based on its index into the class
+ // Get the OatMethod entry based on its index into the class
// defintion. direct methods come first, followed by virtual
// methods. note that runtime created methods such as miranda
// methods are not included.
diff --git a/runtime/object_utils.h b/runtime/object_utils.h
index 504537a..0dd6ca1 100644
--- a/runtime/object_utils.h
+++ b/runtime/object_utils.h
@@ -29,7 +29,7 @@
#include "mirror/string.h"
#include "runtime.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
#include <string>
@@ -38,33 +38,33 @@
template <typename T>
class ObjectLock {
public:
- explicit ObjectLock(Thread* self, const SirtRef<T>* object)
+ explicit ObjectLock(Thread* self, const Handle<T>* object)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
: self_(self), obj_(object) {
CHECK(object != nullptr);
- CHECK(object->get() != nullptr);
- obj_->get()->MonitorEnter(self_);
+ CHECK(object->Get() != nullptr);
+ obj_->Get()->MonitorEnter(self_);
}
~ObjectLock() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- obj_->get()->MonitorExit(self_);
+ obj_->Get()->MonitorExit(self_);
}
void WaitIgnoringInterrupts() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Monitor::Wait(self_, obj_->get(), 0, 0, false, kWaiting);
+ Monitor::Wait(self_, obj_->Get(), 0, 0, false, kWaiting);
}
void Notify() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- obj_->get()->Notify(self_);
+ obj_->Get()->Notify(self_);
}
void NotifyAll() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- obj_->get()->NotifyAll(self_);
+ obj_->Get()->NotifyAll(self_);
}
private:
Thread* const self_;
- const SirtRef<T>* const obj_;
+ const Handle<T>* const obj_;
DISALLOW_COPY_AND_ASSIGN(ObjectLock);
};
@@ -378,7 +378,8 @@
const DexFile& dex_file = GetDexFile();
uint32_t dex_method_idx = method_->GetDexMethodIndex();
const DexFile::MethodId& method_id = dex_file.GetMethodId(dex_method_idx);
- SirtRef<mirror::DexCache> dex_cache(Thread::Current(), GetDexCache());
+ StackHandleScope<1> hs(Thread::Current());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(GetDexCache()));
return GetClassLinker()->ResolveString(dex_file, method_id.name_idx_, dex_cache);
}
@@ -607,7 +608,8 @@
mirror::String* ResolveString(uint32_t string_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::String* s = method_->GetDexCacheStrings()->Get(string_idx);
if (UNLIKELY(s == nullptr)) {
- SirtRef<mirror::DexCache> dex_cache(Thread::Current(), GetDexCache());
+ StackHandleScope<1> hs(Thread::Current());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(GetDexCache()));
s = GetClassLinker()->ResolveString(GetDexFile(), string_idx, dex_cache);
}
return s;
diff --git a/runtime/os_linux.cc b/runtime/os_linux.cc
index 7ce17e0..d9a5813 100644
--- a/runtime/os_linux.cc
+++ b/runtime/os_linux.cc
@@ -23,7 +23,7 @@
#include "base/logging.h"
#include "base/unix_file/fd_file.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
namespace art {
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 8c18dff..55b6a27 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -674,17 +674,22 @@
// the art specific version. This can happen with on device
// boot.art/boot.oat generation by GenerateImage which relies on the
// value of BOOTCLASSPATH.
+#if defined(ART_TARGET)
std::string core_jar("/core.jar");
+ std::string core_libart_jar("/core-libart.jar");
+#else
+ // The host uses hostdex files.
+ std::string core_jar("/core-hostdex.jar");
+ std::string core_libart_jar("/core-libart-hostdex.jar");
+#endif
size_t core_jar_pos = boot_class_path_string_.find(core_jar);
if (core_jar_pos != std::string::npos) {
- boot_class_path_string_.replace(core_jar_pos, core_jar.size(), "/core-libart.jar");
+ boot_class_path_string_.replace(core_jar_pos, core_jar.size(), core_libart_jar);
}
if (compiler_callbacks_ == nullptr && image_.empty()) {
image_ += GetAndroidRoot();
- image_ += "/framework/boot-";
- image_ += GetInstructionSetString(image_isa_);
- image_ += ".art";
+ image_ += "/framework/boot.art";
}
if (heap_growth_limit_ == 0) {
heap_growth_limit_ = heap_maximum_size_;
diff --git a/runtime/parsed_options_test.cc b/runtime/parsed_options_test.cc
index 7f293cd..39f7638 100644
--- a/runtime/parsed_options_test.cc
+++ b/runtime/parsed_options_test.cc
@@ -16,7 +16,7 @@
#include "parsed_options.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#include "common_runtime_test.h"
namespace art {
diff --git a/runtime/profiler.h b/runtime/profiler.h
index 31fdc79..bcd7c29 100644
--- a/runtime/profiler.h
+++ b/runtime/profiler.h
@@ -29,7 +29,7 @@
#include "instrumentation.h"
#include "os.h"
#include "safe_map.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
namespace art {
diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc
index eebfba8..8517e34 100644
--- a/runtime/proxy_test.cc
+++ b/runtime/proxy_test.cc
@@ -107,8 +107,9 @@
TEST_F(ProxyTest, ProxyClassHelper) {
ScopedObjectAccess soa(Thread::Current());
jobject jclass_loader = LoadDex("Interfaces");
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
- soa.Decode<mirror::ClassLoader*>(jclass_loader));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
mirror::Class* I = class_linker_->FindClass(soa.Self(), "LInterfaces$I;", class_loader);
mirror::Class* J = class_linker_->FindClass(soa.Self(), "LInterfaces$J;", class_loader);
@@ -136,8 +137,9 @@
TEST_F(ProxyTest, ProxyFieldHelper) {
ScopedObjectAccess soa(Thread::Current());
jobject jclass_loader = LoadDex("Interfaces");
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
- soa.Decode<mirror::ClassLoader*>(jclass_loader));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
mirror::Class* I = class_linker_->FindClass(soa.Self(), "LInterfaces$I;", class_loader);
mirror::Class* J = class_linker_->FindClass(soa.Self(), "LInterfaces$J;", class_loader);
diff --git a/runtime/quick/quick_method_frame_info.h b/runtime/quick/quick_method_frame_info.h
new file mode 100644
index 0000000..684d4da
--- /dev/null
+++ b/runtime/quick/quick_method_frame_info.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_QUICK_QUICK_METHOD_FRAME_INFO_H_
+#define ART_RUNTIME_QUICK_QUICK_METHOD_FRAME_INFO_H_
+
+#include <stdint.h>
+
+#include "base/macros.h"
+
+namespace art {
+
+class PACKED(4) QuickMethodFrameInfo {
+ public:
+ constexpr QuickMethodFrameInfo()
+ : frame_size_in_bytes_(0u),
+ core_spill_mask_(0u),
+ fp_spill_mask_(0u) {
+ }
+
+ constexpr QuickMethodFrameInfo(uint32_t frame_size_in_bytes, uint32_t core_spill_mask,
+ uint32_t fp_spill_mask)
+ : frame_size_in_bytes_(frame_size_in_bytes),
+ core_spill_mask_(core_spill_mask),
+ fp_spill_mask_(fp_spill_mask) {
+ }
+
+ uint32_t FrameSizeInBytes() const {
+ return frame_size_in_bytes_;
+ }
+
+ uint32_t CoreSpillMask() const {
+ return core_spill_mask_;
+ }
+
+ uint32_t FpSpillMask() const {
+ return fp_spill_mask_;
+ }
+
+ private:
+ uint32_t frame_size_in_bytes_;
+ uint32_t core_spill_mask_;
+ uint32_t fp_spill_mask_;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_QUICK_QUICK_METHOD_FRAME_INFO_H_
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index aee0d64..8300195 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -20,7 +20,7 @@
#include "deoptimize_stack_visitor.h"
#include "entrypoints/entrypoint_utils.h"
#include "mirror/art_method-inl.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
namespace art {
@@ -35,10 +35,11 @@
void QuickExceptionHandler::FindCatch(const ThrowLocation& throw_location,
mirror::Throwable* exception) {
DCHECK(!is_deoptimization_);
- SirtRef<mirror::Throwable> exception_ref(self_, exception);
+ StackHandleScope<1> hs(self_);
+ Handle<mirror::Throwable> exception_ref(hs.NewHandle(exception));
// Walk the stack to find catch handler or prepare for deoptimization.
- CatchBlockStackVisitor visitor(self_, context_, exception_ref, this);
+ CatchBlockStackVisitor visitor(self_, context_, &exception_ref, this);
visitor.WalkStack(true);
mirror::ArtMethod* catch_method = *handler_quick_frame_;
@@ -56,13 +57,13 @@
DCHECK(!self_->IsExceptionPending());
} else {
// Put exception back in root set with clear throw location.
- self_->SetException(ThrowLocation(), exception_ref.get());
+ self_->SetException(ThrowLocation(), exception_ref.Get());
}
// The debugger may suspend this thread and walk its stack. Let's do this before popping
// instrumentation frames.
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
instrumentation->ExceptionCaughtEvent(self_, throw_location, catch_method, handler_dex_pc_,
- exception_ref.get());
+ exception_ref.Get());
}
void QuickExceptionHandler::DeoptimizeStack() {
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index f0ba003..98310e6 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -467,11 +467,12 @@
mirror::Class* declaring_class = m->GetDeclaringClass();
if (UNLIKELY(!declaring_class->IsInitialized())) {
- SirtRef<mirror::Class> sirt_c(soa.Self(), declaring_class);
- if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(sirt_c, true, true)) {
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::Class> h_class(hs.NewHandle(declaring_class));
+ if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_class, true, true)) {
return nullptr;
}
- declaring_class = sirt_c.get();
+ declaring_class = h_class.Get();
}
mirror::Object* receiver = nullptr;
diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc
index f7fc020..3b66abe 100644
--- a/runtime/reflection_test.cc
+++ b/runtime/reflection_test.cc
@@ -87,10 +87,10 @@
const char* class_name = is_static ? "StaticLeafMethods" : "NonStaticLeafMethods";
jobject jclass_loader(LoadDex(class_name));
Thread* self = Thread::Current();
- SirtRef<mirror::ClassLoader> null_class_loader(self, nullptr);
- SirtRef<mirror::ClassLoader>
- class_loader(self,
- ScopedObjectAccessUnchecked(self).Decode<mirror::ClassLoader*>(jclass_loader));
+ StackHandleScope<2> hs(self);
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(
+ ScopedObjectAccessUnchecked(self).Decode<mirror::ClassLoader*>(jclass_loader)));
if (is_static) {
MakeExecutable(ScopedObjectAccessUnchecked(self).Decode<mirror::ClassLoader*>(jclass_loader),
class_name);
@@ -485,8 +485,9 @@
TEST_DISABLED_FOR_PORTABLE();
ScopedObjectAccess soa(Thread::Current());
jobject jclass_loader = LoadDex("Main");
- SirtRef<mirror::ClassLoader>
- class_loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(jclass_loader));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
CompileDirectMethod(class_loader, "Main", "main", "([Ljava/lang/String;)V");
mirror::Class* klass = class_linker_->FindClass(soa.Self(), "LMain;", class_loader);
diff --git a/runtime/runtime-inl.h b/runtime/runtime-inl.h
new file mode 100644
index 0000000..29ddd1d
--- /dev/null
+++ b/runtime/runtime-inl.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_RUNTIME_INL_H_
+#define ART_RUNTIME_RUNTIME_INL_H_
+
+#include "runtime.h"
+
+namespace art {
+
+inline QuickMethodFrameInfo Runtime::GetRuntimeMethodFrameInfo(mirror::ArtMethod* method) const {
+ DCHECK(method != nullptr);
+ // Cannot be imt-conflict-method or resolution-method.
+ DCHECK(method != GetImtConflictMethod());
+ DCHECK(method != GetResolutionMethod());
+ // Don't use GetCalleeSaveMethod(), some tests don't set all callee save methods.
+ if (method == callee_save_methods_[Runtime::kRefsAndArgs]) {
+ return GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
+ } else if (method == callee_save_methods_[Runtime::kSaveAll]) {
+ return GetCalleeSaveMethodFrameInfo(Runtime::kSaveAll);
+ } else {
+ DCHECK(method == callee_save_methods_[Runtime::kRefsOnly]);
+ return GetCalleeSaveMethodFrameInfo(Runtime::kRefsOnly);
+ }
+}
+
+} // namespace art
+
+#endif // ART_RUNTIME_RUNTIME_INL_H_
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index b913bd1..d183cba 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -30,10 +30,15 @@
#include <vector>
#include <fcntl.h>
+#include "arch/arm/quick_method_frame_info_arm.h"
#include "arch/arm/registers_arm.h"
+#include "arch/arm64/quick_method_frame_info_arm64.h"
#include "arch/arm64/registers_arm64.h"
+#include "arch/mips/quick_method_frame_info_mips.h"
#include "arch/mips/registers_mips.h"
+#include "arch/x86/quick_method_frame_info_x86.h"
#include "arch/x86/registers_x86.h"
+#include "arch/x86_64/quick_method_frame_info_x86_64.h"
#include "arch/x86_64/registers_x86_64.h"
#include "atomic.h"
#include "class_linker.h"
@@ -55,18 +60,19 @@
#include "monitor.h"
#include "parsed_options.h"
#include "oat_file.h"
+#include "quick/quick_method_frame_info.h"
#include "reflection.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "signal_catcher.h"
#include "signal_set.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
#include "thread.h"
#include "thread_list.h"
#include "trace.h"
#include "transaction.h"
#include "profiler.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#include "verifier/method_verifier.h"
#include "well_known_classes.h"
@@ -88,6 +94,7 @@
resolution_method_(nullptr),
imt_conflict_method_(nullptr),
default_imt_(nullptr),
+ instruction_set_(kNone),
compiler_callbacks_(nullptr),
is_zygote_(false),
is_concurrent_gc_enabled_(true),
@@ -324,8 +331,9 @@
ScopedObjectAccess soa(Thread::Current());
ClassLinker* cl = Runtime::Current()->GetClassLinker();
- SirtRef<mirror::Class> class_loader_class(
- soa.Self(), soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ClassLoader));
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::Class> class_loader_class(
+ hs.NewHandle(soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ClassLoader)));
CHECK(cl->EnsureInitialized(class_loader_class, true, true));
mirror::ArtMethod* getSystemClassLoader =
@@ -333,19 +341,18 @@
CHECK(getSystemClassLoader != NULL);
JValue result = InvokeWithJValues(soa, nullptr, soa.EncodeMethod(getSystemClassLoader), nullptr);
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
- down_cast<mirror::ClassLoader*>(result.GetL()));
- CHECK(class_loader.get() != nullptr);
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(down_cast<mirror::ClassLoader*>(result.GetL())));
+ CHECK(class_loader.Get() != nullptr);
JNIEnv* env = soa.Self()->GetJniEnv();
ScopedLocalRef<jobject> system_class_loader(env,
- soa.AddLocalReference<jobject>(class_loader.get()));
+ soa.AddLocalReference<jobject>(class_loader.Get()));
CHECK(system_class_loader.get() != nullptr);
- soa.Self()->SetClassLoaderOverride(class_loader.get());
+ soa.Self()->SetClassLoaderOverride(class_loader.Get());
- SirtRef<mirror::Class> thread_class(
- soa.Self(),
- soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread));
+ Handle<mirror::Class> thread_class(
+ hs.NewHandle(soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread)));
CHECK(cl->EnsureInitialized(thread_class, true, true));
mirror::ArtField* contextClassLoader =
@@ -353,7 +360,7 @@
CHECK(contextClassLoader != NULL);
// We can't run in a transaction yet.
- contextClassLoader->SetObject<false>(soa.Self()->GetPeer(), class_loader.get());
+ contextClassLoader->SetObject<false>(soa.Self()->GetPeer(), class_loader.Get());
return env->NewGlobalRef(system_class_loader.get());
}
@@ -675,7 +682,8 @@
std::string mapped_name(StringPrintf(OS_SHARED_LIB_FORMAT_STR, "javacore"));
std::string reason;
self->TransitionFromSuspendedToRunnable();
- SirtRef<mirror::ClassLoader> class_loader(self, nullptr);
+ StackHandleScope<1> hs(self);
+ auto class_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
if (!instance_->java_vm_->LoadNativeLibrary(mapped_name, class_loader, &reason)) {
LOG(FATAL) << "LoadNativeLibrary failed for \"" << mapped_name << "\": " << reason;
}
@@ -937,19 +945,22 @@
mirror::ObjectArray<mirror::ArtMethod>* Runtime::CreateDefaultImt(ClassLinker* cl) {
Thread* self = Thread::Current();
- SirtRef<mirror::ObjectArray<mirror::ArtMethod> > imtable(self, cl->AllocArtMethodArray(self, 64));
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ObjectArray<mirror::ArtMethod>> imtable(
+ hs.NewHandle(cl->AllocArtMethodArray(self, 64)));
mirror::ArtMethod* imt_conflict_method = Runtime::Current()->GetImtConflictMethod();
for (size_t i = 0; i < static_cast<size_t>(imtable->GetLength()); i++) {
imtable->Set<false>(i, imt_conflict_method);
}
- return imtable.get();
+ return imtable.Get();
}
mirror::ArtMethod* Runtime::CreateImtConflictMethod() {
Thread* self = Thread::Current();
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
- SirtRef<mirror::ArtMethod> method(self, class_linker->AllocArtMethod(self));
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ArtMethod> method(hs.NewHandle(class_linker->AllocArtMethod(self)));
method->SetDeclaringClass(mirror::ArtMethod::GetJavaLangReflectArtMethod());
// TODO: use a special method for imt conflict method saves.
method->SetDexMethodIndex(DexFile::kDexNoIndex);
@@ -961,14 +972,15 @@
method->SetEntryPointFromPortableCompiledCode(GetPortableImtConflictTrampoline(class_linker));
method->SetEntryPointFromQuickCompiledCode(GetQuickImtConflictTrampoline(class_linker));
}
- return method.get();
+ return method.Get();
}
mirror::ArtMethod* Runtime::CreateResolutionMethod() {
Thread* self = Thread::Current();
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
- SirtRef<mirror::ArtMethod> method(self, class_linker->AllocArtMethod(self));
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ArtMethod> method(hs.NewHandle(class_linker->AllocArtMethod(self)));
method->SetDeclaringClass(mirror::ArtMethod::GetJavaLangReflectArtMethod());
// TODO: use a special method for resolution method saves
method->SetDexMethodIndex(DexFile::kDexNoIndex);
@@ -980,133 +992,22 @@
method->SetEntryPointFromPortableCompiledCode(GetPortableResolutionTrampoline(class_linker));
method->SetEntryPointFromQuickCompiledCode(GetQuickResolutionTrampoline(class_linker));
}
- return method.get();
+ return method.Get();
}
-mirror::ArtMethod* Runtime::CreateCalleeSaveMethod(InstructionSet instruction_set,
- CalleeSaveType type) {
+mirror::ArtMethod* Runtime::CreateCalleeSaveMethod(CalleeSaveType type) {
Thread* self = Thread::Current();
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
- SirtRef<mirror::ArtMethod> method(self, class_linker->AllocArtMethod(self));
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ArtMethod> method(hs.NewHandle(class_linker->AllocArtMethod(self)));
method->SetDeclaringClass(mirror::ArtMethod::GetJavaLangReflectArtMethod());
// TODO: use a special method for callee saves
method->SetDexMethodIndex(DexFile::kDexNoIndex);
method->SetEntryPointFromPortableCompiledCode(nullptr);
method->SetEntryPointFromQuickCompiledCode(nullptr);
- if ((instruction_set == kThumb2) || (instruction_set == kArm)) {
- uint32_t ref_spills = (1 << art::arm::R5) | (1 << art::arm::R6) | (1 << art::arm::R7) |
- (1 << art::arm::R8) | (1 << art::arm::R10) | (1 << art::arm::R11);
- uint32_t arg_spills = (1 << art::arm::R1) | (1 << art::arm::R2) | (1 << art::arm::R3);
- uint32_t all_spills = (1 << art::arm::R4) | (1 << art::arm::R9);
- uint32_t core_spills = ref_spills | (type == kRefsAndArgs ? arg_spills : 0) |
- (type == kSaveAll ? all_spills : 0) | (1 << art::arm::LR);
- uint32_t fp_all_spills = (1 << art::arm::S0) | (1 << art::arm::S1) | (1 << art::arm::S2) |
- (1 << art::arm::S3) | (1 << art::arm::S4) | (1 << art::arm::S5) |
- (1 << art::arm::S6) | (1 << art::arm::S7) | (1 << art::arm::S8) |
- (1 << art::arm::S9) | (1 << art::arm::S10) | (1 << art::arm::S11) |
- (1 << art::arm::S12) | (1 << art::arm::S13) | (1 << art::arm::S14) |
- (1 << art::arm::S15) | (1 << art::arm::S16) | (1 << art::arm::S17) |
- (1 << art::arm::S18) | (1 << art::arm::S19) | (1 << art::arm::S20) |
- (1 << art::arm::S21) | (1 << art::arm::S22) | (1 << art::arm::S23) |
- (1 << art::arm::S24) | (1 << art::arm::S25) | (1 << art::arm::S26) |
- (1 << art::arm::S27) | (1 << art::arm::S28) | (1 << art::arm::S29) |
- (1 << art::arm::S30) | (1 << art::arm::S31);
- uint32_t fp_spills = type == kSaveAll ? fp_all_spills : 0;
- size_t frame_size = RoundUp((POPCOUNT(core_spills) /* gprs */ +
- POPCOUNT(fp_spills) /* fprs */ +
- 1 /* Method* */) * kArmPointerSize, kStackAlignment);
- method->SetFrameSizeInBytes(frame_size);
- method->SetCoreSpillMask(core_spills);
- method->SetFpSpillMask(fp_spills);
- } else if (instruction_set == kMips) {
- uint32_t ref_spills = (1 << art::mips::S2) | (1 << art::mips::S3) | (1 << art::mips::S4) |
- (1 << art::mips::S5) | (1 << art::mips::S6) | (1 << art::mips::S7) |
- (1 << art::mips::GP) | (1 << art::mips::FP);
- uint32_t arg_spills = (1 << art::mips::A1) | (1 << art::mips::A2) | (1 << art::mips::A3);
- uint32_t all_spills = (1 << art::mips::S0) | (1 << art::mips::S1);
- uint32_t core_spills = ref_spills | (type == kRefsAndArgs ? arg_spills : 0) |
- (type == kSaveAll ? all_spills : 0) | (1 << art::mips::RA);
- size_t frame_size = RoundUp((POPCOUNT(core_spills) /* gprs */ +
- (type == kRefsAndArgs ? 0 : 3) + 1 /* Method* */) *
- kMipsPointerSize, kStackAlignment);
- method->SetFrameSizeInBytes(frame_size);
- method->SetCoreSpillMask(core_spills);
- method->SetFpSpillMask(0);
- } else if (instruction_set == kX86) {
- uint32_t ref_spills = (1 << art::x86::EBP) | (1 << art::x86::ESI) | (1 << art::x86::EDI);
- uint32_t arg_spills = (1 << art::x86::ECX) | (1 << art::x86::EDX) | (1 << art::x86::EBX);
- uint32_t core_spills = ref_spills | (type == kRefsAndArgs ? arg_spills : 0) |
- (1 << art::x86::kNumberOfCpuRegisters); // fake return address callee save
- size_t frame_size = RoundUp((POPCOUNT(core_spills) /* gprs */ +
- 1 /* Method* */) * kX86PointerSize, kStackAlignment);
- method->SetFrameSizeInBytes(frame_size);
- method->SetCoreSpillMask(core_spills);
- method->SetFpSpillMask(0);
- } else if (instruction_set == kX86_64) {
- uint32_t ref_spills =
- (1 << art::x86_64::RBX) | (1 << art::x86_64::RBP) | (1 << art::x86_64::R12) |
- (1 << art::x86_64::R13) | (1 << art::x86_64::R14) | (1 << art::x86_64::R15);
- uint32_t arg_spills =
- (1 << art::x86_64::RSI) | (1 << art::x86_64::RDX) | (1 << art::x86_64::RCX) |
- (1 << art::x86_64::R8) | (1 << art::x86_64::R9);
- uint32_t core_spills = ref_spills | (type == kRefsAndArgs ? arg_spills : 0) |
- (1 << art::x86_64::kNumberOfCpuRegisters); // fake return address callee save
- uint32_t fp_arg_spills =
- (1 << art::x86_64::XMM0) | (1 << art::x86_64::XMM1) | (1 << art::x86_64::XMM2) |
- (1 << art::x86_64::XMM3) | (1 << art::x86_64::XMM4) | (1 << art::x86_64::XMM5) |
- (1 << art::x86_64::XMM6) | (1 << art::x86_64::XMM7);
- uint32_t fp_spills = (type == kRefsAndArgs ? fp_arg_spills : 0);
- size_t frame_size = RoundUp((POPCOUNT(core_spills) /* gprs */ +
- POPCOUNT(fp_spills) /* fprs */ +
- 1 /* Method* */) * kX86_64PointerSize, kStackAlignment);
- method->SetFrameSizeInBytes(frame_size);
- method->SetCoreSpillMask(core_spills);
- method->SetFpSpillMask(fp_spills);
- } else if (instruction_set == kArm64) {
- // Callee saved registers
- uint32_t ref_spills = (1 << art::arm64::X19) | (1 << art::arm64::X20) | (1 << art::arm64::X21) |
- (1 << art::arm64::X22) | (1 << art::arm64::X23) | (1 << art::arm64::X24) |
- (1 << art::arm64::X25) | (1 << art::arm64::X26) | (1 << art::arm64::X27) |
- (1 << art::arm64::X28);
- // X0 is the method pointer. Not saved.
- uint32_t arg_spills = (1 << art::arm64::X1) | (1 << art::arm64::X2) | (1 << art::arm64::X3) |
- (1 << art::arm64::X4) | (1 << art::arm64::X5) | (1 << art::arm64::X6) |
- (1 << art::arm64::X7);
- // TODO This is conservative. Only ALL should include the thread register.
- // The thread register is not preserved by the aapcs64.
- // LR is always saved.
- uint32_t all_spills = 0; // (1 << art::arm64::LR);
- uint32_t core_spills = ref_spills | (type == kRefsAndArgs ? arg_spills : 0) |
- (type == kSaveAll ? all_spills : 0) | (1 << art::arm64::FP)
- | (1 << art::arm64::X18) | (1 << art::arm64::LR);
-
- // Save callee-saved floating point registers. Rest are scratch/parameters.
- uint32_t fp_arg_spills = (1 << art::arm64::D0) | (1 << art::arm64::D1) | (1 << art::arm64::D2) |
- (1 << art::arm64::D3) | (1 << art::arm64::D4) | (1 << art::arm64::D5) |
- (1 << art::arm64::D6) | (1 << art::arm64::D7);
- uint32_t fp_ref_spills = (1 << art::arm64::D8) | (1 << art::arm64::D9) | (1 << art::arm64::D10) |
- (1 << art::arm64::D11) | (1 << art::arm64::D12) | (1 << art::arm64::D13) |
- (1 << art::arm64::D14) | (1 << art::arm64::D15);
- uint32_t fp_all_spills = fp_arg_spills |
- (1 << art::arm64::D16) | (1 << art::arm64::D17) | (1 << art::arm64::D18) |
- (1 << art::arm64::D19) | (1 << art::arm64::D20) | (1 << art::arm64::D21) |
- (1 << art::arm64::D22) | (1 << art::arm64::D23) | (1 << art::arm64::D24) |
- (1 << art::arm64::D25) | (1 << art::arm64::D26) | (1 << art::arm64::D27) |
- (1 << art::arm64::D28) | (1 << art::arm64::D29) | (1 << art::arm64::D30) |
- (1 << art::arm64::D31);
- uint32_t fp_spills = fp_ref_spills | (type == kRefsAndArgs ? fp_arg_spills: 0)
- | (type == kSaveAll ? fp_all_spills : 0);
- size_t frame_size = RoundUp((POPCOUNT(core_spills) /* gprs */ +
- POPCOUNT(fp_spills) /* fprs */ +
- 1 /* Method* */) * kArm64PointerSize, kStackAlignment);
- method->SetFrameSizeInBytes(frame_size);
- method->SetCoreSpillMask(core_spills);
- method->SetFpSpillMask(fp_spills);
- } else {
- UNIMPLEMENTED(FATAL) << instruction_set;
- }
- return method.get();
+ DCHECK_NE(instruction_set_, kNone);
+ return method.Get();
}
void Runtime::DisallowNewSystemWeaks() {
@@ -1123,6 +1024,38 @@
Dbg::AllowNewObjectRegistryObjects();
}
+void Runtime::SetInstructionSet(InstructionSet instruction_set) {
+ instruction_set_ = instruction_set;
+ if ((instruction_set_ == kThumb2) || (instruction_set_ == kArm)) {
+ for (int i = 0; i != kLastCalleeSaveType; ++i) {
+ CalleeSaveType type = static_cast<CalleeSaveType>(i);
+ callee_save_method_frame_infos_[i] = arm::ArmCalleeSaveMethodFrameInfo(type);
+ }
+ } else if (instruction_set_ == kMips) {
+ for (int i = 0; i != kLastCalleeSaveType; ++i) {
+ CalleeSaveType type = static_cast<CalleeSaveType>(i);
+ callee_save_method_frame_infos_[i] = mips::MipsCalleeSaveMethodFrameInfo(type);
+ }
+ } else if (instruction_set_ == kX86) {
+ for (int i = 0; i != kLastCalleeSaveType; ++i) {
+ CalleeSaveType type = static_cast<CalleeSaveType>(i);
+ callee_save_method_frame_infos_[i] = x86::X86CalleeSaveMethodFrameInfo(type);
+ }
+ } else if (instruction_set_ == kX86_64) {
+ for (int i = 0; i != kLastCalleeSaveType; ++i) {
+ CalleeSaveType type = static_cast<CalleeSaveType>(i);
+ callee_save_method_frame_infos_[i] = x86_64::X86_64CalleeSaveMethodFrameInfo(type);
+ }
+ } else if (instruction_set_ == kArm64) {
+ for (int i = 0; i != kLastCalleeSaveType; ++i) {
+ CalleeSaveType type = static_cast<CalleeSaveType>(i);
+ callee_save_method_frame_infos_[i] = arm64::Arm64CalleeSaveMethodFrameInfo(type);
+ }
+ } else {
+ UNIMPLEMENTED(FATAL) << instruction_set_;
+ }
+}
+
void Runtime::SetCalleeSaveMethod(mirror::ArtMethod* method, CalleeSaveType type) {
DCHECK_LT(static_cast<int>(type), static_cast<int>(kLastCalleeSaveType));
callee_save_methods_[type] = method;
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 1ee0b1a..07b47c3 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -34,6 +34,7 @@
#include "instrumentation.h"
#include "jobject_comparator.h"
#include "object_callbacks.h"
+#include "quick/quick_method_frame_info.h"
#include "runtime_stats.h"
#include "safe_map.h"
#include "fault_handler.h"
@@ -325,20 +326,25 @@
return callee_save_methods_[type];
}
+ QuickMethodFrameInfo GetCalleeSaveMethodFrameInfo(CalleeSaveType type) const {
+ return callee_save_method_frame_infos_[type];
+ }
+
+ QuickMethodFrameInfo GetRuntimeMethodFrameInfo(mirror::ArtMethod* method) const;
+
static size_t GetCalleeSaveMethodOffset(CalleeSaveType type) {
return OFFSETOF_MEMBER(Runtime, callee_save_methods_[type]);
}
+ InstructionSet GetInstructionSet() const {
+ return instruction_set_;
+ }
+
+ void SetInstructionSet(InstructionSet instruction_set);
+
void SetCalleeSaveMethod(mirror::ArtMethod* method, CalleeSaveType type);
- mirror::ArtMethod* CreateCalleeSaveMethod(InstructionSet instruction_set,
- CalleeSaveType type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- mirror::ArtMethod* CreateRefOnlyCalleeSaveMethod(InstructionSet instruction_set)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- mirror::ArtMethod* CreateRefAndArgsCalleeSaveMethod(InstructionSet instruction_set)
+ mirror::ArtMethod* CreateCalleeSaveMethod(CalleeSaveType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
int32_t GetStat(int kind);
@@ -468,6 +474,9 @@
mirror::ArtMethod* imt_conflict_method_;
mirror::ObjectArray<mirror::ArtMethod>* default_imt_;
+ InstructionSet instruction_set_;
+ QuickMethodFrameInfo callee_save_method_frame_infos_[kLastCalleeSaveType];
+
CompilerCallbacks* compiler_callbacks_;
bool is_zygote_;
bool is_concurrent_gc_enabled_;
diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h
index 7698d6a..dbd961f 100644
--- a/runtime/scoped_thread_state_change.h
+++ b/runtime/scoped_thread_state_change.h
@@ -25,7 +25,7 @@
// Scoped change into and out of a particular state. Handles Runnable transitions that require
// more complicated suspension checking. The subclasses ScopedObjectAccessUnchecked and
-// ScopedObjectAccess are used to handle the change into Runnable to get direct access to objects,
+// ScopedObjectAccess are used to handle the change into Runnable to Get direct access to objects,
// the unchecked variant doesn't aid annotalysis.
class ScopedThreadStateChange {
public:
diff --git a/runtime/sirt_ref-inl.h b/runtime/sirt_ref-inl.h
deleted file mode 100644
index 7de624a..0000000
--- a/runtime/sirt_ref-inl.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_SIRT_REF_INL_H_
-#define ART_RUNTIME_SIRT_REF_INL_H_
-
-#include "sirt_ref.h"
-
-#include "verify_object-inl.h"
-
-namespace art {
-
-template<class T> inline SirtRef<T>::SirtRef(Thread* self, T* object, bool should_verify)
- : self_(self), sirt_(object) {
- if (should_verify) {
- VerifyObject(object);
- }
- self_->PushSirt(&sirt_);
-}
-
-template<class T> inline SirtRef<T>::~SirtRef() {
- StackIndirectReferenceTable* top_sirt = self_->PopSirt();
- DCHECK_EQ(top_sirt, &sirt_);
-}
-
-template<class T> inline T* SirtRef<T>::reset(T* object, bool should_verify) {
- if (should_verify) {
- VerifyObject(object);
- }
- T* old_ref = get();
- sirt_.SetReference(0, object);
- return old_ref;
-}
-
-} // namespace art
-
-#endif // ART_RUNTIME_SIRT_REF_INL_H_
diff --git a/runtime/sirt_ref.h b/runtime/sirt_ref.h
deleted file mode 100644
index cf23891..0000000
--- a/runtime/sirt_ref.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_SIRT_REF_H_
-#define ART_RUNTIME_SIRT_REF_H_
-
-#include "base/casts.h"
-#include "base/logging.h"
-#include "base/macros.h"
-#include "stack_indirect_reference_table.h"
-#include "thread.h"
-
-namespace art {
-
-template<class T>
-class SirtRef {
- public:
- SirtRef(Thread* self, T* object, bool should_verify = true);
- ~SirtRef();
-
- T& operator*() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return *get();
- }
- T* operator->() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return get();
- }
- T* get() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return down_cast<T*>(sirt_.GetReference(0));
- }
-
- // Returns the old reference.
- T* reset(T* object = nullptr, bool should_verify = true)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- private:
- Thread* const self_;
- StackIndirectReferenceTable sirt_;
-
- DISALLOW_COPY_AND_ASSIGN(SirtRef);
-};
-
-// A version of SirtRef which disables the object verification.
-template<class T>
-class SirtRefNoVerify : public SirtRef<T> {
- public:
- SirtRefNoVerify(Thread* self, T* object) : SirtRef<T>(self, object, false) {}
- // Returns the old reference.
- T* reset(T* object = nullptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return SirtRef<T>::reset(object, false);
- }
-};
-
-} // namespace art
-
-#endif // ART_RUNTIME_SIRT_REF_H_
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 6667419..e0189e9 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -23,6 +23,7 @@
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "object_utils.h"
+#include "quick/quick_method_frame_info.h"
#include "runtime.h"
#include "thread.h"
#include "thread_list.h"
@@ -110,11 +111,9 @@
return NULL;
} else if (m->IsNative()) {
if (cur_quick_frame_ != NULL) {
- StackIndirectReferenceTable* sirt =
- reinterpret_cast<StackIndirectReferenceTable*>(
- reinterpret_cast<char*>(cur_quick_frame_) +
- m->GetSirtOffsetInBytes());
- return sirt->GetReference(0);
+ HandleScope* hs = reinterpret_cast<HandleScope*>(
+ reinterpret_cast<char*>(cur_quick_frame_) + m->GetHandleScopeOffsetInBytes());
+ return hs->GetReference(0);
} else {
return cur_shadow_frame_->GetVRegReference(0);
}
@@ -142,18 +141,17 @@
DCHECK(m == GetMethod());
const VmapTable vmap_table(m->GetVmapTable());
uint32_t vmap_offset;
+ QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo();
// TODO: IsInContext stops before spotting floating point registers.
if (vmap_table.IsInContext(vreg, kind, &vmap_offset)) {
bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
- uint32_t spill_mask = is_float ? m->GetFpSpillMask()
- : m->GetCoreSpillMask();
+ uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask();
return GetGPR(vmap_table.ComputeRegister(spill_mask, vmap_offset, kind));
} else {
const DexFile::CodeItem* code_item = MethodHelper(m).GetCodeItem();
DCHECK(code_item != NULL) << PrettyMethod(m); // Can't be NULL or how would we compile its instructions?
- size_t frame_size = m->GetFrameSizeInBytes();
- return *GetVRegAddr(cur_quick_frame_, code_item, m->GetCoreSpillMask(), m->GetFpSpillMask(),
- frame_size, vreg);
+ return *GetVRegAddr(cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
+ frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
}
} else {
return cur_shadow_frame_->GetVReg(vreg);
@@ -167,19 +165,18 @@
DCHECK(m == GetMethod());
const VmapTable vmap_table(m->GetVmapTable());
uint32_t vmap_offset;
+ QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo();
// TODO: IsInContext stops before spotting floating point registers.
if (vmap_table.IsInContext(vreg, kind, &vmap_offset)) {
bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
- uint32_t spill_mask = is_float ? m->GetFpSpillMask() : m->GetCoreSpillMask();
+ uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask();
const uint32_t reg = vmap_table.ComputeRegister(spill_mask, vmap_offset, kReferenceVReg);
SetGPR(reg, new_value);
} else {
const DexFile::CodeItem* code_item = MethodHelper(m).GetCodeItem();
DCHECK(code_item != NULL) << PrettyMethod(m); // Can't be NULL or how would we compile its instructions?
- uint32_t core_spills = m->GetCoreSpillMask();
- uint32_t fp_spills = m->GetFpSpillMask();
- size_t frame_size = m->GetFrameSizeInBytes();
- int offset = GetVRegOffset(code_item, core_spills, fp_spills, frame_size, vreg, kRuntimeISA);
+ int offset = GetVRegOffset(code_item, frame_info.CoreSpillMask(), frame_info.FpSpillMask(),
+ frame_info.FrameSizeInBytes(), vreg, kRuntimeISA);
byte* vreg_addr = reinterpret_cast<byte*>(GetCurrentQuickFrame()) + offset;
*reinterpret_cast<uint32_t*>(vreg_addr) = new_value;
}
@@ -278,7 +275,7 @@
CHECK_NE(frame_size, 0u);
// A rough guess at an upper size we expect to see for a frame.
// 256 registers
- // 2 words Sirt overhead
+ // 2 words HandleScope overhead
// 3+3 register spills
// TODO: this seems architecture specific for the case of JNI frames.
// TODO: 083-compiler-regressions ManyFloatArgs shows this estimate is wrong.
diff --git a/runtime/stack.h b/runtime/stack.h
index 88ef78f..963983a 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -40,7 +40,7 @@
class Context;
class ShadowFrame;
-class StackIndirectReferenceTable;
+class HandleScope;
class ScopedObjectAccess;
class Thread;
@@ -677,10 +677,10 @@
return cur_shadow_frame_;
}
- StackIndirectReferenceTable* GetCurrentSirt() const {
+ HandleScope* GetCurrentHandleScope() const {
mirror::ArtMethod** sp = GetCurrentQuickFrame();
- ++sp; // Skip Method*; SIRT comes next;
- return reinterpret_cast<StackIndirectReferenceTable*>(sp);
+ ++sp; // Skip Method*; handle scope comes next;
+ return reinterpret_cast<HandleScope*>(sp);
}
std::string DescribeLocation() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/stack_indirect_reference_table.h b/runtime/stack_indirect_reference_table.h
deleted file mode 100644
index 3b632e7..0000000
--- a/runtime/stack_indirect_reference_table.h
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_STACK_INDIRECT_REFERENCE_TABLE_H_
-#define ART_RUNTIME_STACK_INDIRECT_REFERENCE_TABLE_H_
-
-#include "base/logging.h"
-#include "base/macros.h"
-#include "stack.h"
-#include "utils.h"
-
-namespace art {
-namespace mirror {
-class Object;
-}
-class Thread;
-
-// Stack allocated indirect reference table. It can allocated within
-// the bridge frame between managed and native code backed by stack
-// storage or manually allocated by SirtRef to hold one reference.
-class StackIndirectReferenceTable {
- public:
- explicit StackIndirectReferenceTable(mirror::Object* object) :
- link_(NULL), number_of_references_(1) {
- references_[0].Assign(object);
- }
-
- ~StackIndirectReferenceTable() {}
-
- // Number of references contained within this SIRT.
- uint32_t NumberOfReferences() const {
- return number_of_references_;
- }
-
- // We have versions with and without explicit pointer size of the following. The first two are
- // used at runtime, so OFFSETOF_MEMBER computes the right offsets automatically. The last one
- // takes the pointer size explicitly so that at compile time we can cross-compile correctly.
-
- // Returns the size of a StackIndirectReferenceTable containing num_references sirts.
- static size_t SizeOf(uint32_t num_references) {
- size_t header_size = OFFSETOF_MEMBER(StackIndirectReferenceTable, references_);
- size_t data_size = sizeof(StackReference<mirror::Object>) * num_references;
- return header_size + data_size;
- }
-
- // Get the size of the SIRT for the number of entries, with padding added for potential alignment.
- static size_t GetAlignedSirtSize(uint32_t num_references) {
- size_t sirt_size = SizeOf(num_references);
- return RoundUp(sirt_size, 8);
- }
-
- // Get the size of the SIRT for the number of entries, with padding added for potential alignment.
- static size_t GetAlignedSirtSizeTarget(size_t pointer_size, uint32_t num_references) {
- // Assume that the layout is packed.
- size_t header_size = pointer_size + sizeof(number_of_references_);
- // This assumes there is no layout change between 32 and 64b.
- size_t data_size = sizeof(StackReference<mirror::Object>) * num_references;
- size_t sirt_size = header_size + data_size;
- return RoundUp(sirt_size, 8);
- }
-
- // Link to previous SIRT or NULL.
- StackIndirectReferenceTable* GetLink() const {
- return link_;
- }
-
- void SetLink(StackIndirectReferenceTable* sirt) {
- DCHECK_NE(this, sirt);
- link_ = sirt;
- }
-
- // Sets the number_of_references_ field for constructing tables out of raw memory. Warning: will
- // not resize anything.
- void SetNumberOfReferences(uint32_t num_references) {
- number_of_references_ = num_references;
- }
-
- mirror::Object* GetReference(size_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK_LT(i, number_of_references_);
- return references_[i].AsMirrorPtr();
- }
-
- StackReference<mirror::Object>* GetStackReference(size_t i)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK_LT(i, number_of_references_);
- return &references_[i];
- }
-
- void SetReference(size_t i, mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK_LT(i, number_of_references_);
- references_[i].Assign(object);
- }
-
- bool Contains(StackReference<mirror::Object>* sirt_entry) const {
- // A SIRT should always contain something. One created by the
- // jni_compiler should have a jobject/jclass as a native method is
- // passed in a this pointer or a class
- DCHECK_GT(number_of_references_, 0U);
- return ((&references_[0] <= sirt_entry)
- && (sirt_entry <= (&references_[number_of_references_ - 1])));
- }
-
- // Offset of link within SIRT, used by generated code
- static size_t LinkOffset(size_t pointer_size) {
- return 0;
- }
-
- // Offset of length within SIRT, used by generated code
- static size_t NumberOfReferencesOffset(size_t pointer_size) {
- return pointer_size;
- }
-
- // Offset of link within SIRT, used by generated code
- static size_t ReferencesOffset(size_t pointer_size) {
- return pointer_size + sizeof(number_of_references_);
- }
-
- private:
- StackIndirectReferenceTable() {}
-
- StackIndirectReferenceTable* link_;
- uint32_t number_of_references_;
-
- // number_of_references_ are available if this is allocated and filled in by jni_compiler.
- StackReference<mirror::Object> references_[1];
-
- DISALLOW_COPY_AND_ASSIGN(StackIndirectReferenceTable);
-};
-
-} // namespace art
-
-#endif // ART_RUNTIME_STACK_INDIRECT_REFERENCE_TABLE_H_
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 00a66d7..d535118 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -44,6 +44,7 @@
#include "gc/accounting/card_table-inl.h"
#include "gc/heap.h"
#include "gc/space/space.h"
+#include "handle_scope.h"
#include "indirect_reference_table-inl.h"
#include "jni_internal.h"
#include "mirror/art_field-inl.h"
@@ -55,14 +56,14 @@
#include "monitor.h"
#include "object_utils.h"
#include "quick_exception_handler.h"
+#include "quick/quick_method_frame_info.h"
#include "reflection.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
#include "ScopedUtfChars.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
#include "stack.h"
-#include "stack_indirect_reference_table.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "utils.h"
@@ -156,11 +157,7 @@
self->tlsPtr_.opeer = soa.Decode<mirror::Object*>(self->tlsPtr_.jpeer);
self->GetJniEnv()->DeleteGlobalRef(self->tlsPtr_.jpeer);
self->tlsPtr_.jpeer = nullptr;
-
- {
- SirtRef<mirror::String> thread_name(self, self->GetThreadName(soa));
- self->SetThreadName(thread_name->ToModifiedUtf8().c_str());
- }
+ self->SetThreadName(self->GetThreadName(soa)->ToModifiedUtf8().c_str());
Dbg::PostThreadStart(self);
// Invoke the 'run' method of our java.lang.Thread.
@@ -430,8 +427,9 @@
reinterpret_cast<jlong>(self));
ScopedObjectAccess soa(self);
- SirtRef<mirror::String> peer_thread_name(soa.Self(), GetThreadName(soa));
- if (peer_thread_name.get() == nullptr) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::String> peer_thread_name(hs.NewHandle(GetThreadName(soa)));
+ if (peer_thread_name.Get() == nullptr) {
// The Thread constructor should have set the Thread.name to a
// non-null value. However, because we can run without code
// available (in the compiler, in tests), we manually assign the
@@ -441,10 +439,10 @@
} else {
InitPeer<false>(soa, thread_is_daemon, thread_group, thread_name.get(), thread_priority);
}
- peer_thread_name.reset(GetThreadName(soa));
+ peer_thread_name.Assign(GetThreadName(soa));
}
// 'thread_name' may have been null, so don't trust 'peer_thread_name' to be non-null.
- if (peer_thread_name.get() != nullptr) {
+ if (peer_thread_name.Get() != nullptr) {
SetThreadName(peer_thread_name->ToModifiedUtf8().c_str());
}
}
@@ -950,8 +948,7 @@
// If we're currently in native code, dump that stack before dumping the managed stack.
if (dump_for_abort || ShouldShowNativeStack(this)) {
DumpKernelStack(os, GetTid(), " kernel: ", false);
- SirtRef<mirror::ArtMethod> method_ref(Thread::Current(), GetCurrentMethod(nullptr));
- DumpNativeStack(os, GetTid(), " native: ", method_ref.get());
+ DumpNativeStack(os, GetTid(), " native: ", GetCurrentMethod(nullptr));
}
DumpJavaStack(os);
} else {
@@ -1106,8 +1103,9 @@
soa.DecodeField(WellKnownClasses::java_lang_Thread_lock)->GetObject(tlsPtr_.opeer);
// (This conditional is only needed for tests, where Thread.lock won't have been set.)
if (lock != nullptr) {
- SirtRef<mirror::Object> sirt_obj(self, lock);
- ObjectLock<mirror::Object> locker(self, &sirt_obj);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Object> h_obj(hs.NewHandle(lock));
+ ObjectLock<mirror::Object> locker(self, &h_obj);
locker.NotifyAll();
}
}
@@ -1207,28 +1205,28 @@
}
}
-size_t Thread::NumSirtReferences() {
+size_t Thread::NumHandleReferences() {
size_t count = 0;
- for (StackIndirectReferenceTable* cur = tlsPtr_.top_sirt; cur; cur = cur->GetLink()) {
+ for (HandleScope* cur = tlsPtr_.top_handle_scope; cur; cur = cur->GetLink()) {
count += cur->NumberOfReferences();
}
return count;
}
-bool Thread::SirtContains(jobject obj) const {
- StackReference<mirror::Object>* sirt_entry =
+bool Thread::HandleScopeContains(jobject obj) const {
+ StackReference<mirror::Object>* hs_entry =
reinterpret_cast<StackReference<mirror::Object>*>(obj);
- for (StackIndirectReferenceTable* cur = tlsPtr_.top_sirt; cur; cur = cur->GetLink()) {
- if (cur->Contains(sirt_entry)) {
+ for (HandleScope* cur = tlsPtr_.top_handle_scope; cur; cur = cur->GetLink()) {
+ if (cur->Contains(hs_entry)) {
return true;
}
}
- // JNI code invoked from portable code uses shadow frames rather than the SIRT.
- return tlsPtr_.managed_stack.ShadowFramesContain(sirt_entry);
+ // JNI code invoked from portable code uses shadow frames rather than the handle scope.
+ return tlsPtr_.managed_stack.ShadowFramesContain(hs_entry);
}
-void Thread::SirtVisitRoots(RootCallback* visitor, void* arg, uint32_t thread_id) {
- for (StackIndirectReferenceTable* cur = tlsPtr_.top_sirt; cur; cur = cur->GetLink()) {
+void Thread::HandleScopeVisitRoots(RootCallback* visitor, void* arg, uint32_t thread_id) {
+ for (HandleScope* cur = tlsPtr_.top_handle_scope; cur; cur = cur->GetLink()) {
size_t num_refs = cur->NumberOfReferences();
for (size_t j = 0; j < num_refs; ++j) {
mirror::Object* object = cur->GetReference(j);
@@ -1255,11 +1253,11 @@
if (kind == kLocal) {
IndirectReferenceTable& locals = tlsPtr_.jni_env->locals;
result = locals.Get(ref);
- } else if (kind == kSirtOrInvalid) {
+ } else if (kind == kHandleScopeOrInvalid) {
// TODO: make stack indirect reference table lookup more efficient.
- // Check if this is a local reference in the SIRT.
- if (LIKELY(SirtContains(obj))) {
- // Read from SIRT.
+ // Check if this is a local reference in the handle scope.
+ if (LIKELY(HandleScopeContains(obj))) {
+ // Read from handle scope.
result = reinterpret_cast<StackReference<mirror::Object>*>(obj)->AsMirrorPtr();
VerifyObject(result);
} else {
@@ -1368,11 +1366,11 @@
bool Init(int depth)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Allocate method trace with an extra slot that will hold the PC trace
- SirtRef<mirror::ObjectArray<mirror::Object> >
- method_trace(self_,
- Runtime::Current()->GetClassLinker()->AllocObjectArray<mirror::Object>(self_,
- depth + 1));
- if (method_trace.get() == nullptr) {
+ StackHandleScope<1> hs(self_);
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ Handle<mirror::ObjectArray<mirror::Object>> method_trace(
+ hs.NewHandle(class_linker->AllocObjectArray<mirror::Object>(self_, depth + 1)));
+ if (method_trace.Get() == nullptr) {
return false;
}
mirror::IntArray* dex_pc_trace = mirror::IntArray::Alloc(self_, depth);
@@ -1387,7 +1385,7 @@
const char* last_no_suspend_cause =
self_->StartAssertNoThreadSuspension("Building internal stack trace");
CHECK(last_no_suspend_cause == nullptr) << last_no_suspend_cause;
- method_trace_ = method_trace.get();
+ method_trace_ = method_trace.Get();
dex_pc_trace_ = dex_pc_trace;
return true;
}
@@ -1497,11 +1495,12 @@
mirror::ArtMethod* method = down_cast<mirror::ArtMethod*>(method_trace->Get(i));
MethodHelper mh(method);
int32_t line_number;
- SirtRef<mirror::String> class_name_object(soa.Self(), nullptr);
- SirtRef<mirror::String> source_name_object(soa.Self(), nullptr);
+ StackHandleScope<3> hs(soa.Self());
+ auto class_name_object(hs.NewHandle<mirror::String>(nullptr));
+ auto source_name_object(hs.NewHandle<mirror::String>(nullptr));
if (method->IsProxyMethod()) {
line_number = -1;
- class_name_object.reset(method->GetDeclaringClass()->GetName());
+ class_name_object.Assign(method->GetDeclaringClass()->GetName());
// source_name_object intentionally left null for proxy methods
} else {
mirror::IntArray* pc_trace = down_cast<mirror::IntArray*>(method_trace->Get(depth));
@@ -1512,24 +1511,23 @@
const char* descriptor = mh.GetDeclaringClassDescriptor();
CHECK(descriptor != nullptr);
std::string class_name(PrettyDescriptor(descriptor));
- class_name_object.reset(mirror::String::AllocFromModifiedUtf8(soa.Self(), class_name.c_str()));
- if (class_name_object.get() == nullptr) {
+ class_name_object.Assign(mirror::String::AllocFromModifiedUtf8(soa.Self(), class_name.c_str()));
+ if (class_name_object.Get() == nullptr) {
return nullptr;
}
const char* source_file = mh.GetDeclaringClassSourceFile();
if (source_file != nullptr) {
- source_name_object.reset(mirror::String::AllocFromModifiedUtf8(soa.Self(), source_file));
- if (source_name_object.get() == nullptr) {
+ source_name_object.Assign(mirror::String::AllocFromModifiedUtf8(soa.Self(), source_file));
+ if (source_name_object.Get() == nullptr) {
return nullptr;
}
}
}
const char* method_name = mh.GetName();
CHECK(method_name != nullptr);
- SirtRef<mirror::String> method_name_object(soa.Self(),
- mirror::String::AllocFromModifiedUtf8(soa.Self(),
- method_name));
- if (method_name_object.get() == nullptr) {
+ Handle<mirror::String> method_name_object(
+ hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), method_name)));
+ if (method_name_object.Get() == nullptr) {
return nullptr;
}
mirror::StackTraceElement* obj = mirror::StackTraceElement::Alloc(
@@ -1572,23 +1570,24 @@
const char* msg) {
DCHECK_EQ(this, Thread::Current());
ScopedObjectAccessUnchecked soa(this);
+ StackHandleScope<5> hs(soa.Self());
// Ensure we don't forget arguments over object allocation.
- SirtRef<mirror::Object> saved_throw_this(this, throw_location.GetThis());
- SirtRef<mirror::ArtMethod> saved_throw_method(this, throw_location.GetMethod());
+ Handle<mirror::Object> saved_throw_this(hs.NewHandle(throw_location.GetThis()));
+ Handle<mirror::ArtMethod> saved_throw_method(hs.NewHandle(throw_location.GetMethod()));
// Ignore the cause throw location. TODO: should we report this as a re-throw?
ScopedLocalRef<jobject> cause(GetJniEnv(), soa.AddLocalReference<jobject>(GetException(nullptr)));
ClearException();
Runtime* runtime = Runtime::Current();
mirror::ClassLoader* cl = nullptr;
- if (saved_throw_method.get() != nullptr) {
- cl = saved_throw_method.get()->GetDeclaringClass()->GetClassLoader();
+ if (saved_throw_method.Get() != nullptr) {
+ cl = saved_throw_method.Get()->GetDeclaringClass()->GetClassLoader();
}
- SirtRef<mirror::ClassLoader> class_loader(this, cl);
- SirtRef<mirror::Class>
- exception_class(this, runtime->GetClassLinker()->FindClass(this, exception_class_descriptor,
- class_loader));
- if (UNLIKELY(exception_class.get() == nullptr)) {
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(cl));
+ Handle<mirror::Class> exception_class(
+ hs.NewHandle(runtime->GetClassLinker()->FindClass(this, exception_class_descriptor,
+ class_loader)));
+ if (UNLIKELY(exception_class.Get() == nullptr)) {
CHECK(IsExceptionPending());
LOG(ERROR) << "No exception class " << PrettyDescriptor(exception_class_descriptor);
return;
@@ -1599,12 +1598,12 @@
return;
}
DCHECK(!runtime->IsStarted() || exception_class->IsThrowableClass());
- SirtRef<mirror::Throwable> exception(this,
- down_cast<mirror::Throwable*>(exception_class->AllocObject(this)));
+ Handle<mirror::Throwable> exception(
+ hs.NewHandle(down_cast<mirror::Throwable*>(exception_class->AllocObject(this))));
// If we couldn't allocate the exception, throw the pre-allocated out of memory exception.
- if (exception.get() == nullptr) {
- ThrowLocation gc_safe_throw_location(saved_throw_this.get(), saved_throw_method.get(),
+ if (exception.Get() == nullptr) {
+ ThrowLocation gc_safe_throw_location(saved_throw_this.Get(), saved_throw_method.Get(),
throw_location.GetDexPc());
SetException(gc_safe_throw_location, Runtime::Current()->GetPreAllocatedOutOfMemoryError());
return;
@@ -1656,9 +1655,9 @@
if (trace.get() != nullptr) {
exception->SetStackState(down_cast<mirror::Throwable*>(DecodeJObject(trace.get())));
}
- ThrowLocation gc_safe_throw_location(saved_throw_this.get(), saved_throw_method.get(),
+ ThrowLocation gc_safe_throw_location(saved_throw_this.Get(), saved_throw_method.Get(),
throw_location.GetDexPc());
- SetException(gc_safe_throw_location, exception.get());
+ SetException(gc_safe_throw_location, exception.Get());
} else {
jvalue jv_args[2];
size_t i = 0;
@@ -1671,11 +1670,11 @@
jv_args[i].l = cause.get();
++i;
}
- InvokeWithJValues(soa, exception.get(), soa.EncodeMethod(exception_init_method), jv_args);
+ InvokeWithJValues(soa, exception.Get(), soa.EncodeMethod(exception_init_method), jv_args);
if (LIKELY(!IsExceptionPending())) {
- ThrowLocation gc_safe_throw_location(saved_throw_this.get(), saved_throw_method.get(),
+ ThrowLocation gc_safe_throw_location(saved_throw_this.Get(), saved_throw_method.Get(),
throw_location.GetDexPc());
- SetException(gc_safe_throw_location, exception.get());
+ SetException(gc_safe_throw_location, exception.Get());
}
}
}
@@ -1732,7 +1731,7 @@
DO_THREAD_OFFSET(TopOfManagedStackOffset<ptr_size>(), "top_quick_frame_method")
DO_THREAD_OFFSET(TopOfManagedStackPcOffset<ptr_size>(), "top_quick_frame_pc")
DO_THREAD_OFFSET(TopShadowFrameOffset<ptr_size>(), "top_shadow_frame")
- DO_THREAD_OFFSET(TopSirtOffset<ptr_size>(), "top_sirt")
+ DO_THREAD_OFFSET(TopHandleScopeOffset<ptr_size>(), "top_handle_scope")
DO_THREAD_OFFSET(ThreadSuspendTriggerOffset<ptr_size>(), "suspend_trigger")
#undef DO_THREAD_OFFSET
@@ -1966,7 +1965,7 @@
mirror::ArtMethod* m = shadow_frame->GetMethod();
size_t num_regs = shadow_frame->NumberOfVRegs();
if (m->IsNative() || shadow_frame->HasReferenceArray()) {
- // SIRT for JNI or References for interpreter.
+ // handle scope for JNI or References for interpreter.
for (size_t reg = 0; reg < num_regs; ++reg) {
mirror::Object* ref = shadow_frame->GetVRegReference(reg);
if (ref != nullptr) {
@@ -2019,9 +2018,7 @@
const uint8_t* reg_bitmap = map.FindBitMap(GetNativePcOffset());
DCHECK(reg_bitmap != nullptr);
const VmapTable vmap_table(m->GetVmapTable());
- uint32_t core_spills = m->GetCoreSpillMask();
- uint32_t fp_spills = m->GetFpSpillMask();
- size_t frame_size = m->GetFrameSizeInBytes();
+ QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo();
// For all dex registers in the bitmap
mirror::ArtMethod** cur_quick_frame = GetCurrentQuickFrame();
DCHECK(cur_quick_frame != nullptr);
@@ -2030,7 +2027,8 @@
if (TestBitmap(reg, reg_bitmap)) {
uint32_t vmap_offset;
if (vmap_table.IsInContext(reg, kReferenceVReg, &vmap_offset)) {
- int vmap_reg = vmap_table.ComputeRegister(core_spills, vmap_offset, kReferenceVReg);
+ int vmap_reg = vmap_table.ComputeRegister(frame_info.CoreSpillMask(), vmap_offset,
+ kReferenceVReg);
// This is sound as spilled GPRs will be word sized (ie 32 or 64bit).
mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(vmap_reg));
if (*ref_addr != nullptr) {
@@ -2039,8 +2037,8 @@
} else {
StackReference<mirror::Object>* ref_addr =
reinterpret_cast<StackReference<mirror::Object>*>(
- GetVRegAddr(cur_quick_frame, code_item, core_spills, fp_spills, frame_size,
- reg));
+ GetVRegAddr(cur_quick_frame, code_item, frame_info.CoreSpillMask(),
+ frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), reg));
mirror::Object* ref = ref_addr->AsMirrorPtr();
if (ref != nullptr) {
mirror::Object* new_ref = ref;
@@ -2105,7 +2103,7 @@
}
tlsPtr_.jni_env->locals.VisitRoots(visitor, arg, thread_id, kRootJNILocal);
tlsPtr_.jni_env->monitors.VisitRoots(visitor, arg, thread_id, kRootJNIMonitor);
- SirtVisitRoots(visitor, arg, thread_id);
+ HandleScopeVisitRoots(visitor, arg, thread_id);
if (tlsPtr_.debug_invoke_req != nullptr) {
tlsPtr_.debug_invoke_req->VisitRoots(visitor, arg, thread_id, kRootDebugger);
}
diff --git a/runtime/thread.h b/runtime/thread.h
index 32311e1..be7634f 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -31,15 +31,15 @@
#include "entrypoints/quick/quick_entrypoints.h"
#include "gc/allocator/rosalloc.h"
#include "globals.h"
+#include "handle_scope.h"
#include "jvalue.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "runtime_stats.h"
#include "stack.h"
-#include "stack_indirect_reference_table.h"
#include "thread_state.h"
#include "throw_location.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
namespace art {
@@ -103,6 +103,11 @@
// Worst-case, we would need about 2.6x the amount of x86_64 for many more registers.
// But this one works rather well.
static constexpr size_t kStackOverflowReservedBytes = 32 * KB;
+#elif defined(__i386__)
+ // TODO: Bumped to workaround regression (http://b/14982147) Specifically to fix:
+ // test-art-host-run-test-interpreter-018-stack-overflow
+ // test-art-host-run-test-interpreter-107-int-math2
+ static constexpr size_t kStackOverflowReservedBytes = 24 * KB;
#else
static constexpr size_t kStackOverflowReservedBytes = 16 * KB;
#endif
@@ -648,35 +653,40 @@
return tlsPtr_.managed_stack.NumJniShadowFrameReferences();
}
- // Number of references in SIRTs on this thread.
- size_t NumSirtReferences();
+ // Number of references in handle scope on this thread.
+ size_t NumHandleReferences();
- // Number of references allocated in SIRTs & JNI shadow frames on this thread.
+ // Number of references allocated in handle scopes & JNI shadow frames on this thread.
size_t NumStackReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return NumSirtReferences() + NumJniShadowFrameReferences();
+ return NumHandleReferences() + NumJniShadowFrameReferences();
};
// Is the given obj in this thread's stack indirect reference table?
- bool SirtContains(jobject obj) const;
+ bool HandleScopeContains(jobject obj) const;
- void SirtVisitRoots(RootCallback* visitor, void* arg, uint32_t thread_id)
+ void HandleScopeVisitRoots(RootCallback* visitor, void* arg, uint32_t thread_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void PushSirt(StackIndirectReferenceTable* sirt) {
- sirt->SetLink(tlsPtr_.top_sirt);
- tlsPtr_.top_sirt = sirt;
+ HandleScope* GetTopHandleScope() {
+ return tlsPtr_.top_handle_scope;
}
- StackIndirectReferenceTable* PopSirt() {
- StackIndirectReferenceTable* sirt = tlsPtr_.top_sirt;
- DCHECK(sirt != NULL);
- tlsPtr_.top_sirt = tlsPtr_.top_sirt->GetLink();
- return sirt;
+ void PushHandleScope(HandleScope* handle_scope) {
+ handle_scope->SetLink(tlsPtr_.top_handle_scope);
+ tlsPtr_.top_handle_scope = handle_scope;
+ }
+
+ HandleScope* PopHandleScope() {
+ HandleScope* handle_scope = tlsPtr_.top_handle_scope;
+ DCHECK(handle_scope != nullptr);
+ tlsPtr_.top_handle_scope = tlsPtr_.top_handle_scope->GetLink();
+ return handle_scope;
}
template<size_t pointer_size>
- static ThreadOffset<pointer_size> TopSirtOffset() {
- return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, top_sirt));
+ static ThreadOffset<pointer_size> TopHandleScopeOffset() {
+ return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
+ top_handle_scope));
}
DebugInvokeReq* GetInvokeReq() const {
@@ -950,7 +960,7 @@
managed_stack(), suspend_trigger(nullptr), jni_env(nullptr), self(nullptr), opeer(nullptr),
jpeer(nullptr), stack_begin(nullptr), stack_size(0), throw_location(),
stack_trace_sample(nullptr), wait_next(nullptr), monitor_enter_object(nullptr),
- top_sirt(nullptr), class_loader_override(nullptr), long_jump_context(nullptr),
+ top_handle_scope(nullptr), class_loader_override(nullptr), long_jump_context(nullptr),
instrumentation_stack(nullptr), debug_invoke_req(nullptr), single_step_control(nullptr),
deoptimization_shadow_frame(nullptr), name(nullptr), pthread_self(0),
last_no_thread_suspension_cause(nullptr), thread_local_start(nullptr),
@@ -1006,8 +1016,8 @@
// If we're blocked in MonitorEnter, this is the object we're trying to lock.
mirror::Object* monitor_enter_object;
- // Top of linked list of stack indirect reference tables or NULL for none.
- StackIndirectReferenceTable* top_sirt;
+ // Top of linked list of handle scopes or nullptr for none.
+ HandleScope* top_handle_scope;
// Needed to get the right ClassLoader in JNI_OnLoad, but also
// useful for testing.
diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h
index b8735a3..23bf294 100644
--- a/runtime/thread_pool.h
+++ b/runtime/thread_pool.h
@@ -96,7 +96,7 @@
void SetMaxActiveWorkers(size_t threads);
protected:
- // Get a task to run, blocks if there are no tasks left
+ // get a task to run, blocks if there are no tasks left
virtual Task* GetTask(Thread* self);
// Try to get a task, returning NULL if there is none available.
diff --git a/runtime/trace.h b/runtime/trace.h
index bf4995a..ef6c642 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -27,7 +27,7 @@
#include "instrumentation.h"
#include "os.h"
#include "safe_map.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
namespace art {
diff --git a/runtime/transaction_test.cc b/runtime/transaction_test.cc
index 1dc2da0..3645ed2 100644
--- a/runtime/transaction_test.cc
+++ b/runtime/transaction_test.cc
@@ -27,56 +27,57 @@
TEST_F(TransactionTest, Object_class) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::Class> sirt_klass(soa.Self(),
- class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;"));
- ASSERT_TRUE(sirt_klass.get() != nullptr);
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::Class> h_klass(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));
+ ASSERT_TRUE(h_klass.Get() != nullptr);
Transaction transaction;
Runtime::Current()->EnterTransactionMode(&transaction);
- SirtRef<mirror::Object> sirt_obj(soa.Self(), sirt_klass->AllocObject(soa.Self()));
- ASSERT_TRUE(sirt_obj.get() != nullptr);
- ASSERT_EQ(sirt_obj->GetClass(), sirt_klass.get());
+ Handle<mirror::Object> h_obj(hs.NewHandle(h_klass->AllocObject(soa.Self())));
+ ASSERT_TRUE(h_obj.Get() != nullptr);
+ ASSERT_EQ(h_obj->GetClass(), h_klass.Get());
Runtime::Current()->ExitTransactionMode();
// Aborting transaction must not clear the Object::class field.
transaction.Abort();
- EXPECT_EQ(sirt_obj->GetClass(), sirt_klass.get());
+ EXPECT_EQ(h_obj->GetClass(), h_klass.Get());
}
TEST_F(TransactionTest, Object_monitor) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::Class> sirt_klass(soa.Self(),
- class_linker_->FindSystemClass(soa.Self(),
- "Ljava/lang/Object;"));
- ASSERT_TRUE(sirt_klass.get() != nullptr);
- SirtRef<mirror::Object> sirt_obj(soa.Self(), sirt_klass->AllocObject(soa.Self()));
- ASSERT_TRUE(sirt_obj.get() != nullptr);
- ASSERT_EQ(sirt_obj->GetClass(), sirt_klass.get());
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::Class> h_klass(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));
+ ASSERT_TRUE(h_klass.Get() != nullptr);
+ Handle<mirror::Object> h_obj(hs.NewHandle(h_klass->AllocObject(soa.Self())));
+ ASSERT_TRUE(h_obj.Get() != nullptr);
+ ASSERT_EQ(h_obj->GetClass(), h_klass.Get());
// Lock object's monitor outside the transaction.
- sirt_obj->MonitorEnter(soa.Self());
- uint32_t old_lock_word = sirt_obj->GetLockWord(false).GetValue();
+ h_obj->MonitorEnter(soa.Self());
+ uint32_t old_lock_word = h_obj->GetLockWord(false).GetValue();
Transaction transaction;
Runtime::Current()->EnterTransactionMode(&transaction);
// Unlock object's monitor inside the transaction.
- sirt_obj->MonitorExit(soa.Self());
- uint32_t new_lock_word = sirt_obj->GetLockWord(false).GetValue();
+ h_obj->MonitorExit(soa.Self());
+ uint32_t new_lock_word = h_obj->GetLockWord(false).GetValue();
Runtime::Current()->ExitTransactionMode();
// Aborting transaction must not clear the Object::class field.
transaction.Abort();
- uint32_t aborted_lock_word = sirt_obj->GetLockWord(false).GetValue();
+ uint32_t aborted_lock_word = h_obj->GetLockWord(false).GetValue();
EXPECT_NE(old_lock_word, new_lock_word);
EXPECT_EQ(aborted_lock_word, new_lock_word);
}
TEST_F(TransactionTest, Array_length) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::Class> sirt_klass(soa.Self(),
- class_linker_->FindSystemClass(soa.Self(),
- "[Ljava/lang/Object;"));
- ASSERT_TRUE(sirt_klass.get() != nullptr);
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::Class> h_klass(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));
+ ASSERT_TRUE(h_klass.Get() != nullptr);
constexpr int32_t kArraySize = 2;
@@ -84,304 +85,301 @@
Runtime::Current()->EnterTransactionMode(&transaction);
// Allocate an array during transaction.
- SirtRef<mirror::Array> sirt_obj(soa.Self(),
- mirror::Array::Alloc<true>(soa.Self(), sirt_klass.get(),
- kArraySize,
- sirt_klass->GetComponentSize(),
- Runtime::Current()->GetHeap()->GetCurrentAllocator()));
- ASSERT_TRUE(sirt_obj.get() != nullptr);
- ASSERT_EQ(sirt_obj->GetClass(), sirt_klass.get());
+ Handle<mirror::Array> h_obj(
+ hs.NewHandle(
+ mirror::Array::Alloc<true>(soa.Self(), h_klass.Get(), kArraySize,
+ h_klass->GetComponentSize(),
+ Runtime::Current()->GetHeap()->GetCurrentAllocator())));
+ ASSERT_TRUE(h_obj.Get() != nullptr);
+ ASSERT_EQ(h_obj->GetClass(), h_klass.Get());
Runtime::Current()->ExitTransactionMode();
// Aborting transaction must not clear the Object::class field.
transaction.Abort();
- EXPECT_EQ(sirt_obj->GetLength(), kArraySize);
+ EXPECT_EQ(h_obj->GetLength(), kArraySize);
}
TEST_F(TransactionTest, StaticFieldsTest) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::ClassLoader> class_loader(
- soa.Self(), soa.Decode<mirror::ClassLoader*>(LoadDex("Transaction")));
- ASSERT_TRUE(class_loader.get() != nullptr);
+ StackHandleScope<4> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(LoadDex("Transaction"))));
+ ASSERT_TRUE(class_loader.Get() != nullptr);
- SirtRef<mirror::Class> sirt_klass(soa.Self(),
- class_linker_->FindClass(soa.Self(), "LStaticFieldsTest;",
- class_loader));
- ASSERT_TRUE(sirt_klass.get() != nullptr);
- class_linker_->EnsureInitialized(sirt_klass, true, true);
- ASSERT_TRUE(sirt_klass->IsInitialized());
+ Handle<mirror::Class> h_klass(
+ hs.NewHandle(class_linker_->FindClass(soa.Self(), "LStaticFieldsTest;", class_loader)));
+ ASSERT_TRUE(h_klass.Get() != nullptr);
+ class_linker_->EnsureInitialized(h_klass, true, true);
+ ASSERT_TRUE(h_klass->IsInitialized());
// Lookup fields.
- mirror::ArtField* booleanField = sirt_klass->FindDeclaredStaticField("booleanField", "Z");
+ mirror::ArtField* booleanField = h_klass->FindDeclaredStaticField("booleanField", "Z");
ASSERT_TRUE(booleanField != nullptr);
ASSERT_EQ(FieldHelper(booleanField).GetTypeAsPrimitiveType(), Primitive::kPrimBoolean);
- ASSERT_EQ(booleanField->GetBoolean(sirt_klass.get()), false);
+ ASSERT_EQ(booleanField->GetBoolean(h_klass.Get()), false);
- mirror::ArtField* byteField = sirt_klass->FindDeclaredStaticField("byteField", "B");
+ mirror::ArtField* byteField = h_klass->FindDeclaredStaticField("byteField", "B");
ASSERT_TRUE(byteField != nullptr);
ASSERT_EQ(FieldHelper(byteField).GetTypeAsPrimitiveType(), Primitive::kPrimByte);
- ASSERT_EQ(byteField->GetByte(sirt_klass.get()), 0);
+ ASSERT_EQ(byteField->GetByte(h_klass.Get()), 0);
- mirror::ArtField* charField = sirt_klass->FindDeclaredStaticField("charField", "C");
+ mirror::ArtField* charField = h_klass->FindDeclaredStaticField("charField", "C");
ASSERT_TRUE(charField != nullptr);
ASSERT_EQ(FieldHelper(charField).GetTypeAsPrimitiveType(), Primitive::kPrimChar);
- ASSERT_EQ(charField->GetChar(sirt_klass.get()), 0u);
+ ASSERT_EQ(charField->GetChar(h_klass.Get()), 0u);
- mirror::ArtField* shortField = sirt_klass->FindDeclaredStaticField("shortField", "S");
+ mirror::ArtField* shortField = h_klass->FindDeclaredStaticField("shortField", "S");
ASSERT_TRUE(shortField != nullptr);
ASSERT_EQ(FieldHelper(shortField).GetTypeAsPrimitiveType(), Primitive::kPrimShort);
- ASSERT_EQ(shortField->GetShort(sirt_klass.get()), 0);
+ ASSERT_EQ(shortField->GetShort(h_klass.Get()), 0);
- mirror::ArtField* intField = sirt_klass->FindDeclaredStaticField("intField", "I");
+ mirror::ArtField* intField = h_klass->FindDeclaredStaticField("intField", "I");
ASSERT_TRUE(intField != nullptr);
ASSERT_EQ(FieldHelper(intField).GetTypeAsPrimitiveType(), Primitive::kPrimInt);
- ASSERT_EQ(intField->GetInt(sirt_klass.get()), 0);
+ ASSERT_EQ(intField->GetInt(h_klass.Get()), 0);
- mirror::ArtField* longField = sirt_klass->FindDeclaredStaticField("longField", "J");
+ mirror::ArtField* longField = h_klass->FindDeclaredStaticField("longField", "J");
ASSERT_TRUE(longField != nullptr);
ASSERT_EQ(FieldHelper(longField).GetTypeAsPrimitiveType(), Primitive::kPrimLong);
- ASSERT_EQ(longField->GetLong(sirt_klass.get()), static_cast<int64_t>(0));
+ ASSERT_EQ(longField->GetLong(h_klass.Get()), static_cast<int64_t>(0));
- mirror::ArtField* floatField = sirt_klass->FindDeclaredStaticField("floatField", "F");
+ mirror::ArtField* floatField = h_klass->FindDeclaredStaticField("floatField", "F");
ASSERT_TRUE(floatField != nullptr);
ASSERT_EQ(FieldHelper(floatField).GetTypeAsPrimitiveType(), Primitive::kPrimFloat);
- ASSERT_EQ(floatField->GetFloat(sirt_klass.get()), static_cast<float>(0.0f));
+ ASSERT_EQ(floatField->GetFloat(h_klass.Get()), static_cast<float>(0.0f));
- mirror::ArtField* doubleField = sirt_klass->FindDeclaredStaticField("doubleField", "D");
+ mirror::ArtField* doubleField = h_klass->FindDeclaredStaticField("doubleField", "D");
ASSERT_TRUE(doubleField != nullptr);
ASSERT_EQ(FieldHelper(doubleField).GetTypeAsPrimitiveType(), Primitive::kPrimDouble);
- ASSERT_EQ(doubleField->GetDouble(sirt_klass.get()), static_cast<double>(0.0));
+ ASSERT_EQ(doubleField->GetDouble(h_klass.Get()), static_cast<double>(0.0));
- mirror::ArtField* objectField = sirt_klass->FindDeclaredStaticField("objectField",
+ mirror::ArtField* objectField = h_klass->FindDeclaredStaticField("objectField",
"Ljava/lang/Object;");
ASSERT_TRUE(objectField != nullptr);
ASSERT_EQ(FieldHelper(objectField).GetTypeAsPrimitiveType(), Primitive::kPrimNot);
- ASSERT_EQ(objectField->GetObject(sirt_klass.get()), nullptr);
+ ASSERT_EQ(objectField->GetObject(h_klass.Get()), nullptr);
// Create a java.lang.Object instance to set objectField.
- SirtRef<mirror::Class> object_klass(soa.Self(),
- class_linker_->FindSystemClass(soa.Self(),
- "Ljava/lang/Object;"));
- ASSERT_TRUE(object_klass.get() != nullptr);
- SirtRef<mirror::Object> sirt_obj(soa.Self(), sirt_klass->AllocObject(soa.Self()));
- ASSERT_TRUE(sirt_obj.get() != nullptr);
- ASSERT_EQ(sirt_obj->GetClass(), sirt_klass.get());
+ Handle<mirror::Class> object_klass(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));
+ ASSERT_TRUE(object_klass.Get() != nullptr);
+ Handle<mirror::Object> h_obj(hs.NewHandle(h_klass->AllocObject(soa.Self())));
+ ASSERT_TRUE(h_obj.Get() != nullptr);
+ ASSERT_EQ(h_obj->GetClass(), h_klass.Get());
// Modify fields inside transaction and abort it.
Transaction transaction;
Runtime::Current()->EnterTransactionMode(&transaction);
- booleanField->SetBoolean<true>(sirt_klass.get(), true);
- byteField->SetByte<true>(sirt_klass.get(), 1);
- charField->SetChar<true>(sirt_klass.get(), 1u);
- shortField->SetShort<true>(sirt_klass.get(), 1);
- intField->SetInt<true>(sirt_klass.get(), 1);
- longField->SetLong<true>(sirt_klass.get(), 1);
- floatField->SetFloat<true>(sirt_klass.get(), 1.0);
- doubleField->SetDouble<true>(sirt_klass.get(), 1.0);
- objectField->SetObject<true>(sirt_klass.get(), sirt_obj.get());
+ booleanField->SetBoolean<true>(h_klass.Get(), true);
+ byteField->SetByte<true>(h_klass.Get(), 1);
+ charField->SetChar<true>(h_klass.Get(), 1u);
+ shortField->SetShort<true>(h_klass.Get(), 1);
+ intField->SetInt<true>(h_klass.Get(), 1);
+ longField->SetLong<true>(h_klass.Get(), 1);
+ floatField->SetFloat<true>(h_klass.Get(), 1.0);
+ doubleField->SetDouble<true>(h_klass.Get(), 1.0);
+ objectField->SetObject<true>(h_klass.Get(), h_obj.Get());
Runtime::Current()->ExitTransactionMode();
transaction.Abort();
// Check values have properly been restored to their original (default) value.
- EXPECT_EQ(booleanField->GetBoolean(sirt_klass.get()), false);
- EXPECT_EQ(byteField->GetByte(sirt_klass.get()), 0);
- EXPECT_EQ(charField->GetChar(sirt_klass.get()), 0u);
- EXPECT_EQ(shortField->GetShort(sirt_klass.get()), 0);
- EXPECT_EQ(intField->GetInt(sirt_klass.get()), 0);
- EXPECT_EQ(longField->GetLong(sirt_klass.get()), static_cast<int64_t>(0));
- EXPECT_EQ(floatField->GetFloat(sirt_klass.get()), static_cast<float>(0.0f));
- EXPECT_EQ(doubleField->GetDouble(sirt_klass.get()), static_cast<double>(0.0));
- EXPECT_EQ(objectField->GetObject(sirt_klass.get()), nullptr);
+ EXPECT_EQ(booleanField->GetBoolean(h_klass.Get()), false);
+ EXPECT_EQ(byteField->GetByte(h_klass.Get()), 0);
+ EXPECT_EQ(charField->GetChar(h_klass.Get()), 0u);
+ EXPECT_EQ(shortField->GetShort(h_klass.Get()), 0);
+ EXPECT_EQ(intField->GetInt(h_klass.Get()), 0);
+ EXPECT_EQ(longField->GetLong(h_klass.Get()), static_cast<int64_t>(0));
+ EXPECT_EQ(floatField->GetFloat(h_klass.Get()), static_cast<float>(0.0f));
+ EXPECT_EQ(doubleField->GetDouble(h_klass.Get()), static_cast<double>(0.0));
+ EXPECT_EQ(objectField->GetObject(h_klass.Get()), nullptr);
}
TEST_F(TransactionTest, InstanceFieldsTest) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::ClassLoader> class_loader(
- soa.Self(), soa.Decode<mirror::ClassLoader*>(LoadDex("Transaction")));
- ASSERT_TRUE(class_loader.get() != nullptr);
+ StackHandleScope<5> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(LoadDex("Transaction"))));
+ ASSERT_TRUE(class_loader.Get() != nullptr);
- SirtRef<mirror::Class> sirt_klass(soa.Self(),
- class_linker_->FindClass(soa.Self(), "LInstanceFieldsTest;",
- class_loader));
- ASSERT_TRUE(sirt_klass.get() != nullptr);
- class_linker_->EnsureInitialized(sirt_klass, true, true);
- ASSERT_TRUE(sirt_klass->IsInitialized());
+ Handle<mirror::Class> h_klass(
+ hs.NewHandle(class_linker_->FindClass(soa.Self(), "LInstanceFieldsTest;", class_loader)));
+ ASSERT_TRUE(h_klass.Get() != nullptr);
+ class_linker_->EnsureInitialized(h_klass, true, true);
+ ASSERT_TRUE(h_klass->IsInitialized());
// Allocate an InstanceFieldTest object.
- SirtRef<mirror::Object> sirt_instance(soa.Self(), sirt_klass->AllocObject(soa.Self()));
- ASSERT_TRUE(sirt_instance.get() != nullptr);
+ Handle<mirror::Object> h_instance(hs.NewHandle(h_klass->AllocObject(soa.Self())));
+ ASSERT_TRUE(h_instance.Get() != nullptr);
// Lookup fields.
- mirror::ArtField* booleanField = sirt_klass->FindDeclaredInstanceField("booleanField", "Z");
+ mirror::ArtField* booleanField = h_klass->FindDeclaredInstanceField("booleanField", "Z");
ASSERT_TRUE(booleanField != nullptr);
ASSERT_EQ(FieldHelper(booleanField).GetTypeAsPrimitiveType(), Primitive::kPrimBoolean);
- ASSERT_EQ(booleanField->GetBoolean(sirt_instance.get()), false);
+ ASSERT_EQ(booleanField->GetBoolean(h_instance.Get()), false);
- mirror::ArtField* byteField = sirt_klass->FindDeclaredInstanceField("byteField", "B");
+ mirror::ArtField* byteField = h_klass->FindDeclaredInstanceField("byteField", "B");
ASSERT_TRUE(byteField != nullptr);
ASSERT_EQ(FieldHelper(byteField).GetTypeAsPrimitiveType(), Primitive::kPrimByte);
- ASSERT_EQ(byteField->GetByte(sirt_instance.get()), 0);
+ ASSERT_EQ(byteField->GetByte(h_instance.Get()), 0);
- mirror::ArtField* charField = sirt_klass->FindDeclaredInstanceField("charField", "C");
+ mirror::ArtField* charField = h_klass->FindDeclaredInstanceField("charField", "C");
ASSERT_TRUE(charField != nullptr);
ASSERT_EQ(FieldHelper(charField).GetTypeAsPrimitiveType(), Primitive::kPrimChar);
- ASSERT_EQ(charField->GetChar(sirt_instance.get()), 0u);
+ ASSERT_EQ(charField->GetChar(h_instance.Get()), 0u);
- mirror::ArtField* shortField = sirt_klass->FindDeclaredInstanceField("shortField", "S");
+ mirror::ArtField* shortField = h_klass->FindDeclaredInstanceField("shortField", "S");
ASSERT_TRUE(shortField != nullptr);
ASSERT_EQ(FieldHelper(shortField).GetTypeAsPrimitiveType(), Primitive::kPrimShort);
- ASSERT_EQ(shortField->GetShort(sirt_instance.get()), 0);
+ ASSERT_EQ(shortField->GetShort(h_instance.Get()), 0);
- mirror::ArtField* intField = sirt_klass->FindDeclaredInstanceField("intField", "I");
+ mirror::ArtField* intField = h_klass->FindDeclaredInstanceField("intField", "I");
ASSERT_TRUE(intField != nullptr);
ASSERT_EQ(FieldHelper(intField).GetTypeAsPrimitiveType(), Primitive::kPrimInt);
- ASSERT_EQ(intField->GetInt(sirt_instance.get()), 0);
+ ASSERT_EQ(intField->GetInt(h_instance.Get()), 0);
- mirror::ArtField* longField = sirt_klass->FindDeclaredInstanceField("longField", "J");
+ mirror::ArtField* longField = h_klass->FindDeclaredInstanceField("longField", "J");
ASSERT_TRUE(longField != nullptr);
ASSERT_EQ(FieldHelper(longField).GetTypeAsPrimitiveType(), Primitive::kPrimLong);
- ASSERT_EQ(longField->GetLong(sirt_instance.get()), static_cast<int64_t>(0));
+ ASSERT_EQ(longField->GetLong(h_instance.Get()), static_cast<int64_t>(0));
- mirror::ArtField* floatField = sirt_klass->FindDeclaredInstanceField("floatField", "F");
+ mirror::ArtField* floatField = h_klass->FindDeclaredInstanceField("floatField", "F");
ASSERT_TRUE(floatField != nullptr);
ASSERT_EQ(FieldHelper(floatField).GetTypeAsPrimitiveType(), Primitive::kPrimFloat);
- ASSERT_EQ(floatField->GetFloat(sirt_instance.get()), static_cast<float>(0.0f));
+ ASSERT_EQ(floatField->GetFloat(h_instance.Get()), static_cast<float>(0.0f));
- mirror::ArtField* doubleField = sirt_klass->FindDeclaredInstanceField("doubleField", "D");
+ mirror::ArtField* doubleField = h_klass->FindDeclaredInstanceField("doubleField", "D");
ASSERT_TRUE(doubleField != nullptr);
ASSERT_EQ(FieldHelper(doubleField).GetTypeAsPrimitiveType(), Primitive::kPrimDouble);
- ASSERT_EQ(doubleField->GetDouble(sirt_instance.get()), static_cast<double>(0.0));
+ ASSERT_EQ(doubleField->GetDouble(h_instance.Get()), static_cast<double>(0.0));
- mirror::ArtField* objectField = sirt_klass->FindDeclaredInstanceField("objectField",
+ mirror::ArtField* objectField = h_klass->FindDeclaredInstanceField("objectField",
"Ljava/lang/Object;");
ASSERT_TRUE(objectField != nullptr);
ASSERT_EQ(FieldHelper(objectField).GetTypeAsPrimitiveType(), Primitive::kPrimNot);
- ASSERT_EQ(objectField->GetObject(sirt_instance.get()), nullptr);
+ ASSERT_EQ(objectField->GetObject(h_instance.Get()), nullptr);
// Create a java.lang.Object instance to set objectField.
- SirtRef<mirror::Class> object_klass(soa.Self(),
- class_linker_->FindSystemClass(soa.Self(),
- "Ljava/lang/Object;"));
- ASSERT_TRUE(object_klass.get() != nullptr);
- SirtRef<mirror::Object> sirt_obj(soa.Self(), sirt_klass->AllocObject(soa.Self()));
- ASSERT_TRUE(sirt_obj.get() != nullptr);
- ASSERT_EQ(sirt_obj->GetClass(), sirt_klass.get());
+ Handle<mirror::Class> object_klass(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));
+ ASSERT_TRUE(object_klass.Get() != nullptr);
+ Handle<mirror::Object> h_obj(hs.NewHandle(h_klass->AllocObject(soa.Self())));
+ ASSERT_TRUE(h_obj.Get() != nullptr);
+ ASSERT_EQ(h_obj->GetClass(), h_klass.Get());
// Modify fields inside transaction and abort it.
Transaction transaction;
Runtime::Current()->EnterTransactionMode(&transaction);
- booleanField->SetBoolean<true>(sirt_instance.get(), true);
- byteField->SetByte<true>(sirt_instance.get(), 1);
- charField->SetChar<true>(sirt_instance.get(), 1u);
- shortField->SetShort<true>(sirt_instance.get(), 1);
- intField->SetInt<true>(sirt_instance.get(), 1);
- longField->SetLong<true>(sirt_instance.get(), 1);
- floatField->SetFloat<true>(sirt_instance.get(), 1.0);
- doubleField->SetDouble<true>(sirt_instance.get(), 1.0);
- objectField->SetObject<true>(sirt_instance.get(), sirt_obj.get());
+ booleanField->SetBoolean<true>(h_instance.Get(), true);
+ byteField->SetByte<true>(h_instance.Get(), 1);
+ charField->SetChar<true>(h_instance.Get(), 1u);
+ shortField->SetShort<true>(h_instance.Get(), 1);
+ intField->SetInt<true>(h_instance.Get(), 1);
+ longField->SetLong<true>(h_instance.Get(), 1);
+ floatField->SetFloat<true>(h_instance.Get(), 1.0);
+ doubleField->SetDouble<true>(h_instance.Get(), 1.0);
+ objectField->SetObject<true>(h_instance.Get(), h_obj.Get());
Runtime::Current()->ExitTransactionMode();
transaction.Abort();
// Check values have properly been restored to their original (default) value.
- EXPECT_EQ(booleanField->GetBoolean(sirt_instance.get()), false);
- EXPECT_EQ(byteField->GetByte(sirt_instance.get()), 0);
- EXPECT_EQ(charField->GetChar(sirt_instance.get()), 0u);
- EXPECT_EQ(shortField->GetShort(sirt_instance.get()), 0);
- EXPECT_EQ(intField->GetInt(sirt_instance.get()), 0);
- EXPECT_EQ(longField->GetLong(sirt_instance.get()), static_cast<int64_t>(0));
- EXPECT_EQ(floatField->GetFloat(sirt_instance.get()), static_cast<float>(0.0f));
- EXPECT_EQ(doubleField->GetDouble(sirt_instance.get()), static_cast<double>(0.0));
- EXPECT_EQ(objectField->GetObject(sirt_instance.get()), nullptr);
+ EXPECT_EQ(booleanField->GetBoolean(h_instance.Get()), false);
+ EXPECT_EQ(byteField->GetByte(h_instance.Get()), 0);
+ EXPECT_EQ(charField->GetChar(h_instance.Get()), 0u);
+ EXPECT_EQ(shortField->GetShort(h_instance.Get()), 0);
+ EXPECT_EQ(intField->GetInt(h_instance.Get()), 0);
+ EXPECT_EQ(longField->GetLong(h_instance.Get()), static_cast<int64_t>(0));
+ EXPECT_EQ(floatField->GetFloat(h_instance.Get()), static_cast<float>(0.0f));
+ EXPECT_EQ(doubleField->GetDouble(h_instance.Get()), static_cast<double>(0.0));
+ EXPECT_EQ(objectField->GetObject(h_instance.Get()), nullptr);
}
TEST_F(TransactionTest, StaticArrayFieldsTest) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::ClassLoader> class_loader(
- soa.Self(), soa.Decode<mirror::ClassLoader*>(LoadDex("Transaction")));
- ASSERT_TRUE(class_loader.get() != nullptr);
+ StackHandleScope<4> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(LoadDex("Transaction"))));
+ ASSERT_TRUE(class_loader.Get() != nullptr);
- SirtRef<mirror::Class> sirt_klass(soa.Self(),
- class_linker_->FindClass(soa.Self(), "LStaticArrayFieldsTest;",
- class_loader));
- ASSERT_TRUE(sirt_klass.get() != nullptr);
- class_linker_->EnsureInitialized(sirt_klass, true, true);
- ASSERT_TRUE(sirt_klass->IsInitialized());
+ Handle<mirror::Class> h_klass(
+ hs.NewHandle(class_linker_->FindClass(soa.Self(), "LStaticArrayFieldsTest;", class_loader)));
+ ASSERT_TRUE(h_klass.Get() != nullptr);
+ class_linker_->EnsureInitialized(h_klass, true, true);
+ ASSERT_TRUE(h_klass->IsInitialized());
// Lookup fields.
- mirror::ArtField* booleanArrayField = sirt_klass->FindDeclaredStaticField("booleanArrayField", "[Z");
+ mirror::ArtField* booleanArrayField = h_klass->FindDeclaredStaticField("booleanArrayField", "[Z");
ASSERT_TRUE(booleanArrayField != nullptr);
- mirror::BooleanArray* booleanArray = booleanArrayField->GetObject(sirt_klass.get())->AsBooleanArray();
+ mirror::BooleanArray* booleanArray = booleanArrayField->GetObject(h_klass.Get())->AsBooleanArray();
ASSERT_TRUE(booleanArray != nullptr);
ASSERT_EQ(booleanArray->GetLength(), 1);
ASSERT_EQ(booleanArray->GetWithoutChecks(0), false);
- mirror::ArtField* byteArrayField = sirt_klass->FindDeclaredStaticField("byteArrayField", "[B");
+ mirror::ArtField* byteArrayField = h_klass->FindDeclaredStaticField("byteArrayField", "[B");
ASSERT_TRUE(byteArrayField != nullptr);
- mirror::ByteArray* byteArray = byteArrayField->GetObject(sirt_klass.get())->AsByteArray();
+ mirror::ByteArray* byteArray = byteArrayField->GetObject(h_klass.Get())->AsByteArray();
ASSERT_TRUE(byteArray != nullptr);
ASSERT_EQ(byteArray->GetLength(), 1);
ASSERT_EQ(byteArray->GetWithoutChecks(0), 0);
- mirror::ArtField* charArrayField = sirt_klass->FindDeclaredStaticField("charArrayField", "[C");
+ mirror::ArtField* charArrayField = h_klass->FindDeclaredStaticField("charArrayField", "[C");
ASSERT_TRUE(charArrayField != nullptr);
- mirror::CharArray* charArray = charArrayField->GetObject(sirt_klass.get())->AsCharArray();
+ mirror::CharArray* charArray = charArrayField->GetObject(h_klass.Get())->AsCharArray();
ASSERT_TRUE(charArray != nullptr);
ASSERT_EQ(charArray->GetLength(), 1);
ASSERT_EQ(charArray->GetWithoutChecks(0), 0u);
- mirror::ArtField* shortArrayField = sirt_klass->FindDeclaredStaticField("shortArrayField", "[S");
+ mirror::ArtField* shortArrayField = h_klass->FindDeclaredStaticField("shortArrayField", "[S");
ASSERT_TRUE(shortArrayField != nullptr);
- mirror::ShortArray* shortArray = shortArrayField->GetObject(sirt_klass.get())->AsShortArray();
+ mirror::ShortArray* shortArray = shortArrayField->GetObject(h_klass.Get())->AsShortArray();
ASSERT_TRUE(shortArray != nullptr);
ASSERT_EQ(shortArray->GetLength(), 1);
ASSERT_EQ(shortArray->GetWithoutChecks(0), 0);
- mirror::ArtField* intArrayField = sirt_klass->FindDeclaredStaticField("intArrayField", "[I");
+ mirror::ArtField* intArrayField = h_klass->FindDeclaredStaticField("intArrayField", "[I");
ASSERT_TRUE(intArrayField != nullptr);
- mirror::IntArray* intArray = intArrayField->GetObject(sirt_klass.get())->AsIntArray();
+ mirror::IntArray* intArray = intArrayField->GetObject(h_klass.Get())->AsIntArray();
ASSERT_TRUE(intArray != nullptr);
ASSERT_EQ(intArray->GetLength(), 1);
ASSERT_EQ(intArray->GetWithoutChecks(0), 0);
- mirror::ArtField* longArrayField = sirt_klass->FindDeclaredStaticField("longArrayField", "[J");
+ mirror::ArtField* longArrayField = h_klass->FindDeclaredStaticField("longArrayField", "[J");
ASSERT_TRUE(longArrayField != nullptr);
- mirror::LongArray* longArray = longArrayField->GetObject(sirt_klass.get())->AsLongArray();
+ mirror::LongArray* longArray = longArrayField->GetObject(h_klass.Get())->AsLongArray();
ASSERT_TRUE(longArray != nullptr);
ASSERT_EQ(longArray->GetLength(), 1);
ASSERT_EQ(longArray->GetWithoutChecks(0), static_cast<int64_t>(0));
- mirror::ArtField* floatArrayField = sirt_klass->FindDeclaredStaticField("floatArrayField", "[F");
+ mirror::ArtField* floatArrayField = h_klass->FindDeclaredStaticField("floatArrayField", "[F");
ASSERT_TRUE(floatArrayField != nullptr);
- mirror::FloatArray* floatArray = floatArrayField->GetObject(sirt_klass.get())->AsFloatArray();
+ mirror::FloatArray* floatArray = floatArrayField->GetObject(h_klass.Get())->AsFloatArray();
ASSERT_TRUE(floatArray != nullptr);
ASSERT_EQ(floatArray->GetLength(), 1);
ASSERT_EQ(floatArray->GetWithoutChecks(0), static_cast<float>(0.0f));
- mirror::ArtField* doubleArrayField = sirt_klass->FindDeclaredStaticField("doubleArrayField", "[D");
+ mirror::ArtField* doubleArrayField = h_klass->FindDeclaredStaticField("doubleArrayField", "[D");
ASSERT_TRUE(doubleArrayField != nullptr);
- mirror::DoubleArray* doubleArray = doubleArrayField->GetObject(sirt_klass.get())->AsDoubleArray();
+ mirror::DoubleArray* doubleArray = doubleArrayField->GetObject(h_klass.Get())->AsDoubleArray();
ASSERT_TRUE(doubleArray != nullptr);
ASSERT_EQ(doubleArray->GetLength(), 1);
ASSERT_EQ(doubleArray->GetWithoutChecks(0), static_cast<double>(0.0f));
- mirror::ArtField* objectArrayField = sirt_klass->FindDeclaredStaticField("objectArrayField",
+ mirror::ArtField* objectArrayField = h_klass->FindDeclaredStaticField("objectArrayField",
"[Ljava/lang/Object;");
ASSERT_TRUE(objectArrayField != nullptr);
mirror::ObjectArray<mirror::Object>* objectArray =
- objectArrayField->GetObject(sirt_klass.get())->AsObjectArray<mirror::Object>();
+ objectArrayField->GetObject(h_klass.Get())->AsObjectArray<mirror::Object>();
ASSERT_TRUE(objectArray != nullptr);
ASSERT_EQ(objectArray->GetLength(), 1);
ASSERT_EQ(objectArray->GetWithoutChecks(0), nullptr);
// Create a java.lang.Object instance to set objectField.
- SirtRef<mirror::Class> object_klass(soa.Self(),
- class_linker_->FindSystemClass(soa.Self(),
- "Ljava/lang/Object;"));
- ASSERT_TRUE(object_klass.get() != nullptr);
- SirtRef<mirror::Object> sirt_obj(soa.Self(), sirt_klass->AllocObject(soa.Self()));
- ASSERT_TRUE(sirt_obj.get() != nullptr);
- ASSERT_EQ(sirt_obj->GetClass(), sirt_klass.get());
+ Handle<mirror::Class> object_klass(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));
+ ASSERT_TRUE(object_klass.Get() != nullptr);
+ Handle<mirror::Object> h_obj(hs.NewHandle(h_klass->AllocObject(soa.Self())));
+ ASSERT_TRUE(h_obj.Get() != nullptr);
+ ASSERT_EQ(h_obj->GetClass(), h_klass.Get());
// Modify fields inside transaction and abort it.
Transaction transaction;
@@ -394,7 +392,7 @@
longArray->SetWithoutChecks<true>(0, 1);
floatArray->SetWithoutChecks<true>(0, 1.0);
doubleArray->SetWithoutChecks<true>(0, 1.0);
- objectArray->SetWithoutChecks<true>(0, sirt_obj.get());
+ objectArray->SetWithoutChecks<true>(0, h_obj.Get());
Runtime::Current()->ExitTransactionMode();
transaction.Abort();
@@ -412,42 +410,41 @@
TEST_F(TransactionTest, EmptyClass) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::ClassLoader> class_loader(
- soa.Self(), soa.Decode<mirror::ClassLoader*>(LoadDex("Transaction")));
- ASSERT_TRUE(class_loader.get() != nullptr);
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(LoadDex("Transaction"))));
+ ASSERT_TRUE(class_loader.Get() != nullptr);
- SirtRef<mirror::Class> sirt_klass(soa.Self(),
- class_linker_->FindClass(soa.Self(),
- "LTransaction$EmptyStatic;",
- class_loader));
- ASSERT_TRUE(sirt_klass.get() != nullptr);
- class_linker_->VerifyClass(sirt_klass);
- ASSERT_TRUE(sirt_klass->IsVerified());
+ Handle<mirror::Class> h_klass(
+ hs.NewHandle(class_linker_->FindClass(soa.Self(), "LTransaction$EmptyStatic;", class_loader)));
+ ASSERT_TRUE(h_klass.Get() != nullptr);
+ class_linker_->VerifyClass(h_klass);
+ ASSERT_TRUE(h_klass->IsVerified());
Transaction transaction;
Runtime::Current()->EnterTransactionMode(&transaction);
- class_linker_->EnsureInitialized(sirt_klass, true, true);
+ class_linker_->EnsureInitialized(h_klass, true, true);
Runtime::Current()->ExitTransactionMode();
ASSERT_FALSE(soa.Self()->IsExceptionPending());
}
TEST_F(TransactionTest, StaticFieldClass) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::ClassLoader> class_loader(
- soa.Self(), soa.Decode<mirror::ClassLoader*>(LoadDex("Transaction")));
- ASSERT_TRUE(class_loader.get() != nullptr);
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(LoadDex("Transaction"))));
+ ASSERT_TRUE(class_loader.Get() != nullptr);
- SirtRef<mirror::Class> sirt_klass(soa.Self(),
- class_linker_->FindClass(soa.Self(),
- "LTransaction$StaticFieldClass;",
- class_loader));
- ASSERT_TRUE(sirt_klass.get() != nullptr);
- class_linker_->VerifyClass(sirt_klass);
- ASSERT_TRUE(sirt_klass->IsVerified());
+ Handle<mirror::Class> h_klass(
+ hs.NewHandle(class_linker_->FindClass(soa.Self(), "LTransaction$StaticFieldClass;",
+ class_loader)));
+ ASSERT_TRUE(h_klass.Get() != nullptr);
+ class_linker_->VerifyClass(h_klass);
+ ASSERT_TRUE(h_klass->IsVerified());
Transaction transaction;
Runtime::Current()->EnterTransactionMode(&transaction);
- class_linker_->EnsureInitialized(sirt_klass, true, true);
+ class_linker_->EnsureInitialized(h_klass, true, true);
Runtime::Current()->ExitTransactionMode();
ASSERT_FALSE(soa.Self()->IsExceptionPending());
}
@@ -455,39 +452,40 @@
TEST_F(TransactionTest, BlacklistedClass) {
ScopedObjectAccess soa(Thread::Current());
jobject jclass_loader = LoadDex("Transaction");
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
- soa.Decode<mirror::ClassLoader*>(jclass_loader));
- ASSERT_TRUE(class_loader.get() != nullptr);
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+ ASSERT_TRUE(class_loader.Get() != nullptr);
// Load and verify java.lang.ExceptionInInitializerError and java.lang.InternalError which will
// be thrown by class initialization due to native call.
- SirtRef<mirror::Class> sirt_klass(soa.Self(),
- class_linker_->FindSystemClass(soa.Self(),
- "Ljava/lang/ExceptionInInitializerError;"));
- ASSERT_TRUE(sirt_klass.get() != nullptr);
- class_linker_->VerifyClass(sirt_klass);
- ASSERT_TRUE(sirt_klass->IsVerified());
- sirt_klass.reset(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/InternalError;"));
- ASSERT_TRUE(sirt_klass.get() != nullptr);
- class_linker_->VerifyClass(sirt_klass);
- ASSERT_TRUE(sirt_klass->IsVerified());
+ Handle<mirror::Class> h_klass(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(),
+ "Ljava/lang/ExceptionInInitializerError;")));
+ ASSERT_TRUE(h_klass.Get() != nullptr);
+ class_linker_->VerifyClass(h_klass);
+ ASSERT_TRUE(h_klass->IsVerified());
+ h_klass.Assign(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/InternalError;"));
+ ASSERT_TRUE(h_klass.Get() != nullptr);
+ class_linker_->VerifyClass(h_klass);
+ ASSERT_TRUE(h_klass->IsVerified());
// Load and verify Transaction$NativeSupport used in class initialization.
- sirt_klass.reset(class_linker_->FindClass(soa.Self(), "LTransaction$NativeSupport;",
- class_loader));
- ASSERT_TRUE(sirt_klass.get() != nullptr);
- class_linker_->VerifyClass(sirt_klass);
- ASSERT_TRUE(sirt_klass->IsVerified());
+ h_klass.Assign(class_linker_->FindClass(soa.Self(), "LTransaction$NativeSupport;",
+ class_loader));
+ ASSERT_TRUE(h_klass.Get() != nullptr);
+ class_linker_->VerifyClass(h_klass);
+ ASSERT_TRUE(h_klass->IsVerified());
- sirt_klass.reset(class_linker_->FindClass(soa.Self(), "LTransaction$BlacklistedClass;",
- class_loader));
- ASSERT_TRUE(sirt_klass.get() != nullptr);
- class_linker_->VerifyClass(sirt_klass);
- ASSERT_TRUE(sirt_klass->IsVerified());
+ h_klass.Assign(class_linker_->FindClass(soa.Self(), "LTransaction$BlacklistedClass;",
+ class_loader));
+ ASSERT_TRUE(h_klass.Get() != nullptr);
+ class_linker_->VerifyClass(h_klass);
+ ASSERT_TRUE(h_klass->IsVerified());
Transaction transaction;
Runtime::Current()->EnterTransactionMode(&transaction);
- class_linker_->EnsureInitialized(sirt_klass, true, true);
+ class_linker_->EnsureInitialized(h_klass, true, true);
Runtime::Current()->ExitTransactionMode();
ASSERT_TRUE(soa.Self()->IsExceptionPending());
}
diff --git a/runtime/utils.cc b/runtime/utils.cc
index c332bdf..f26b598 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -25,7 +25,7 @@
#include <unistd.h>
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "dex_file-inl.h"
diff --git a/runtime/utils_test.cc b/runtime/utils_test.cc
index d425620..8a8834d 100644
--- a/runtime/utils_test.cc
+++ b/runtime/utils_test.cc
@@ -23,7 +23,7 @@
#include "mirror/object_array-inl.h"
#include "mirror/string.h"
#include "scoped_thread_state_change.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
#include <valgrind.h>
@@ -95,11 +95,12 @@
ScopedObjectAccess soa(Thread::Current());
EXPECT_EQ("null", PrettyTypeOf(NULL));
- SirtRef<mirror::String> s(soa.Self(), mirror::String::AllocFromModifiedUtf8(soa.Self(), ""));
- EXPECT_EQ("java.lang.String", PrettyTypeOf(s.get()));
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::String> s(hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "")));
+ EXPECT_EQ("java.lang.String", PrettyTypeOf(s.Get()));
- SirtRef<mirror::ShortArray> a(soa.Self(), mirror::ShortArray::Alloc(soa.Self(), 2));
- EXPECT_EQ("short[]", PrettyTypeOf(a.get()));
+ Handle<mirror::ShortArray> a(hs.NewHandle(mirror::ShortArray::Alloc(soa.Self(), 2)));
+ EXPECT_EQ("short[]", PrettyTypeOf(a.Get()));
mirror::Class* c = class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/String;");
ASSERT_TRUE(c != NULL);
diff --git a/runtime/verifier/method_verifier-inl.h b/runtime/verifier/method_verifier-inl.h
index c554394..62ecf4b 100644
--- a/runtime/verifier/method_verifier-inl.h
+++ b/runtime/verifier/method_verifier-inl.h
@@ -21,7 +21,7 @@
#include "method_verifier.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
namespace art {
namespace verifier {
@@ -39,11 +39,11 @@
}
inline mirror::ClassLoader* MethodVerifier::GetClassLoader() {
- return class_loader_->get();
+ return class_loader_->Get();
}
inline mirror::DexCache* MethodVerifier::GetDexCache() {
- return dex_cache_->get();
+ return dex_cache_->Get();
}
inline MethodReference MethodVerifier::GetMethodReference() const {
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 41ff96e..9dd366d 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -40,7 +40,7 @@
#include "register_line-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
#include "verifier/dex_gc_map.h"
namespace art {
@@ -115,15 +115,15 @@
}
return kHardFailure;
}
- Thread* self = Thread::Current();
- SirtRef<mirror::DexCache> dex_cache(self, kh.GetDexCache());
- SirtRef<mirror::ClassLoader> class_loader(self, klass->GetClassLoader());
+ StackHandleScope<2> hs(Thread::Current());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(kh.GetDexCache()));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(klass->GetClassLoader()));
return VerifyClass(&dex_file, dex_cache, class_loader, class_def, allow_soft_failures, error);
}
MethodVerifier::FailureKind MethodVerifier::VerifyClass(const DexFile* dex_file,
- SirtRef<mirror::DexCache>& dex_cache,
- SirtRef<mirror::ClassLoader>& class_loader,
+ Handle<mirror::DexCache>& dex_cache,
+ Handle<mirror::ClassLoader>& class_loader,
const DexFile::ClassDef* class_def,
bool allow_soft_failures,
std::string* error) {
@@ -233,8 +233,8 @@
MethodVerifier::FailureKind MethodVerifier::VerifyMethod(uint32_t method_idx,
const DexFile* dex_file,
- SirtRef<mirror::DexCache>& dex_cache,
- SirtRef<mirror::ClassLoader>& class_loader,
+ Handle<mirror::DexCache>& dex_cache,
+ Handle<mirror::ClassLoader>& class_loader,
const DexFile::ClassDef* class_def,
const DexFile::CodeItem* code_item,
mirror::ArtMethod* method,
@@ -278,8 +278,8 @@
void MethodVerifier::VerifyMethodAndDump(std::ostream& os, uint32_t dex_method_idx,
const DexFile* dex_file,
- SirtRef<mirror::DexCache>& dex_cache,
- SirtRef<mirror::ClassLoader>& class_loader,
+ Handle<mirror::DexCache>& dex_cache,
+ Handle<mirror::ClassLoader>& class_loader,
const DexFile::ClassDef* class_def,
const DexFile::CodeItem* code_item,
mirror::ArtMethod* method,
@@ -292,8 +292,8 @@
verifier.Dump(os);
}
-MethodVerifier::MethodVerifier(const DexFile* dex_file, SirtRef<mirror::DexCache>* dex_cache,
- SirtRef<mirror::ClassLoader>* class_loader,
+MethodVerifier::MethodVerifier(const DexFile* dex_file, Handle<mirror::DexCache>* dex_cache,
+ Handle<mirror::ClassLoader>* class_loader,
const DexFile::ClassDef* class_def,
const DexFile::CodeItem* code_item, uint32_t dex_method_idx,
mirror::ArtMethod* method, uint32_t method_access_flags,
@@ -332,9 +332,9 @@
void MethodVerifier::FindLocksAtDexPc(mirror::ArtMethod* m, uint32_t dex_pc,
std::vector<uint32_t>& monitor_enter_dex_pcs) {
MethodHelper mh(m);
- Thread* self = Thread::Current();
- SirtRef<mirror::DexCache> dex_cache(self, mh.GetDexCache());
- SirtRef<mirror::ClassLoader> class_loader(self, mh.GetClassLoader());
+ StackHandleScope<2> hs(Thread::Current());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(mh.GetDexCache()));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(mh.GetClassLoader()));
MethodVerifier verifier(&mh.GetDexFile(), &dex_cache, &class_loader, &mh.GetClassDef(),
mh.GetCodeItem(), m->GetDexMethodIndex(), m, m->GetAccessFlags(), false,
true);
@@ -357,9 +357,9 @@
mirror::ArtField* MethodVerifier::FindAccessedFieldAtDexPc(mirror::ArtMethod* m,
uint32_t dex_pc) {
MethodHelper mh(m);
- Thread* self = Thread::Current();
- SirtRef<mirror::DexCache> dex_cache(self, mh.GetDexCache());
- SirtRef<mirror::ClassLoader> class_loader(self, mh.GetClassLoader());
+ StackHandleScope<2> hs(Thread::Current());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(mh.GetDexCache()));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(mh.GetClassLoader()));
MethodVerifier verifier(&mh.GetDexFile(), &dex_cache, &class_loader, &mh.GetClassDef(),
mh.GetCodeItem(), m->GetDexMethodIndex(), m, m->GetAccessFlags(), true,
true);
@@ -388,9 +388,9 @@
mirror::ArtMethod* MethodVerifier::FindInvokedMethodAtDexPc(mirror::ArtMethod* m,
uint32_t dex_pc) {
MethodHelper mh(m);
- Thread* self = Thread::Current();
- SirtRef<mirror::DexCache> dex_cache(self, mh.GetDexCache());
- SirtRef<mirror::ClassLoader> class_loader(self, mh.GetClassLoader());
+ StackHandleScope<2> hs(Thread::Current());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(mh.GetDexCache()));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(mh.GetClassLoader()));
MethodVerifier verifier(&mh.GetDexFile(), &dex_cache, &class_loader, &mh.GetClassDef(),
mh.GetCodeItem(), m->GetDexMethodIndex(), m, m->GetAccessFlags(), true,
true);
@@ -1834,7 +1834,7 @@
<< array_type;
} else {
const RegType& component_type = reg_types_.GetComponentType(array_type,
- class_loader_->get());
+ class_loader_->Get());
DCHECK(!component_type.IsConflict());
if (component_type.IsNonZeroReferenceTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid fill-array-data with component type "
@@ -2149,7 +2149,7 @@
const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
uint32_t return_type_idx = dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
const char* descriptor = dex_file_->StringByTypeIdx(return_type_idx);
- return_type = &reg_types_.FromDescriptor(class_loader_->get(), descriptor, false);
+ return_type = &reg_types_.FromDescriptor(class_loader_->Get(), descriptor, false);
}
if (!return_type->IsLowHalf()) {
work_line_->SetResultRegisterType(*return_type);
@@ -2216,7 +2216,7 @@
*/
work_line_->MarkRefsAsInitialized(this_type);
}
- const RegType& return_type = reg_types_.FromDescriptor(class_loader_->get(),
+ const RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(),
return_type_descriptor, false);
if (!return_type.IsLowHalf()) {
work_line_->SetResultRegisterType(return_type);
@@ -2242,7 +2242,7 @@
} else {
descriptor = MethodHelper(called_method).GetReturnTypeDescriptor();
}
- const RegType& return_type = reg_types_.FromDescriptor(class_loader_->get(), descriptor,
+ const RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor,
false);
if (!return_type.IsLowHalf()) {
work_line_->SetResultRegisterType(return_type);
@@ -2300,7 +2300,7 @@
} else {
descriptor = MethodHelper(abs_method).GetReturnTypeDescriptor();
}
- const RegType& return_type = reg_types_.FromDescriptor(class_loader_->get(), descriptor,
+ const RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor,
false);
if (!return_type.IsLowHalf()) {
work_line_->SetResultRegisterType(return_type);
@@ -2566,7 +2566,7 @@
mirror::ArtMethod* called_method = VerifyInvokeVirtualQuickArgs(inst, is_range);
if (called_method != NULL) {
const char* descriptor = MethodHelper(called_method).GetReturnTypeDescriptor();
- const RegType& return_type = reg_types_.FromDescriptor(class_loader_->get(), descriptor,
+ const RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor,
false);
if (!return_type.IsLowHalf()) {
work_line_->SetResultRegisterType(return_type);
@@ -2837,7 +2837,7 @@
const RegType& result =
klass != NULL ? reg_types_.FromClass(descriptor, klass,
klass->CannotBeAssignedFromOtherTypes())
- : reg_types_.FromDescriptor(class_loader_->get(), descriptor, false);
+ : reg_types_.FromDescriptor(class_loader_->Get(), descriptor, false);
if (result.IsConflict()) {
Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "accessing broken descriptor '" << descriptor
<< "' in " << referrer;
@@ -3093,7 +3093,7 @@
<< " missing signature component";
return NULL;
}
- const RegType& reg_type = reg_types_.FromDescriptor(class_loader_->get(), descriptor, false);
+ const RegType& reg_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor, false);
uint32_t get_reg = is_range ? inst->VRegC_3rc() + actual_args : arg[actual_args];
if (reg_type.IsIntegralTypes()) {
const RegType& src_type = work_line_->GetRegisterType(get_reg);
@@ -3218,7 +3218,7 @@
<< " missing signature component";
return NULL;
}
- const RegType& reg_type = reg_types_.FromDescriptor(class_loader_->get(), descriptor, false);
+ const RegType& reg_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor, false);
uint32_t get_reg = is_range ? inst->VRegC_3rc() + actual_args : arg[actual_args];
if (!work_line_->VerifyRegisterType(get_reg, reg_type)) {
return res_method;
@@ -3262,7 +3262,7 @@
} else {
// Verify each register. If "arg_count" is bad, VerifyRegisterType() will run off the end of
// the list and fail. It's legal, if silly, for arg_count to be zero.
- const RegType& expected_type = reg_types_.GetComponentType(res_type, class_loader_->get());
+ const RegType& expected_type = reg_types_.GetComponentType(res_type, class_loader_->Get());
uint32_t arg_count = (is_range) ? inst->VRegA_3rc() : inst->VRegA_35c();
uint32_t arg[5];
if (!is_range) {
@@ -3304,7 +3304,7 @@
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "not array type " << array_type << " with aget";
} else {
/* verify the class */
- const RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_->get());
+ const RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_->Get());
if (!component_type.IsReferenceTypes() && !is_primitive) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "primitive array type " << array_type
<< " source for aget-object";
@@ -3381,7 +3381,7 @@
} else if (!array_type.IsArrayTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "not array type " << array_type << " with aput";
} else {
- const RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_->get());
+ const RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_->Get());
const uint32_t vregA = inst->VRegA_23x();
if (is_primitive) {
VerifyPrimitivePut(component_type, insn_type, vregA);
@@ -3523,7 +3523,7 @@
if (field_type == nullptr) {
const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx);
const char* descriptor = dex_file_->GetFieldTypeDescriptor(field_id);
- field_type = &reg_types_.FromDescriptor(class_loader_->get(), descriptor, false);
+ field_type = &reg_types_.FromDescriptor(class_loader_->Get(), descriptor, false);
}
DCHECK(field_type != nullptr);
const uint32_t vregA = (is_static) ? inst->VRegA_21c() : inst->VRegA_22c();
@@ -3547,7 +3547,7 @@
Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "expected field " << PrettyField(field)
<< " to be compatible with type '" << insn_type
<< "' but found type '" << *field_type
- << "' in get-object";
+ << "' in Get-object";
work_line_->SetRegisterType(vregA, reg_types_.Conflict());
return;
}
@@ -3590,7 +3590,7 @@
if (field_type == nullptr) {
const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx);
const char* descriptor = dex_file_->GetFieldTypeDescriptor(field_id);
- field_type = &reg_types_.FromDescriptor(class_loader_->get(), descriptor, false);
+ field_type = &reg_types_.FromDescriptor(class_loader_->Get(), descriptor, false);
}
DCHECK(field_type != nullptr);
const uint32_t vregA = (is_static) ? inst->VRegA_21c() : inst->VRegA_22c();
@@ -3666,7 +3666,7 @@
// compile time
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected field " << PrettyField(field)
<< " to be of type '" << insn_type
- << "' but found type '" << *field_type << "' in get";
+ << "' but found type '" << *field_type << "' in Get";
return;
}
} else {
@@ -3842,7 +3842,7 @@
const DexFile::ProtoId& proto_id = dex_file_->GetMethodPrototype(method_id);
uint16_t return_type_idx = proto_id.return_type_idx_;
const char* descriptor = dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(return_type_idx));
- return_type_ = &reg_types_.FromDescriptor(class_loader_->get(), descriptor, false);
+ return_type_ = &reg_types_.FromDescriptor(class_loader_->Get(), descriptor, false);
}
}
return *return_type_;
@@ -3858,7 +3858,7 @@
declaring_class_ = &reg_types_.FromClass(descriptor, klass,
klass->CannotBeAssignedFromOtherTypes());
} else {
- declaring_class_ = &reg_types_.FromDescriptor(class_loader_->get(), descriptor, false);
+ declaring_class_ = &reg_types_.FromDescriptor(class_loader_->Get(), descriptor, false);
}
}
return *declaring_class_;
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 5f13191..14200f7 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -33,12 +33,12 @@
#include "reg_type_cache-inl.h"
#include "register_line.h"
#include "safe_map.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
namespace art {
struct ReferenceMap2Visitor;
-template<class T> class SirtRef;
+template<class T> class Handle;
namespace verifier {
@@ -142,15 +142,15 @@
/* Verify a class. Returns "kNoFailure" on success. */
static FailureKind VerifyClass(mirror::Class* klass, bool allow_soft_failures, std::string* error)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static FailureKind VerifyClass(const DexFile* dex_file, SirtRef<mirror::DexCache>& dex_cache,
- SirtRef<mirror::ClassLoader>& class_loader,
+ static FailureKind VerifyClass(const DexFile* dex_file, Handle<mirror::DexCache>& dex_cache,
+ Handle<mirror::ClassLoader>& class_loader,
const DexFile::ClassDef* class_def,
bool allow_soft_failures, std::string* error)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void VerifyMethodAndDump(std::ostream& os, uint32_t method_idx, const DexFile* dex_file,
- SirtRef<mirror::DexCache>& dex_cache,
- SirtRef<mirror::ClassLoader>& class_loader,
+ Handle<mirror::DexCache>& dex_cache,
+ Handle<mirror::ClassLoader>& class_loader,
const DexFile::ClassDef* class_def,
const DexFile::CodeItem* code_item,
mirror::ArtMethod* method, uint32_t method_access_flags)
@@ -205,8 +205,8 @@
return can_load_classes_;
}
- MethodVerifier(const DexFile* dex_file, SirtRef<mirror::DexCache>* dex_cache,
- SirtRef<mirror::ClassLoader>* class_loader, const DexFile::ClassDef* class_def,
+ MethodVerifier(const DexFile* dex_file, Handle<mirror::DexCache>* dex_cache,
+ Handle<mirror::ClassLoader>* class_loader, const DexFile::ClassDef* class_def,
const DexFile::CodeItem* code_item, uint32_t method_idx, mirror::ArtMethod* method,
uint32_t access_flags, bool can_load_classes, bool allow_soft_failures)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -255,8 +255,8 @@
* for code flow problems.
*/
static FailureKind VerifyMethod(uint32_t method_idx, const DexFile* dex_file,
- SirtRef<mirror::DexCache>& dex_cache,
- SirtRef<mirror::ClassLoader>& class_loader,
+ Handle<mirror::DexCache>& dex_cache,
+ Handle<mirror::ClassLoader>& class_loader,
const DexFile::ClassDef* class_def_idx,
const DexFile::CodeItem* code_item,
mirror::ArtMethod* method, uint32_t method_access_flags,
@@ -347,7 +347,7 @@
/* Ensure that the wide register index is valid for this code item. */
bool CheckWideRegisterIndex(uint32_t idx);
- // Perform static checks on a field get or set instruction. All we do here is ensure that the
+ // Perform static checks on a field Get or set instruction. All we do here is ensure that the
// field index is in the valid range.
bool CheckFieldIndex(uint32_t idx);
@@ -633,9 +633,9 @@
const RegType* return_type_; // Lazily computed return type of the method.
const DexFile* const dex_file_; // The dex file containing the method.
// The dex_cache for the declaring class of the method.
- SirtRef<mirror::DexCache>* dex_cache_ GUARDED_BY(Locks::mutator_lock_);
+ Handle<mirror::DexCache>* dex_cache_ GUARDED_BY(Locks::mutator_lock_);
// The class loader for the declaring class of the method.
- SirtRef<mirror::ClassLoader>* class_loader_ GUARDED_BY(Locks::mutator_lock_);
+ Handle<mirror::ClassLoader>* class_loader_ GUARDED_BY(Locks::mutator_lock_);
const DexFile::ClassDef* const class_def_; // The class def of the declaring class of the method.
const DexFile::CodeItem* const code_item_; // The code item containing the code for the method.
const RegType* declaring_class_; // Lazily computed reg type of the method's declaring class.
diff --git a/runtime/verifier/method_verifier_test.cc b/runtime/verifier/method_verifier_test.cc
index 9dca7f5..2bcf3e0 100644
--- a/runtime/verifier/method_verifier_test.cc
+++ b/runtime/verifier/method_verifier_test.cc
@@ -18,7 +18,7 @@
#include <stdio.h>
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#include "class_linker.h"
#include "common_runtime_test.h"
#include "dex_file.h"
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index 111e867..689a33e 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -142,15 +142,16 @@
// Try resolving class
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
Thread* self = Thread::Current();
- SirtRef<mirror::ClassLoader> class_loader(self, loader);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(loader));
mirror::Class* klass = NULL;
if (can_load_classes_) {
klass = class_linker->FindClass(self, descriptor, class_loader);
} else {
klass = class_linker->LookupClass(descriptor, loader);
- if (klass != NULL && !klass->IsLoaded()) {
+ if (klass != nullptr && !klass->IsLoaded()) {
// We found the class but without it being loaded its not safe for use.
- klass = NULL;
+ klass = nullptr;
}
}
return klass;
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index 31b0113..a3e3e3b 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -94,7 +94,7 @@
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invoke lacks 'this'";
return verifier_->GetRegTypeCache()->Conflict();
}
- /* get the element type of the array held in vsrc */
+ /* Get the element type of the array held in vsrc */
const uint32_t this_reg = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
const RegType& this_type = GetRegisterType(this_reg);
if (!this_type.IsReferenceTypes()) {
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index 8b2dadb..f9f3e31 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -22,7 +22,7 @@
#include "dex_instruction.h"
#include "reg_type.h"
#include "safe_map.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
namespace art {
namespace verifier {
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index 546eb40..fdc6e3f 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -40,6 +40,7 @@
jclass WellKnownClasses::java_lang_reflect_Proxy;
jclass WellKnownClasses::java_lang_RuntimeException;
jclass WellKnownClasses::java_lang_StackOverflowError;
+jclass WellKnownClasses::java_lang_String;
jclass WellKnownClasses::java_lang_System;
jclass WellKnownClasses::java_lang_Thread;
jclass WellKnownClasses::java_lang_Thread$UncaughtExceptionHandler;
@@ -142,6 +143,7 @@
java_lang_reflect_Proxy = CacheClass(env, "java/lang/reflect/Proxy");
java_lang_RuntimeException = CacheClass(env, "java/lang/RuntimeException");
java_lang_StackOverflowError = CacheClass(env, "java/lang/StackOverflowError");
+ java_lang_String = CacheClass(env, "java/lang/String");
java_lang_System = CacheClass(env, "java/lang/System");
java_lang_Thread = CacheClass(env, "java/lang/Thread");
java_lang_Thread$UncaughtExceptionHandler = CacheClass(env, "java/lang/Thread$UncaughtExceptionHandler");
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index bc928d0..f6c2930 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -54,6 +54,7 @@
static jclass java_lang_reflect_Proxy;
static jclass java_lang_RuntimeException;
static jclass java_lang_StackOverflowError;
+ static jclass java_lang_String;
static jclass java_lang_System;
static jclass java_lang_Thread;
static jclass java_lang_ThreadGroup;
diff --git a/runtime/zip_archive.cc b/runtime/zip_archive.cc
index ddac7d4..13b4219 100644
--- a/runtime/zip_archive.cc
+++ b/runtime/zip_archive.cc
@@ -26,7 +26,7 @@
#include "base/stringprintf.h"
#include "base/unix_file/fd_file.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
namespace art {
diff --git a/runtime/zip_archive.h b/runtime/zip_archive.h
index 3ef0e6b..edaa88b 100644
--- a/runtime/zip_archive.h
+++ b/runtime/zip_archive.h
@@ -27,7 +27,7 @@
#include "mem_map.h"
#include "os.h"
#include "safe_map.h"
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
namespace art {
diff --git a/runtime/zip_archive_test.cc b/runtime/zip_archive_test.cc
index c43fee5..d0624cf 100644
--- a/runtime/zip_archive_test.cc
+++ b/runtime/zip_archive_test.cc
@@ -21,7 +21,7 @@
#include <sys/types.h>
#include <zlib.h>
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#include "common_runtime_test.h"
#include "os.h"
diff --git a/test/111-unresolvable-exception/build b/test/111-unresolvable-exception/build
new file mode 100644
index 0000000..c21a9ef
--- /dev/null
+++ b/test/111-unresolvable-exception/build
@@ -0,0 +1,25 @@
+#!/bin/bash
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Stop if something fails.
+set -e
+
+mkdir classes
+${JAVAC} -d classes `find src -name '*.java'`
+rm classes/TestException.class
+
+${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex classes
+zip $TEST_NAME.jar classes.dex
diff --git a/test/111-unresolvable-exception/expected.txt b/test/111-unresolvable-exception/expected.txt
new file mode 100644
index 0000000..052dd74
--- /dev/null
+++ b/test/111-unresolvable-exception/expected.txt
@@ -0,0 +1 @@
+Caught class java.lang.NoClassDefFoundError
diff --git a/test/111-unresolvable-exception/info.txt b/test/111-unresolvable-exception/info.txt
new file mode 100644
index 0000000..5ba3733
--- /dev/null
+++ b/test/111-unresolvable-exception/info.txt
@@ -0,0 +1,2 @@
+Test that we do not segfault when we check a catch handler
+for an unresolvable exception.
diff --git a/test/111-unresolvable-exception/src/Main.java b/test/111-unresolvable-exception/src/Main.java
new file mode 100644
index 0000000..ba07ee1
--- /dev/null
+++ b/test/111-unresolvable-exception/src/Main.java
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ static public void main(String[] args) throws Exception {
+ try {
+ check(false);
+ } catch (Throwable t) { // Should catch the NoClassDefFoundError
+ System.out.println("Caught " + t.getClass());
+ }
+ }
+
+ private static void check(boolean b) {
+ try {
+ if (b) { // Need this to not be dead code, but also not be invoked.
+ throwsTestException(); // TestException is checked, so we need something potentially
+ // throwing it.
+ }
+ throw new RuntimeException(); // Trigger exception handling.
+ } catch (TestException e) { // This handler will have an unresolvable class.
+ } catch (Exception e) { // General-purpose handler
+ System.out.println("Should not get here!");
+ }
+ }
+
+ // This avoids having to construct one explicitly, which won't work.
+ private static native void throwsTestException() throws TestException;
+}
diff --git a/test/111-unresolvable-exception/src/TestException.java b/test/111-unresolvable-exception/src/TestException.java
new file mode 100644
index 0000000..2d8b234
--- /dev/null
+++ b/test/111-unresolvable-exception/src/TestException.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class TestException extends Exception {
+}
diff --git a/test/Android.mk b/test/Android.mk
index aacd7b4..9b79abf 100644
--- a/test/Android.mk
+++ b/test/Android.mk
@@ -78,7 +78,7 @@
LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common.mk
LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
include $(BUILD_JAVA_LIBRARY)
-
+
ART_TEST_TARGET_DEX_FILES += $$(LOCAL_INSTALLED_MODULE)
ART_TEST_TARGET_DEX_FILES$(ART_PHONY_TEST_TARGET_SUFFIX) += $$(LOCAL_INSTALLED_MODULE)
@@ -88,7 +88,7 @@
# TODO: make this a simple copy
$(4)/$(1)-$(2).jar: $(3)/$(1)-$(2).jar $(4)
cp $$< $(4)/
- endif
+ endif
endif
ifeq ($(ART_BUILD_HOST),true)
@@ -108,6 +108,10 @@
$(foreach dir,$(TEST_DEX_DIRECTORIES), $(eval $(call build-art-test-dex,art-test-dex,$(dir),$(ART_NATIVETEST_OUT),$(2ND_ART_NATIVETEST_OUT))))
$(foreach dir,$(TEST_OAT_DIRECTORIES), $(eval $(call build-art-test-dex,oat-test-dex,$(dir),$(ART_TEST_OUT),$(2ND_ART_TEST_OUT))))
+# Used outside the art project to get a list of the current tests
+ART_TEST_DEX_MAKE_TARGETS := $(addprefix art-test-dex-, $(TEST_DEX_DIRECTORIES))
+ART_TEST_OAT_MAKE_TARGETS := $(addprefix oat-test-dex-, $(TEST_OAT_DIRECTORIES))
+
# Rules to explicitly create 2nd-arch test directories, as we use a "cp" for them
# instead of BUILD_JAVA_LIBRARY
ifneq ($(2ND_ART_NATIVETEST_OUT),)
@@ -151,7 +155,7 @@
$(call declare-test-art-oat-targets-impl,$(1),)
$(HOST_OUT_JAVA_LIBRARIES)/oat-test-dex-$(1).odex: $(HOST_OUT_JAVA_LIBRARIES)/oat-test-dex-$(1).jar $(HOST_CORE_IMG_OUT) | $(DEX2OATD)
- $(DEX2OATD) $(DEX2OAT_FLAGS) --runtime-arg -Xms16m --runtime-arg -Xmx16m --boot-image=$(HOST_CORE_IMG_OUT) --dex-file=$(PWD)/$$< --oat-file=$(PWD)/$$@ --instruction-set=$(ART_HOST_ARCH) --host --android-root=$(HOST_OUT)
+ $(DEX2OATD) $(DEX2OAT_FLAGS) --runtime-arg -Xms16m --runtime-arg -Xmx16m --boot-image=$(HOST_CORE_IMG_OUT) --dex-file=$$(realpath $$<) --oat-file=$$(realpath $(HOST_OUT_JAVA_LIBRARIES))/oat-test-dex-$(1).odex --instruction-set=$(ART_HOST_ARCH) --host --android-root=$(HOST_OUT)
.PHONY: test-art-host-oat-default-$(1)
test-art-host-oat-default-$(1): $(HOST_OUT_JAVA_LIBRARIES)/oat-test-dex-$(1).odex test-art-host-dependencies
@@ -159,7 +163,7 @@
ANDROID_DATA=/tmp/android-data/test-art-host-oat-default-$(1) \
ANDROID_ROOT=$(HOST_OUT) \
LD_LIBRARY_PATH=$(HOST_OUT_SHARED_LIBRARIES) \
- $(HOST_OUT_EXECUTABLES)/dalvikvm $(DALVIKVM_FLAGS) -XXlib:libartd.so -Ximage:$(shell pwd)/$(HOST_CORE_IMG_OUT) -classpath $(HOST_OUT_JAVA_LIBRARIES)/oat-test-dex-$(1).jar -Djava.library.path=$(HOST_OUT_SHARED_LIBRARIES) $(1) $(2) \
+ $(HOST_OUT_EXECUTABLES)/dalvikvm $(DALVIKVM_FLAGS) -XXlib:libartd.so -Ximage:$$(realpath $(HOST_CORE_IMG_OUT)) -classpath $(HOST_OUT_JAVA_LIBRARIES)/oat-test-dex-$(1).jar -Djava.library.path=$(HOST_OUT_SHARED_LIBRARIES) $(1) $(2) \
&& echo test-art-host-oat-default-$(1) PASSED || (echo test-art-host-oat-default-$(1) FAILED && exit 1)
$(hide) rm -r /tmp/android-data/test-art-host-oat-default-$(1)
@@ -169,7 +173,7 @@
ANDROID_DATA=/tmp/android-data/test-art-host-oat-interpreter-$(1) \
ANDROID_ROOT=$(HOST_OUT) \
LD_LIBRARY_PATH=$(HOST_OUT_SHARED_LIBRARIES) \
- $(HOST_OUT_EXECUTABLES)/dalvikvm -XXlib:libartd.so -Ximage:$(shell pwd)/$(HOST_CORE_IMG_OUT) -Xint -classpath $(HOST_OUT_JAVA_LIBRARIES)/oat-test-dex-$(1).jar -Djava.library.path=$(HOST_OUT_SHARED_LIBRARIES) $(1) $(2) \
+ $(HOST_OUT_EXECUTABLES)/dalvikvm -XXlib:libartd.so -Ximage:$$(realpath $(HOST_CORE_IMG_OUT)) -Xint -classpath $(HOST_OUT_JAVA_LIBRARIES)/oat-test-dex-$(1).jar -Djava.library.path=$(HOST_OUT_SHARED_LIBRARIES) $(1) $(2) \
&& echo test-art-host-oat-interpreter-$(1) PASSED || (echo test-art-host-oat-interpreter-$(1) FAILED && exit 1)
$(hide) rm -r /tmp/android-data/test-art-host-oat-interpreter-$(1)
diff --git a/test/ReferenceMap/stack_walk_refmap_jni.cc b/test/ReferenceMap/stack_walk_refmap_jni.cc
index 180db4c..d8a0eef 100644
--- a/test/ReferenceMap/stack_walk_refmap_jni.cc
+++ b/test/ReferenceMap/stack_walk_refmap_jni.cc
@@ -16,7 +16,7 @@
#include <stdio.h>
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#include "class_linker.h"
#include "dex_file-inl.h"
#include "gc_map.h"
diff --git a/test/StackWalk/stack_walk_jni.cc b/test/StackWalk/stack_walk_jni.cc
index 528586e..d230ddd 100644
--- a/test/StackWalk/stack_walk_jni.cc
+++ b/test/StackWalk/stack_walk_jni.cc
@@ -16,7 +16,7 @@
#include <stdio.h>
-#include "UniquePtr.h"
+#include "UniquePtrCompat.h"
#include "class_linker.h"
#include "gc_map.h"
#include "mirror/art_method.h"