-rw-r--r--  compiler/driver/compiler_driver-inl.h | 2
-rw-r--r--  compiler/jni/quick/jni_compiler.cc | 30
-rw-r--r--  compiler/optimizing/code_generator_mips.cc | 212
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc | 5
-rw-r--r--  compiler/utils/arm/jni_macro_assembler_arm_vixl.cc | 49
-rw-r--r--  compiler/utils/arm/jni_macro_assembler_arm_vixl.h | 19
-rw-r--r--  compiler/utils/arm64/jni_macro_assembler_arm64.cc | 39
-rw-r--r--  compiler/utils/arm64/jni_macro_assembler_arm64.h | 19
-rw-r--r--  compiler/utils/jni_macro_assembler.h | 61
-rw-r--r--  compiler/utils/mips/assembler_mips.h | 30
-rw-r--r--  compiler/utils/mips64/assembler_mips64.h | 31
-rw-r--r--  compiler/utils/x86/jni_macro_assembler_x86.cc | 50
-rw-r--r--  compiler/utils/x86/jni_macro_assembler_x86.h | 21
-rw-r--r--  compiler/utils/x86_64/jni_macro_assembler_x86_64.cc | 46
-rw-r--r--  compiler/utils/x86_64/jni_macro_assembler_x86_64.h | 19
-rw-r--r--  runtime/art_field-inl.h | 4
-rw-r--r--  runtime/base/mutex.cc | 11
-rw-r--r--  runtime/base/mutex.h | 9
-rw-r--r--  runtime/class_linker-inl.h | 2
-rw-r--r--  runtime/class_linker.cc | 16
-rw-r--r--  runtime/class_linker_test.cc | 6
-rw-r--r--  runtime/common_runtime_test.cc | 4
-rw-r--r--  runtime/common_throws.cc | 71
-rw-r--r--  runtime/common_throws.h | 64
-rw-r--r--  runtime/debugger.cc | 5
-rw-r--r--  runtime/dex_file_annotations.cc | 2
-rw-r--r--  runtime/entrypoints/entrypoint_utils-inl.h | 10
-rw-r--r--  runtime/entrypoints/quick/quick_jni_entrypoints.cc | 9
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc | 4
-rw-r--r--  runtime/gc/heap.cc | 2
-rw-r--r--  runtime/gc/reference_processor.cc | 14
-rw-r--r--  runtime/gc/reference_processor.h | 11
-rw-r--r--  runtime/gc/reference_queue.cc | 34
-rw-r--r--  runtime/gc/reference_queue.h | 9
-rw-r--r--  runtime/gc/reference_queue_test.cc | 4
-rw-r--r--  runtime/indirect_reference_table_test.cc | 65
-rw-r--r--  runtime/interpreter/interpreter.cc | 9
-rw-r--r--  runtime/interpreter/interpreter_common.cc | 167
-rw-r--r--  runtime/interpreter/interpreter_common.h | 18
-rw-r--r--  runtime/interpreter/interpreter_switch_impl.cc | 1
-rw-r--r--  runtime/interpreter/unstarted_runtime.cc | 17
-rw-r--r--  runtime/java_vm_ext.cc | 44
-rw-r--r--  runtime/java_vm_ext.h | 63
-rw-r--r--  runtime/jni_internal.cc | 24
-rw-r--r--  runtime/jvalue-inl.h | 32
-rw-r--r--  runtime/jvalue.h | 9
-rw-r--r--  runtime/method_handles-inl.h | 260
-rw-r--r--  runtime/method_handles.h | 25
-rw-r--r--  runtime/mirror/array-inl.h | 1
-rw-r--r--  runtime/mirror/array.cc | 2
-rw-r--r--  runtime/mirror/array.h | 3
-rw-r--r--  runtime/mirror/class-inl.h | 78
-rw-r--r--  runtime/mirror/class.cc | 250
-rw-r--r--  runtime/mirror/class.h | 156
-rw-r--r--  runtime/mirror/field-inl.h | 4
-rw-r--r--  runtime/mirror/method.cc | 12
-rw-r--r--  runtime/mirror/method_type.cc | 2
-rw-r--r--  runtime/mirror/object_array-inl.h | 55
-rw-r--r--  runtime/mirror/object_array.h | 43
-rw-r--r--  runtime/mirror/object_reference-inl.h | 6
-rw-r--r--  runtime/mirror/object_reference.h | 3
-rw-r--r--  runtime/mirror/object_test.cc | 8
-rw-r--r--  runtime/mirror/reference-inl.h | 20
-rw-r--r--  runtime/mirror/reference.cc | 4
-rw-r--r--  runtime/mirror/reference.h | 21
-rw-r--r--  runtime/mirror/stack_trace_element.cc | 6
-rw-r--r--  runtime/native/java_lang_Class.cc | 17
-rw-r--r--  runtime/native/java_lang_System.cc | 7
-rw-r--r--  runtime/native/java_lang_ref_Reference.cc | 4
-rw-r--r--  runtime/native/java_lang_reflect_Constructor.cc | 2
-rw-r--r--  runtime/native/java_lang_reflect_Executable.cc | 2
-rw-r--r--  runtime/native/sun_misc_Unsafe.cc | 10
-rw-r--r--  runtime/openjdkjvmti/OpenjdkJvmTi.cc | 3
-rw-r--r--  runtime/openjdkjvmti/heap.cc | 48
-rw-r--r--  runtime/openjdkjvmti/heap.h | 2
-rw-r--r--  runtime/proxy_test.cc | 4
-rw-r--r--  runtime/reflection-inl.h | 2
-rw-r--r--  runtime/reflection.cc | 2
-rw-r--r--  runtime/reflection_test.cc | 47
-rw-r--r--  runtime/scoped_thread_state_change-inl.h | 2
-rw-r--r--  runtime/thread.cc | 2
-rw-r--r--  runtime/thread.h | 4
-rwxr-xr-x  test/907-get-loaded-classes/build | 17
-rw-r--r--  test/907-get-loaded-classes/expected.txt | 0
-rw-r--r--  test/907-get-loaded-classes/get_loaded_classes.cc | 81
-rw-r--r--  test/907-get-loaded-classes/get_loaded_classes.h | 30
-rw-r--r--  test/907-get-loaded-classes/info.txt | 1
-rwxr-xr-x  test/907-get-loaded-classes/run | 43
-rw-r--r--  test/907-get-loaded-classes/src/Main.java | 61
-rw-r--r--  test/955-methodhandles-smali/expected.txt | 6
-rw-r--r--  test/955-methodhandles-smali/smali/Main.smali | 132
-rw-r--r--  test/Android.bp | 1
-rw-r--r--  test/Android.run-test.mk | 3
93 files changed, 2200 insertions(+), 665 deletions(-)
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index d807fcad96..971151665a 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -98,7 +98,7 @@ inline std::pair<bool, bool> CompilerDriver::IsFastInstanceField(
DCHECK(!resolved_field->IsStatic());
ObjPtr<mirror::Class> fields_class = resolved_field->GetDeclaringClass();
bool fast_get = referrer_class != nullptr &&
- referrer_class->CanAccessResolvedField(fields_class.Ptr(),
+ referrer_class->CanAccessResolvedField(fields_class,
resolved_field,
dex_cache,
field_idx);
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index bfb342f966..13d8c166cc 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -276,10 +276,32 @@ static CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
__ IncreaseFrameSize(main_out_arg_size);
// Call the read barrier for the declaring class loaded from the method for a static call.
+ // Skip this for @CriticalNative because we didn't build a HandleScope to begin with.
// Note that we always have outgoing param space available for at least two params.
if (kUseReadBarrier && is_static && !is_critical_native) {
- // XX: Why is this necessary only for the jclass? Why not for every single object ref?
- // Skip this for @CriticalNative because we didn't build a HandleScope to begin with.
+ const bool kReadBarrierFastPath =
+ (instruction_set != kMips) && (instruction_set != kMips64);
+ std::unique_ptr<JNIMacroLabel> skip_cold_path_label;
+ if (kReadBarrierFastPath) {
+ skip_cold_path_label = __ CreateLabel();
+ // Fast path for supported targets.
+ //
+ // Check if gc_is_marking is set -- if it's not, we don't need
+ // a read barrier so skip it.
+ __ LoadFromThread(main_jni_conv->InterproceduralScratchRegister(),
+ Thread::IsGcMarkingOffset<kPointerSize>(),
+ Thread::IsGcMarkingSize());
+ // Jump over the slow path if gc_is_marking is false.
+ __ Jump(skip_cold_path_label.get(),
+ JNIMacroUnaryCondition::kZero,
+ main_jni_conv->InterproceduralScratchRegister());
+ }
+
+ // Construct slow path for read barrier:
+ //
+ // Call into the runtime's ReadBarrierJni and have it fix up
+ // the object address if it was moved.
+
ThreadOffset<kPointerSize> read_barrier = QUICK_ENTRYPOINT_OFFSET(kPointerSize,
pReadBarrierJni);
main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
@@ -310,6 +332,10 @@ static CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
__ CallFromThread(read_barrier, main_jni_conv->InterproceduralScratchRegister());
}
main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size)); // Reset.
+
+ if (kReadBarrierFastPath) {
+ __ Bind(skip_cold_path_label.get());
+ }
}
// 6. Call into appropriate JniMethodStart passing Thread* so that transition out of Runnable
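
In effect, for a static native method the stub now behaves like the following
sketch (illustrative C++ only; GetIsGcMarking mirrors the Thread::IsGcMarkingOffset
byte probed above, and ReadBarrierJniSlowPath is a hypothetical stand-in for the
pReadBarrierJni entrypoint):

// Illustrative only: the shape of the code emitted by the hunk above.
void MaybeEmitJniReadBarrier(Thread* self, jclass* declaring_class_ref) {
  // Fast path (non-MIPS targets): a single byte load of the thread-local
  // is-GC-marking flag decides whether the slow path can be skipped.
  if (!self->GetIsGcMarking()) {
    return;
  }
  // Slow path: have the runtime fix up the declaring-class reference
  // if the concurrent copying GC moved it.
  ReadBarrierJniSlowPath(declaring_class_ref, self);  // hypothetical name
}
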
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index bc8bb480ec..e336df8c6c 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -2255,6 +2255,11 @@ void LocationsBuilderMIPS::VisitCompare(HCompare* compare) {
case Primitive::kPrimShort:
case Primitive::kPrimChar:
case Primitive::kPrimInt:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
case Primitive::kPrimLong:
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
@@ -2823,19 +2828,36 @@ void InstructionCodeGeneratorMIPS::GenerateIntCompare(IfCondition cond,
switch (cond) {
case kCondEQ:
case kCondNE:
- if (use_imm && IsUint<16>(rhs_imm)) {
- __ Xori(dst, lhs, rhs_imm);
- } else {
- if (use_imm) {
- rhs_reg = TMP;
- __ LoadConst32(rhs_reg, rhs_imm);
+ if (use_imm && IsInt<16>(-rhs_imm)) {
+ if (rhs_imm == 0) {
+ if (cond == kCondEQ) {
+ __ Sltiu(dst, lhs, 1);
+ } else {
+ __ Sltu(dst, ZERO, lhs);
+ }
+ } else {
+ __ Addiu(dst, lhs, -rhs_imm);
+ if (cond == kCondEQ) {
+ __ Sltiu(dst, dst, 1);
+ } else {
+ __ Sltu(dst, ZERO, dst);
+ }
}
- __ Xor(dst, lhs, rhs_reg);
- }
- if (cond == kCondEQ) {
- __ Sltiu(dst, dst, 1);
} else {
- __ Sltu(dst, ZERO, dst);
+ if (use_imm && IsUint<16>(rhs_imm)) {
+ __ Xori(dst, lhs, rhs_imm);
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst32(rhs_reg, rhs_imm);
+ }
+ __ Xor(dst, lhs, rhs_reg);
+ }
+ if (cond == kCondEQ) {
+ __ Sltiu(dst, dst, 1);
+ } else {
+ __ Sltu(dst, ZERO, dst);
+ }
}
break;
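
The new R2 sequence for kCondEQ/kCondNE relies on the identity that lhs == imm
exactly when the 32-bit difference lhs - imm is unsigned-less-than 1. A minimal
C++ sketch of that equivalence (hypothetical helper names, applicable when -imm
fits in a 16-bit immediate):

#include <cstdint>

// What `addiu dst, lhs, -imm` + `sltiu dst, dst, 1` computes for kCondEQ,
// and `addiu` + `sltu dst, zero, dst` for kCondNE.
uint32_t EmulateCondEQ(uint32_t lhs, int64_t imm) {
  uint32_t diff = lhs + static_cast<uint32_t>(-imm);  // addiu dst, lhs, -imm
  return diff < 1u ? 1u : 0u;                         // sltiu dst, dst, 1
}

uint32_t EmulateCondNE(uint32_t lhs, int64_t imm) {
  uint32_t diff = lhs + static_cast<uint32_t>(-imm);  // addiu dst, lhs, -imm
  return 0u < diff ? 1u : 0u;                         // sltu dst, zero, dst
}
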
@@ -2941,7 +2963,7 @@ void InstructionCodeGeneratorMIPS::GenerateIntCompareAndBranch(IfCondition cond,
Register lhs = locations->InAt(0).AsRegister<Register>();
Location rhs_location = locations->InAt(1);
Register rhs_reg = ZERO;
- int32_t rhs_imm = 0;
+ int64_t rhs_imm = 0;
bool use_imm = rhs_location.IsConstant();
if (use_imm) {
rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
@@ -2978,42 +3000,136 @@ void InstructionCodeGeneratorMIPS::GenerateIntCompareAndBranch(IfCondition cond,
break;
}
} else {
- if (use_imm) {
- // TODO: more efficient comparison with 16-bit constants without loading them into TMP.
- rhs_reg = TMP;
- __ LoadConst32(rhs_reg, rhs_imm);
- }
- switch (cond) {
- case kCondEQ:
- __ Beq(lhs, rhs_reg, label);
- break;
- case kCondNE:
- __ Bne(lhs, rhs_reg, label);
- break;
- case kCondLT:
- __ Blt(lhs, rhs_reg, label);
- break;
- case kCondGE:
- __ Bge(lhs, rhs_reg, label);
- break;
- case kCondLE:
- __ Bge(rhs_reg, lhs, label);
- break;
- case kCondGT:
- __ Blt(rhs_reg, lhs, label);
- break;
- case kCondB:
- __ Bltu(lhs, rhs_reg, label);
- break;
- case kCondAE:
- __ Bgeu(lhs, rhs_reg, label);
- break;
- case kCondBE:
- __ Bgeu(rhs_reg, lhs, label);
- break;
- case kCondA:
- __ Bltu(rhs_reg, lhs, label);
- break;
+ bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
+ if (isR6 || !use_imm) {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst32(rhs_reg, rhs_imm);
+ }
+ switch (cond) {
+ case kCondEQ:
+ __ Beq(lhs, rhs_reg, label);
+ break;
+ case kCondNE:
+ __ Bne(lhs, rhs_reg, label);
+ break;
+ case kCondLT:
+ __ Blt(lhs, rhs_reg, label);
+ break;
+ case kCondGE:
+ __ Bge(lhs, rhs_reg, label);
+ break;
+ case kCondLE:
+ __ Bge(rhs_reg, lhs, label);
+ break;
+ case kCondGT:
+ __ Blt(rhs_reg, lhs, label);
+ break;
+ case kCondB:
+ __ Bltu(lhs, rhs_reg, label);
+ break;
+ case kCondAE:
+ __ Bgeu(lhs, rhs_reg, label);
+ break;
+ case kCondBE:
+ __ Bgeu(rhs_reg, lhs, label);
+ break;
+ case kCondA:
+ __ Bltu(rhs_reg, lhs, label);
+ break;
+ }
+ } else {
+ // Special cases for more efficient comparison with constants on R2.
+ switch (cond) {
+ case kCondEQ:
+ __ LoadConst32(TMP, rhs_imm);
+ __ Beq(lhs, TMP, label);
+ break;
+ case kCondNE:
+ __ LoadConst32(TMP, rhs_imm);
+ __ Bne(lhs, TMP, label);
+ break;
+ case kCondLT:
+ if (IsInt<16>(rhs_imm)) {
+ __ Slti(TMP, lhs, rhs_imm);
+ __ Bnez(TMP, label);
+ } else {
+ __ LoadConst32(TMP, rhs_imm);
+ __ Blt(lhs, TMP, label);
+ }
+ break;
+ case kCondGE:
+ if (IsInt<16>(rhs_imm)) {
+ __ Slti(TMP, lhs, rhs_imm);
+ __ Beqz(TMP, label);
+ } else {
+ __ LoadConst32(TMP, rhs_imm);
+ __ Bge(lhs, TMP, label);
+ }
+ break;
+ case kCondLE:
+ if (IsInt<16>(rhs_imm + 1)) {
+ // Simulate lhs <= rhs via lhs < rhs + 1.
+ __ Slti(TMP, lhs, rhs_imm + 1);
+ __ Bnez(TMP, label);
+ } else {
+ __ LoadConst32(TMP, rhs_imm);
+ __ Bge(TMP, lhs, label);
+ }
+ break;
+ case kCondGT:
+ if (IsInt<16>(rhs_imm + 1)) {
+ // Simulate lhs > rhs via !(lhs < rhs + 1).
+ __ Slti(TMP, lhs, rhs_imm + 1);
+ __ Beqz(TMP, label);
+ } else {
+ __ LoadConst32(TMP, rhs_imm);
+ __ Blt(TMP, lhs, label);
+ }
+ break;
+ case kCondB:
+ if (IsInt<16>(rhs_imm)) {
+ __ Sltiu(TMP, lhs, rhs_imm);
+ __ Bnez(TMP, label);
+ } else {
+ __ LoadConst32(TMP, rhs_imm);
+ __ Bltu(lhs, TMP, label);
+ }
+ break;
+ case kCondAE:
+ if (IsInt<16>(rhs_imm)) {
+ __ Sltiu(TMP, lhs, rhs_imm);
+ __ Beqz(TMP, label);
+ } else {
+ __ LoadConst32(TMP, rhs_imm);
+ __ Bgeu(lhs, TMP, label);
+ }
+ break;
+ case kCondBE:
+ if ((rhs_imm != -1) && IsInt<16>(rhs_imm + 1)) {
+ // Simulate lhs <= rhs via lhs < rhs + 1.
+ // Note that this only works if rhs + 1 does not overflow
+ // to 0, hence the check above.
+ __ Sltiu(TMP, lhs, rhs_imm + 1);
+ __ Bnez(TMP, label);
+ } else {
+ __ LoadConst32(TMP, rhs_imm);
+ __ Bgeu(TMP, lhs, label);
+ }
+ break;
+ case kCondA:
+ if ((rhs_imm != -1) && IsInt<16>(rhs_imm + 1)) {
+ // Simulate lhs > rhs via !(lhs < rhs + 1).
+ // Note that this only works if rhs + 1 does not overflow
+ // to 0, hence the check above.
+ __ Sltiu(TMP, lhs, rhs_imm + 1);
+ __ Beqz(TMP, label);
+ } else {
+ __ LoadConst32(TMP, rhs_imm);
+ __ Bltu(TMP, lhs, label);
+ }
+ break;
+ }
}
}
}
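
The kCondLE/kCondGT/kCondBE/kCondA cases above fold `lhs <= imm` into
`lhs < imm + 1`. Widening rhs_imm to int64_t earlier in this change keeps
imm + 1 itself from overflowing when imm == INT32_MAX; the remaining hazard is
the unsigned wrap at imm == 0xFFFFFFFF, guarded by the explicit rhs_imm != -1
checks. An illustrative sketch of the unsigned case:

#include <cstdint>

// Sketch of the kCondBE rewrite: lhs <= imm iff lhs < imm + 1, except when
// imm + 1 wraps to 0.
bool EmulateCondBE(uint32_t lhs, uint32_t imm) {
  if (imm != 0xFFFFFFFFu) {
    return lhs < imm + 1u;  // sltiu TMP, lhs, imm + 1; bnez TMP, label
  }
  // imm + 1 would wrap to 0, so the generator materializes imm into TMP and
  // branches with bgeu TMP, lhs (lhs <= 0xFFFFFFFF is in fact always true).
  return true;
}
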
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 665d028338..5cabc8fa06 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -457,7 +457,8 @@ class ReadBarrierMarkSlowPathX86_64 : public SlowPathCode {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
- Register reg = obj_.AsRegister<Register>();
+ CpuRegister cpu_reg = obj_.AsRegister<CpuRegister>();
+ Register reg = cpu_reg.AsRegister();
DCHECK(locations->CanCall());
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg));
DCHECK(instruction_->IsInstanceFieldGet() ||
@@ -476,7 +477,7 @@ class ReadBarrierMarkSlowPathX86_64 : public SlowPathCode {
__ Bind(GetEntryLabel());
if (unpoison_) {
// Object* ref = ref_addr->AsMirrorPtr()
- __ MaybeUnpoisonHeapReference(obj_.AsRegister<CpuRegister>());
+ __ MaybeUnpoisonHeapReference(cpu_reg);
}
// No need to save live registers; it's taken care of by the
// entrypoint. Also, there is no need to update the stack mask,
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
index 14d29c4f1a..8a9fd90c32 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
@@ -254,10 +254,10 @@ void ArmVIXLJNIMacroAssembler::Load(ManagedRegister m_dst, FrameOffset src, size
return Load(m_dst.AsArm(), sp, src.Int32Value(), size);
}
-void ArmVIXLJNIMacroAssembler::LoadFromThread(ManagedRegister m_dst ATTRIBUTE_UNUSED,
- ThreadOffset32 src ATTRIBUTE_UNUSED,
- size_t size ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL);
+void ArmVIXLJNIMacroAssembler::LoadFromThread(ManagedRegister m_dst,
+ ThreadOffset32 src,
+ size_t size) {
+ return Load(m_dst.AsArm(), tr, src.Int32Value(), size);
}
void ArmVIXLJNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister m_dst, ThreadOffset32 offs) {
@@ -558,6 +558,38 @@ void ArmVIXLJNIMacroAssembler::ExceptionPoll(ManagedRegister m_scratch, size_t s
// TODO: think about using CBNZ here.
}
+std::unique_ptr<JNIMacroLabel> ArmVIXLJNIMacroAssembler::CreateLabel() {
+ return std::unique_ptr<JNIMacroLabel>(new ArmVIXLJNIMacroLabel());
+}
+
+void ArmVIXLJNIMacroAssembler::Jump(JNIMacroLabel* label) {
+ CHECK(label != nullptr);
+ ___ B(ArmVIXLJNIMacroLabel::Cast(label)->AsArm());
+}
+
+void ArmVIXLJNIMacroAssembler::Jump(JNIMacroLabel* label,
+ JNIMacroUnaryCondition condition,
+ ManagedRegister test) {
+ CHECK(label != nullptr);
+
+ switch (condition) {
+ case JNIMacroUnaryCondition::kZero:
+ ___ Cbz(test.AsArm().AsVIXLRegister(), ArmVIXLJNIMacroLabel::Cast(label)->AsArm());
+ break;
+ case JNIMacroUnaryCondition::kNotZero:
+ ___ Cbnz(test.AsArm().AsVIXLRegister(), ArmVIXLJNIMacroLabel::Cast(label)->AsArm());
+ break;
+ default:
+ LOG(FATAL) << "Not implemented unary condition: " << static_cast<int>(condition);
+ UNREACHABLE();
+ }
+}
+
+void ArmVIXLJNIMacroAssembler::Bind(JNIMacroLabel* label) {
+ CHECK(label != nullptr);
+ ___ Bind(ArmVIXLJNIMacroLabel::Cast(label)->AsArm());
+}
+
void ArmVIXLJNIMacroAssembler::EmitExceptionPoll(
ArmVIXLJNIMacroAssembler::ArmException* exception) {
___ Bind(exception->Entry());
@@ -588,9 +620,14 @@ void ArmVIXLJNIMacroAssembler::Load(ArmManagedRegister
if (dest.IsNoRegister()) {
CHECK_EQ(0u, size) << dest;
} else if (dest.IsCoreRegister()) {
- CHECK_EQ(4u, size) << dest;
CHECK(!dest.AsVIXLRegister().Is(sp)) << dest;
- ___ Ldr(dest.AsVIXLRegister(), MemOperand(base, offset));
+
+ if (size == 1u) {
+ ___ Ldrb(dest.AsVIXLRegister(), MemOperand(base, offset));
+ } else {
+ CHECK_EQ(4u, size) << dest;
+ ___ Ldr(dest.AsVIXLRegister(), MemOperand(base, offset));
+ }
} else if (dest.IsRegisterPair()) {
CHECK_EQ(8u, size) << dest;
___ Ldr(dest.AsVIXLRegisterPairLow(), MemOperand(base, offset));
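
The new size == 1u case in Load exists for exactly one caller today: the
gc_is_marking probe added in jni_compiler.cc, which loads a single byte from
the thread (Ldrb here, and movzxb in the x86/x86-64 changes below):

// From the jni_compiler.cc hunk above; IsGcMarkingSize() is one byte, so
// LoadFromThread dispatches to the new Ldrb path on ARM.
__ LoadFromThread(main_jni_conv->InterproceduralScratchRegister(),
                  Thread::IsGcMarkingOffset<kPointerSize>(),
                  Thread::IsGcMarkingSize());
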
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
index 9fc683dd4f..f3baf1f062 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
@@ -187,6 +187,15 @@ class ArmVIXLJNIMacroAssembler FINAL
// and branch to a ExceptionSlowPath if it is.
void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust);
+ // Create a new label that can be used with Jump/Bind calls.
+ std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE;
+ // Emit an unconditional jump to the label.
+ void Jump(JNIMacroLabel* label) OVERRIDE;
+ // Emit a conditional jump to the label by applying a unary condition test to the register.
+ void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) OVERRIDE;
+ // Code at this offset will serve as the target for the Jump call.
+ void Bind(JNIMacroLabel* label) OVERRIDE;
+
void MemoryBarrier(ManagedRegister scratch) OVERRIDE;
void EmitExceptionPoll(ArmVIXLJNIMacroAssembler::ArmException *exception);
@@ -219,6 +228,16 @@ class ArmVIXLJNIMacroAssembler FINAL
friend class ArmVIXLAssemblerTest_VixlStoreToOffset_Test;
};
+class ArmVIXLJNIMacroLabel FINAL
+ : public JNIMacroLabelCommon<ArmVIXLJNIMacroLabel,
+ vixl32::Label,
+ kArm> {
+ public:
+ vixl32::Label* AsArm() {
+ return AsPlatformLabel();
+ }
+};
+
} // namespace arm
} // namespace art
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.cc b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
index dfdcd11893..9cd6884cbe 100644
--- a/compiler/utils/arm64/jni_macro_assembler_arm64.cc
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
@@ -262,9 +262,12 @@ void Arm64JNIMacroAssembler::Load(Arm64ManagedRegister dest,
___ Ldr(reg_w(dest.AsWRegister()), MEM_OP(reg_x(base), offset));
} else if (dest.IsXRegister()) {
CHECK_NE(dest.AsXRegister(), SP) << dest;
- if (size == 4u) {
+
+ if (size == 1u) {
+ ___ Ldrb(reg_w(dest.AsOverlappingWRegister()), MEM_OP(reg_x(base), offset));
+ } else if (size == 4u) {
___ Ldr(reg_w(dest.AsOverlappingWRegister()), MEM_OP(reg_x(base), offset));
- } else {
+ } else {
CHECK_EQ(8u, size) << dest;
___ Ldr(reg_x(dest.AsXRegister()), MEM_OP(reg_x(base), offset));
}
@@ -627,6 +630,38 @@ void Arm64JNIMacroAssembler::ExceptionPoll(ManagedRegister m_scratch, size_t sta
___ Cbnz(reg_x(scratch.AsXRegister()), exception_blocks_.back()->Entry());
}
+std::unique_ptr<JNIMacroLabel> Arm64JNIMacroAssembler::CreateLabel() {
+ return std::unique_ptr<JNIMacroLabel>(new Arm64JNIMacroLabel());
+}
+
+void Arm64JNIMacroAssembler::Jump(JNIMacroLabel* label) {
+ CHECK(label != nullptr);
+ ___ B(Arm64JNIMacroLabel::Cast(label)->AsArm64());
+}
+
+void Arm64JNIMacroAssembler::Jump(JNIMacroLabel* label,
+ JNIMacroUnaryCondition condition,
+ ManagedRegister test) {
+ CHECK(label != nullptr);
+
+ switch (condition) {
+ case JNIMacroUnaryCondition::kZero:
+ ___ Cbz(reg_x(test.AsArm64().AsXRegister()), Arm64JNIMacroLabel::Cast(label)->AsArm64());
+ break;
+ case JNIMacroUnaryCondition::kNotZero:
+ ___ Cbnz(reg_x(test.AsArm64().AsXRegister()), Arm64JNIMacroLabel::Cast(label)->AsArm64());
+ break;
+ default:
+ LOG(FATAL) << "Not implemented unary condition: " << static_cast<int>(condition);
+ UNREACHABLE();
+ }
+}
+
+void Arm64JNIMacroAssembler::Bind(JNIMacroLabel* label) {
+ CHECK(label != nullptr);
+ ___ Bind(Arm64JNIMacroLabel::Cast(label)->AsArm64());
+}
+
void Arm64JNIMacroAssembler::EmitExceptionPoll(Arm64Exception *exception) {
UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
temps.Exclude(reg_x(exception->scratch_.AsXRegister()));
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.h b/compiler/utils/arm64/jni_macro_assembler_arm64.h
index b9f6854b01..264e99adab 100644
--- a/compiler/utils/arm64/jni_macro_assembler_arm64.h
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.h
@@ -168,6 +168,15 @@ class Arm64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<Arm64Assembler,
// and branch to a ExceptionSlowPath if it is.
void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
+ // Create a new label that can be used with Jump/Bind calls.
+ std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE;
+ // Emit an unconditional jump to the label.
+ void Jump(JNIMacroLabel* label) OVERRIDE;
+ // Emit a conditional jump to the label by applying a unary condition test to the register.
+ void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) OVERRIDE;
+ // Code at this offset will serve as the target for the Jump call.
+ void Bind(JNIMacroLabel* label) OVERRIDE;
+
private:
class Arm64Exception {
public:
@@ -222,6 +231,16 @@ class Arm64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<Arm64Assembler,
ArenaVector<std::unique_ptr<Arm64Exception>> exception_blocks_;
};
+class Arm64JNIMacroLabel FINAL
+ : public JNIMacroLabelCommon<Arm64JNIMacroLabel,
+ vixl::aarch64::Label,
+ kArm64> {
+ public:
+ vixl::aarch64::Label* AsArm64() {
+ return AsPlatformLabel();
+ }
+};
+
} // namespace arm64
} // namespace art
diff --git a/compiler/utils/jni_macro_assembler.h b/compiler/utils/jni_macro_assembler.h
index 0119ae9bfb..59a1a48e20 100644
--- a/compiler/utils/jni_macro_assembler.h
+++ b/compiler/utils/jni_macro_assembler.h
@@ -35,6 +35,12 @@ class ArenaAllocator;
class DebugFrameOpCodeWriterForAssembler;
class InstructionSetFeatures;
class MemoryRegion;
+class JNIMacroLabel;
+
+enum class JNIMacroUnaryCondition {
+ kZero,
+ kNotZero
+};
template <PointerSize kPointerSize>
class JNIMacroAssembler : public DeletableArenaObject<kArenaAllocAssembler> {
@@ -193,6 +199,15 @@ class JNIMacroAssembler : public DeletableArenaObject<kArenaAllocAssembler> {
// and branch to a ExceptionSlowPath if it is.
virtual void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) = 0;
+ // Create a new label that can be used with Jump/Bind calls.
+ virtual std::unique_ptr<JNIMacroLabel> CreateLabel() = 0;
+ // Emit an unconditional jump to the label.
+ virtual void Jump(JNIMacroLabel* label) = 0;
+ // Emit a conditional jump to the label by applying a unary condition test to the register.
+ virtual void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) = 0;
+ // Code at this offset will serve as the target for the Jump call.
+ virtual void Bind(JNIMacroLabel* label) = 0;
+
virtual ~JNIMacroAssembler() {}
/**
@@ -205,6 +220,28 @@ class JNIMacroAssembler : public DeletableArenaObject<kArenaAllocAssembler> {
explicit JNIMacroAssembler() {}
};
+// A "Label" class used with the JNIMacroAssembler
+// allowing one to use branches (jumping from one place to another).
+//
+// This is just an interface, so every platform must provide
+// its own implementation of it.
+//
+// It is only safe to use a label created
+// via JNIMacroAssembler::CreateLabel with that same macro assembler.
+class JNIMacroLabel {
+ public:
+ virtual ~JNIMacroLabel() = 0;
+
+ const InstructionSet isa_;
+ protected:
+ explicit JNIMacroLabel(InstructionSet isa) : isa_(isa) {}
+};
+
+inline JNIMacroLabel::~JNIMacroLabel() {
+ // Compulsory definition for a pure virtual destructor
+ // to avoid linking errors.
+}
+
template <typename T, PointerSize kPointerSize>
class JNIMacroAssemblerFwd : public JNIMacroAssembler<kPointerSize> {
public:
@@ -230,6 +267,30 @@ class JNIMacroAssemblerFwd : public JNIMacroAssembler<kPointerSize> {
T asm_;
};
+template <typename Self, typename PlatformLabel, InstructionSet kIsa>
+class JNIMacroLabelCommon : public JNIMacroLabel {
+ public:
+ static Self* Cast(JNIMacroLabel* label) {
+ CHECK(label != nullptr);
+ CHECK_EQ(kIsa, label->isa_);
+
+ return reinterpret_cast<Self*>(label);
+ }
+
+ protected:
+ PlatformLabel* AsPlatformLabel() {
+ return &label_;
+ }
+
+ JNIMacroLabelCommon() : JNIMacroLabel(kIsa) {
+ }
+
+ virtual ~JNIMacroLabelCommon() OVERRIDE {}
+
+ private:
+ PlatformLabel label_;
+};
+
} // namespace art
#endif // ART_COMPILER_UTILS_JNI_MACRO_ASSEMBLER_H_
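
A caller-side sketch of how the new interface composes (assembler, scratch,
and EmitColdPath are placeholders, not names from this change):

// Hypothetical use of the JNIMacroLabel API declared above.
std::unique_ptr<JNIMacroLabel> done = assembler->CreateLabel();

// Skip the cold path when the value in `scratch` is zero...
assembler->Jump(done.get(), JNIMacroUnaryCondition::kZero, scratch);
// ...otherwise fall through into it.
EmitColdPath(assembler);

// All jumps to `done` land immediately after this Bind.
assembler->Bind(done.get());
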
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index e1255f7f23..b932fb82bc 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -179,6 +179,8 @@ class MipsExceptionSlowPath {
class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSize::k32> {
public:
+ using JNIBase = JNIMacroAssembler<PointerSize::k32>;
+
explicit MipsAssembler(ArenaAllocator* arena,
const MipsInstructionSetFeatures* instruction_set_features = nullptr)
: Assembler(arena),
@@ -723,6 +725,34 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
UNIMPLEMENTED(FATAL) << "Do not use Jump for MIPS";
}
+ // Don't warn about a different virtual Bind/Jump in the base class.
+ using JNIBase::Bind;
+ using JNIBase::Jump;
+
+ // Create a new label that can be used with Jump/Bind calls.
+ std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE {
+ LOG(FATAL) << "Not implemented on MIPS32";
+ UNREACHABLE();
+ }
+ // Emit an unconditional jump to the label.
+ void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED) OVERRIDE {
+ LOG(FATAL) << "Not implemented on MIPS32";
+ UNREACHABLE();
+ }
+ // Emit a conditional jump to the label by applying a unary condition test to the register.
+ void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED,
+ JNIMacroUnaryCondition cond ATTRIBUTE_UNUSED,
+ ManagedRegister test ATTRIBUTE_UNUSED) OVERRIDE {
+ LOG(FATAL) << "Not implemented on MIPS32";
+ UNREACHABLE();
+ }
+
+ // Code at this offset will serve as the target for the Jump call.
+ void Bind(JNIMacroLabel* label ATTRIBUTE_UNUSED) OVERRIDE {
+ LOG(FATAL) << "Not implemented on MIPS32";
+ UNREACHABLE();
+ }
+
// Create a new literal with a given value.
// NOTE: Force the template parameter to be explicitly specified.
template <typename T>
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index 6277b5d66d..238cb9d765 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -335,6 +335,8 @@ class Mips64ExceptionSlowPath {
class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<PointerSize::k64> {
public:
+ using JNIBase = JNIMacroAssembler<PointerSize::k64>;
+
explicit Mips64Assembler(ArenaAllocator* arena)
: Assembler(arena),
overwriting_(false),
@@ -574,6 +576,35 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
}
void Bind(Mips64Label* label);
+
+ // Don't warn about a different virtual Bind/Jump in the base class.
+ using JNIBase::Bind;
+ using JNIBase::Jump;
+
+ // Create a new label that can be used with Jump/Bind calls.
+ std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE {
+ LOG(FATAL) << "Not implemented on MIPS64";
+ UNREACHABLE();
+ }
+ // Emit an unconditional jump to the label.
+ void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED) OVERRIDE {
+ LOG(FATAL) << "Not implemented on MIPS64";
+ UNREACHABLE();
+ }
+ // Emit a conditional jump to the label by applying a unary condition test to the register.
+ void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED,
+ JNIMacroUnaryCondition cond ATTRIBUTE_UNUSED,
+ ManagedRegister test ATTRIBUTE_UNUSED) OVERRIDE {
+ LOG(FATAL) << "Not implemented on MIPS64";
+ UNREACHABLE();
+ }
+
+ // Code at this offset will serve as the target for the Jump call.
+ void Bind(JNIMacroLabel* label ATTRIBUTE_UNUSED) OVERRIDE {
+ LOG(FATAL) << "Not implemented on MIPS64";
+ UNREACHABLE();
+ }
+
void Bc(Mips64Label* label);
void Jialc(Mips64Label* label, GpuRegister indirect_reg);
void Bltc(GpuRegister rs, GpuRegister rt, Mips64Label* label);
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.cc b/compiler/utils/x86/jni_macro_assembler_x86.cc
index 77af885646..cfdf80ba50 100644
--- a/compiler/utils/x86/jni_macro_assembler_x86.cc
+++ b/compiler/utils/x86/jni_macro_assembler_x86.cc
@@ -215,8 +215,12 @@ void X86JNIMacroAssembler::LoadFromThread(ManagedRegister mdest, ThreadOffset32
if (dest.IsNoRegister()) {
CHECK_EQ(0u, size);
} else if (dest.IsCpuRegister()) {
- CHECK_EQ(4u, size);
- __ fs()->movl(dest.AsCpuRegister(), Address::Absolute(src));
+ if (size == 1u) {
+ __ fs()->movzxb(dest.AsCpuRegister(), Address::Absolute(src));
+ } else {
+ CHECK_EQ(4u, size);
+ __ fs()->movl(dest.AsCpuRegister(), Address::Absolute(src));
+ }
} else if (dest.IsRegisterPair()) {
CHECK_EQ(8u, size);
__ fs()->movl(dest.AsRegisterPairLow(), Address::Absolute(src));
@@ -519,6 +523,48 @@ void X86JNIMacroAssembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t sta
__ j(kNotEqual, slow->Entry());
}
+std::unique_ptr<JNIMacroLabel> X86JNIMacroAssembler::CreateLabel() {
+ return std::unique_ptr<JNIMacroLabel>(new X86JNIMacroLabel());
+}
+
+void X86JNIMacroAssembler::Jump(JNIMacroLabel* label) {
+ CHECK(label != nullptr);
+ __ jmp(X86JNIMacroLabel::Cast(label)->AsX86());
+}
+
+void X86JNIMacroAssembler::Jump(JNIMacroLabel* label,
+ JNIMacroUnaryCondition condition,
+ ManagedRegister test) {
+ CHECK(label != nullptr);
+
+ art::x86::Condition x86_cond;
+ switch (condition) {
+ case JNIMacroUnaryCondition::kZero:
+ x86_cond = art::x86::kZero;
+ break;
+ case JNIMacroUnaryCondition::kNotZero:
+ x86_cond = art::x86::kNotZero;
+ break;
+ default:
+ LOG(FATAL) << "Not implemented condition: " << static_cast<int>(condition);
+ UNREACHABLE();
+ }
+
+ // TEST reg, reg
+ // Jcc <Offset>
+ __ testl(test.AsX86().AsCpuRegister(), test.AsX86().AsCpuRegister());
+ __ j(x86_cond, X86JNIMacroLabel::Cast(label)->AsX86());
+
+ // X86 also has JCXZ/JECXZ, but they are not worth implementing here
+ // because we are unlikely to codegen an ECX + kZero check.
+}
+
+void X86JNIMacroAssembler::Bind(JNIMacroLabel* label) {
+ CHECK(label != nullptr);
+ __ Bind(X86JNIMacroLabel::Cast(label)->AsX86());
+}
+
#undef __
void X86ExceptionSlowPath::Emit(Assembler *sasm) {
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.h b/compiler/utils/x86/jni_macro_assembler_x86.h
index 015584cbc1..8ffda6425e 100644
--- a/compiler/utils/x86/jni_macro_assembler_x86.h
+++ b/compiler/utils/x86/jni_macro_assembler_x86.h
@@ -30,6 +30,8 @@
namespace art {
namespace x86 {
+class X86JNIMacroLabel;
+
class X86JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86Assembler, PointerSize::k32> {
public:
explicit X86JNIMacroAssembler(ArenaAllocator* arena) : JNIMacroAssemblerFwd(arena) {}
@@ -152,10 +154,29 @@ class X86JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86Assembler, Poi
// and branch to a ExceptionSlowPath if it is.
void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
+ // Create a new label that can be used with Jump/Bind calls.
+ std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE;
+ // Emit an unconditional jump to the label.
+ void Jump(JNIMacroLabel* label) OVERRIDE;
+ // Emit a conditional jump to the label by applying a unary condition test to the register.
+ void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) OVERRIDE;
+ // Code at this offset will serve as the target for the Jump call.
+ void Bind(JNIMacroLabel* label) OVERRIDE;
+
private:
DISALLOW_COPY_AND_ASSIGN(X86JNIMacroAssembler);
};
+class X86JNIMacroLabel FINAL
+ : public JNIMacroLabelCommon<X86JNIMacroLabel,
+ art::Label,
+ kX86> {
+ public:
+ art::Label* AsX86() {
+ return AsPlatformLabel();
+ }
+};
+
} // namespace x86
} // namespace art
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
index 3e687a7758..ec86254cfc 100644
--- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
@@ -260,8 +260,12 @@ void X86_64JNIMacroAssembler::LoadFromThread(ManagedRegister mdest,
if (dest.IsNoRegister()) {
CHECK_EQ(0u, size);
} else if (dest.IsCpuRegister()) {
- CHECK_EQ(4u, size);
- __ gs()->movl(dest.AsCpuRegister(), Address::Absolute(src, true));
+ if (size == 1u) {
+ __ gs()->movzxb(dest.AsCpuRegister(), Address::Absolute(src, true));
+ } else {
+ CHECK_EQ(4u, size);
+ __ gs()->movl(dest.AsCpuRegister(), Address::Absolute(src, true));
+ }
} else if (dest.IsRegisterPair()) {
CHECK_EQ(8u, size);
__ gs()->movq(dest.AsRegisterPairLow(), Address::Absolute(src, true));
@@ -585,6 +589,44 @@ void X86_64JNIMacroAssembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t
__ j(kNotEqual, slow->Entry());
}
+std::unique_ptr<JNIMacroLabel> X86_64JNIMacroAssembler::CreateLabel() {
+ return std::unique_ptr<JNIMacroLabel>(new X86_64JNIMacroLabel());
+}
+
+void X86_64JNIMacroAssembler::Jump(JNIMacroLabel* label) {
+ CHECK(label != nullptr);
+ __ jmp(X86_64JNIMacroLabel::Cast(label)->AsX86_64());
+}
+
+void X86_64JNIMacroAssembler::Jump(JNIMacroLabel* label,
+ JNIMacroUnaryCondition condition,
+ ManagedRegister test) {
+ CHECK(label != nullptr);
+
+ art::x86_64::Condition x86_64_cond;
+ switch (condition) {
+ case JNIMacroUnaryCondition::kZero:
+ x86_64_cond = art::x86_64::kZero;
+ break;
+ case JNIMacroUnaryCondition::kNotZero:
+ x86_64_cond = art::x86_64::kNotZero;
+ break;
+ default:
+ LOG(FATAL) << "Not implemented condition: " << static_cast<int>(condition);
+ UNREACHABLE();
+ }
+
+ // TEST reg, reg
+ // Jcc <Offset>
+ __ testq(test.AsX86_64().AsCpuRegister(), test.AsX86_64().AsCpuRegister());
+ __ j(x86_64_cond, X86_64JNIMacroLabel::Cast(label)->AsX86_64());
+}
+
+void X86_64JNIMacroAssembler::Bind(JNIMacroLabel* label) {
+ CHECK(label != nullptr);
+ __ Bind(X86_64JNIMacroLabel::Cast(label)->AsX86_64());
+}
+
#undef __
void X86_64ExceptionSlowPath::Emit(Assembler *sasm) {
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
index 9107f3c422..aa058f7454 100644
--- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
@@ -180,10 +180,29 @@ class X86_64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86_64Assemble
// and branch to a ExceptionSlowPath if it is.
void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
+ // Create a new label that can be used with Jump/Bind calls.
+ std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE;
+ // Emit an unconditional jump to the label.
+ void Jump(JNIMacroLabel* label) OVERRIDE;
+ // Emit a conditional jump to the label by applying a unary condition test to the register.
+ void Jump(JNIMacroLabel* label, JNIMacroUnaryCondition cond, ManagedRegister test) OVERRIDE;
+ // Code at this offset will serve as the target for the Jump call.
+ void Bind(JNIMacroLabel* label) OVERRIDE;
+
private:
DISALLOW_COPY_AND_ASSIGN(X86_64JNIMacroAssembler);
};
+class X86_64JNIMacroLabel FINAL
+ : public JNIMacroLabelCommon<X86_64JNIMacroLabel,
+ art::Label,
+ kX86_64> {
+ public:
+ art::Label* AsX86_64() {
+ return AsPlatformLabel();
+ }
+};
+
} // namespace x86_64
} // namespace art
diff --git a/runtime/art_field-inl.h b/runtime/art_field-inl.h
index 3b24aab6b6..24bbcfbf0b 100644
--- a/runtime/art_field-inl.h
+++ b/runtime/art_field-inl.h
@@ -116,9 +116,9 @@ inline void ArtField::SetObj(ObjPtr<mirror::Object> object, ObjPtr<mirror::Objec
DCHECK(object != nullptr) << PrettyField(this);
DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
if (UNLIKELY(IsVolatile())) {
- object->SetFieldObjectVolatile<kTransactionActive>(GetOffset(), new_value.Ptr());
+ object->SetFieldObjectVolatile<kTransactionActive>(GetOffset(), new_value);
} else {
- object->SetFieldObject<kTransactionActive>(GetOffset(), new_value.Ptr());
+ object->SetFieldObject<kTransactionActive>(GetOffset(), new_value);
}
}
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 1183dea7c4..e77e6d7170 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -64,6 +64,8 @@ Mutex* Locks::thread_suspend_count_lock_ = nullptr;
Mutex* Locks::trace_lock_ = nullptr;
Mutex* Locks::unexpected_signal_lock_ = nullptr;
Uninterruptible Roles::uninterruptible_;
+ReaderWriterMutex* Locks::jni_globals_lock_ = nullptr;
+Mutex* Locks::jni_weak_globals_lock_ = nullptr;
struct AllMutexData {
// A guard for all_mutexes_ that's not a mutex (Mutexes must CAS to acquire and busy wait).
@@ -1088,6 +1090,15 @@ void Locks::Init() {
DCHECK(reference_queue_soft_references_lock_ == nullptr);
reference_queue_soft_references_lock_ = new Mutex("ReferenceQueue soft references lock", current_lock_level);
+ UPDATE_CURRENT_LOCK_LEVEL(kJniGlobalsLock);
+ DCHECK(jni_globals_lock_ == nullptr);
+ jni_globals_lock_ =
+ new ReaderWriterMutex("JNI global reference table lock", current_lock_level);
+
+ UPDATE_CURRENT_LOCK_LEVEL(kJniWeakGlobalsLock);
+ DCHECK(jni_weak_globals_lock_ == nullptr);
+ jni_weak_globals_lock_ = new Mutex("JNI weak global reference table lock", current_lock_level);
+
UPDATE_CURRENT_LOCK_LEVEL(kAbortLock);
DCHECK(abort_lock_ == nullptr);
abort_lock_ = new Mutex("abort lock", current_lock_level, true);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index b3ff6c20bc..e0cca7b0ce 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -68,6 +68,7 @@ enum LockLevel {
kMarkSweepMarkStackLock,
kTransactionLogLock,
kJniWeakGlobalsLock,
+ kJniGlobalsLock,
kReferenceQueueSoftReferencesLock,
kReferenceQueuePhantomReferencesLock,
kReferenceQueueFinalizerReferencesLock,
@@ -678,8 +679,14 @@ class Locks {
// Guards soft references queue.
static Mutex* reference_queue_soft_references_lock_ ACQUIRED_AFTER(reference_queue_phantom_references_lock_);
+ // Guard accesses to the JNI Global Reference table.
+ static ReaderWriterMutex* jni_globals_lock_ ACQUIRED_AFTER(reference_queue_soft_references_lock_);
+
+ // Guard accesses to the JNI Weak Global Reference table.
+ static Mutex* jni_weak_globals_lock_ ACQUIRED_AFTER(jni_globals_lock_);
+
// Have an exclusive aborting thread.
- static Mutex* abort_lock_ ACQUIRED_AFTER(reference_queue_soft_references_lock_);
+ static Mutex* abort_lock_ ACQUIRED_AFTER(jni_weak_globals_lock_);
// Allow mutual exclusion when manipulating Thread::suspend_count_.
// TODO: Does the trade-off of a per-thread lock make sense?
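
With the new levels in place, code touching the two reference tables acquires
the locks in this order; a sketch assuming ART's usual scoped guards and a
current Thread* self:

// Globals are guarded by a ReaderWriterMutex, so lookups can share access.
{
  ReaderMutexLock mu(self, *Locks::jni_globals_lock_);
  // ... read the global reference table ...
}
// Weak globals sit one level below and keep an exclusive Mutex.
{
  MutexLock mu(self, *Locks::jni_weak_globals_lock_);
  // ... inspect the weak global reference table ...
}
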
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index fa971c4c2b..dba9b8fb48 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -200,7 +200,7 @@ inline ArtField* ClassLinker::ResolveField(uint32_t field_idx, ArtMethod* referr
inline mirror::Object* ClassLinker::AllocObject(Thread* self) {
return GetClassRoot(kJavaLangObject)->Alloc<true, false>(
self,
- Runtime::Current()->GetHeap()->GetCurrentAllocator());
+ Runtime::Current()->GetHeap()->GetCurrentAllocator()).Ptr();
}
template <class T>
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 0cf5231b0b..239cdaea69 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -2146,7 +2146,7 @@ mirror::DexCache* ClassLinker::AllocDexCache(mirror::String** out_location,
const DexFile& dex_file) {
StackHandleScope<1> hs(self);
DCHECK(out_location != nullptr);
- auto dex_cache(hs.NewHandle(down_cast<mirror::DexCache*>(
+ auto dex_cache(hs.NewHandle(ObjPtr<mirror::DexCache>::DownCast(
GetClassRoot(kJavaLangDexCache)->AllocObject(self))));
if (dex_cache.Get() == nullptr) {
self->AssertPendingOOMException();
@@ -4019,9 +4019,9 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable&
// Set the class access flags incl. VerificationAttempted, so we do not try to set the flag on
// the methods.
klass->SetAccessFlags(kAccClassIsProxy | kAccPublic | kAccFinal | kAccVerificationAttempted);
- klass->SetClassLoader(soa.Decode<mirror::ClassLoader>(loader).Ptr());
+ klass->SetClassLoader(soa.Decode<mirror::ClassLoader>(loader));
DCHECK_EQ(klass->GetPrimitiveType(), Primitive::kPrimNot);
- klass->SetName(soa.Decode<mirror::String>(name).Ptr());
+ klass->SetName(soa.Decode<mirror::String>(name));
klass->SetDexCache(GetClassRoot(kJavaLangReflectProxy)->GetDexCache());
mirror::Class::SetStatus(klass, mirror::Class::kStatusIdx, self);
std::string descriptor(GetDescriptorForProxy(klass.Get()));
@@ -4566,7 +4566,7 @@ bool ClassLinker::InitializeDefaultInterfaceRecursive(Thread* self,
MutableHandle<mirror::Class> handle_super_iface(hs.NewHandle<mirror::Class>(nullptr));
// First we initialize all of iface's super-interfaces recursively.
for (size_t i = 0; i < num_direct_ifaces; i++) {
- mirror::Class* super_iface = mirror::Class::GetDirectInterface(self, iface, i);
+ ObjPtr<mirror::Class> super_iface = mirror::Class::GetDirectInterface(self, iface, i);
if (!super_iface->HasBeenRecursivelyInitialized()) {
// Recursive step
handle_super_iface.Assign(super_iface);
@@ -6302,7 +6302,7 @@ bool ClassLinker::SetupInterfaceLookupTable(Thread* self, Handle<mirror::Class>
size_t ifcount = super_ifcount + num_interfaces;
// Check that every class being implemented is an interface.
for (size_t i = 0; i < num_interfaces; i++) {
- mirror::Class* interface = have_interfaces
+ ObjPtr<mirror::Class> interface = have_interfaces
? interfaces->GetWithoutChecks(i)
: mirror::Class::GetDirectInterface(self, klass, i);
DCHECK(interface != nullptr);
@@ -6341,9 +6341,9 @@ bool ClassLinker::SetupInterfaceLookupTable(Thread* self, Handle<mirror::Class>
ScopedAssertNoThreadSuspension nts("Copying mirror::Class*'s for FillIfTable");
std::vector<mirror::Class*> to_add;
for (size_t i = 0; i < num_interfaces; i++) {
- mirror::Class* interface = have_interfaces ? interfaces->Get(i) :
+ ObjPtr<mirror::Class> interface = have_interfaces ? interfaces->Get(i) :
mirror::Class::GetDirectInterface(self, klass, i);
- to_add.push_back(interface);
+ to_add.push_back(interface.Ptr());
}
new_ifcount = FillIfTable(iftable.Get(), super_ifcount, std::move(to_add));
@@ -8101,7 +8101,7 @@ jobject ClassLinker::CreatePathClassLoader(Thread* self,
mirror::Class::FindField(self, hs.NewHandle(h_path_class_loader->GetClass()), "parent",
"Ljava/lang/ClassLoader;");
DCHECK(parent_field != nullptr);
- mirror::Object* boot_cl =
+ ObjPtr<mirror::Object> boot_cl =
soa.Decode<mirror::Class>(WellKnownClasses::java_lang_BootClassLoader)->AllocObject(self);
parent_field->SetObject<false>(h_path_class_loader.Get(), boot_cl);
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index e514112382..6279717acb 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -211,13 +211,13 @@ class ClassLinkerTest : public CommonRuntimeTest {
EXPECT_TRUE(array->ShouldHaveEmbeddedVTable());
EXPECT_EQ(2, array->GetIfTableCount());
ASSERT_TRUE(array->GetIfTable() != nullptr);
- mirror::Class* direct_interface0 = mirror::Class::GetDirectInterface(self, array, 0);
+ ObjPtr<mirror::Class> direct_interface0 = mirror::Class::GetDirectInterface(self, array, 0);
EXPECT_TRUE(direct_interface0 != nullptr);
EXPECT_STREQ(direct_interface0->GetDescriptor(&temp), "Ljava/lang/Cloneable;");
- mirror::Class* direct_interface1 = mirror::Class::GetDirectInterface(self, array, 1);
+ ObjPtr<mirror::Class> direct_interface1 = mirror::Class::GetDirectInterface(self, array, 1);
EXPECT_STREQ(direct_interface1->GetDescriptor(&temp), "Ljava/io/Serializable;");
mirror::Class* array_ptr = array->GetComponentType();
- EXPECT_EQ(class_linker_->FindArrayClass(self, &array_ptr), array.Get());
+ EXPECT_OBJ_PTR_EQ(class_linker_->FindArrayClass(self, &array_ptr), array.Get());
PointerSize pointer_size = class_linker_->GetImagePointerSize();
mirror::Class* JavaLangObject =
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index 84752f03c7..193f6ee7d9 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -514,9 +514,9 @@ std::vector<const DexFile*> CommonRuntimeTestImpl::GetDexFiles(jobject jclass_lo
soa.Decode<mirror::ClassLoader>(jclass_loader));
DCHECK_EQ(class_loader->GetClass(),
- soa.Decode<mirror::Class>(WellKnownClasses::dalvik_system_PathClassLoader).Ptr());
+ soa.Decode<mirror::Class>(WellKnownClasses::dalvik_system_PathClassLoader));
DCHECK_EQ(class_loader->GetParent()->GetClass(),
- soa.Decode<mirror::Class>(WellKnownClasses::java_lang_BootClassLoader).Ptr());
+ soa.Decode<mirror::Class>(WellKnownClasses::java_lang_BootClassLoader));
// The class loader is a PathClassLoader which inherits from BaseDexClassLoader.
// We need to get the DexPathList and loop through it.
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index 7fa8cf9326..0aa33c644e 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -28,14 +28,16 @@
#include "dex_instruction-inl.h"
#include "invoke_type.h"
#include "mirror/class-inl.h"
+#include "mirror/method_type.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
+#include "obj_ptr-inl.h"
#include "thread.h"
#include "verifier/method_verifier.h"
namespace art {
-static void AddReferrerLocation(std::ostream& os, mirror::Class* referrer)
+static void AddReferrerLocation(std::ostream& os, ObjPtr<mirror::Class> referrer)
REQUIRES_SHARED(Locks::mutator_lock_) {
if (referrer != nullptr) {
std::string location(referrer->GetLocation());
@@ -47,7 +49,9 @@ static void AddReferrerLocation(std::ostream& os, mirror::Class* referrer)
}
static void ThrowException(const char* exception_descriptor,
- mirror::Class* referrer, const char* fmt, va_list* args = nullptr)
+ ObjPtr<mirror::Class> referrer,
+ const char* fmt,
+ va_list* args = nullptr)
REQUIRES_SHARED(Locks::mutator_lock_) {
std::ostringstream msg;
if (args != nullptr) {
@@ -63,7 +67,9 @@ static void ThrowException(const char* exception_descriptor,
}
static void ThrowWrappedException(const char* exception_descriptor,
- mirror::Class* referrer, const char* fmt, va_list* args = nullptr)
+ ObjPtr<mirror::Class> referrer,
+ const char* fmt,
+ va_list* args = nullptr)
REQUIRES_SHARED(Locks::mutator_lock_) {
std::ostringstream msg;
if (args != nullptr) {
@@ -109,7 +115,8 @@ void ThrowArrayIndexOutOfBoundsException(int index, int length) {
// ArrayStoreException
-void ThrowArrayStoreException(mirror::Class* element_class, mirror::Class* array_class) {
+void ThrowArrayStoreException(ObjPtr<mirror::Class> element_class,
+ ObjPtr<mirror::Class> array_class) {
ThrowException("Ljava/lang/ArrayStoreException;", nullptr,
StringPrintf("%s cannot be stored in an array of type %s",
PrettyDescriptor(element_class).c_str(),
@@ -118,7 +125,7 @@ void ThrowArrayStoreException(mirror::Class* element_class, mirror::Class* array
// ClassCastException
-void ThrowClassCastException(mirror::Class* dest_type, mirror::Class* src_type) {
+void ThrowClassCastException(ObjPtr<mirror::Class> dest_type, ObjPtr<mirror::Class> src_type) {
ThrowException("Ljava/lang/ClassCastException;", nullptr,
StringPrintf("%s cannot be cast to %s",
PrettyDescriptor(src_type).c_str(),
@@ -131,13 +138,13 @@ void ThrowClassCastException(const char* msg) {
// ClassCircularityError
-void ThrowClassCircularityError(mirror::Class* c) {
+void ThrowClassCircularityError(ObjPtr<mirror::Class> c) {
std::ostringstream msg;
msg << PrettyDescriptor(c);
ThrowException("Ljava/lang/ClassCircularityError;", c, msg.str().c_str());
}
-void ThrowClassCircularityError(mirror::Class* c, const char* fmt, ...) {
+void ThrowClassCircularityError(ObjPtr<mirror::Class> c, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
ThrowException("Ljava/lang/ClassCircularityError;", c, fmt, &args);
@@ -146,7 +153,7 @@ void ThrowClassCircularityError(mirror::Class* c, const char* fmt, ...) {
// ClassFormatError
-void ThrowClassFormatError(mirror::Class* referrer, const char* fmt, ...) {
+void ThrowClassFormatError(ObjPtr<mirror::Class> referrer, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
ThrowException("Ljava/lang/ClassFormatError;", referrer, fmt, &args);
@@ -155,14 +162,15 @@ void ThrowClassFormatError(mirror::Class* referrer, const char* fmt, ...) {
// IllegalAccessError
-void ThrowIllegalAccessErrorClass(mirror::Class* referrer, mirror::Class* accessed) {
+void ThrowIllegalAccessErrorClass(ObjPtr<mirror::Class> referrer, ObjPtr<mirror::Class> accessed) {
std::ostringstream msg;
msg << "Illegal class access: '" << PrettyDescriptor(referrer) << "' attempting to access '"
<< PrettyDescriptor(accessed) << "'";
ThrowException("Ljava/lang/IllegalAccessError;", referrer, msg.str().c_str());
}
-void ThrowIllegalAccessErrorClassForMethodDispatch(mirror::Class* referrer, mirror::Class* accessed,
+void ThrowIllegalAccessErrorClassForMethodDispatch(ObjPtr<mirror::Class> referrer,
+ ObjPtr<mirror::Class> accessed,
ArtMethod* called,
InvokeType type) {
std::ostringstream msg;
@@ -172,14 +180,14 @@ void ThrowIllegalAccessErrorClassForMethodDispatch(mirror::Class* referrer, mirr
ThrowException("Ljava/lang/IllegalAccessError;", referrer, msg.str().c_str());
}
-void ThrowIllegalAccessErrorMethod(mirror::Class* referrer, ArtMethod* accessed) {
+void ThrowIllegalAccessErrorMethod(ObjPtr<mirror::Class> referrer, ArtMethod* accessed) {
std::ostringstream msg;
msg << "Method '" << PrettyMethod(accessed) << "' is inaccessible to class '"
<< PrettyDescriptor(referrer) << "'";
ThrowException("Ljava/lang/IllegalAccessError;", referrer, msg.str().c_str());
}
-void ThrowIllegalAccessErrorField(mirror::Class* referrer, ArtField* accessed) {
+void ThrowIllegalAccessErrorField(ObjPtr<mirror::Class> referrer, ArtField* accessed) {
std::ostringstream msg;
msg << "Field '" << PrettyField(accessed, false) << "' is inaccessible to class '"
<< PrettyDescriptor(referrer) << "'";
@@ -195,7 +203,7 @@ void ThrowIllegalAccessErrorFinalField(ArtMethod* referrer, ArtField* accessed)
msg.str().c_str());
}
-void ThrowIllegalAccessError(mirror::Class* referrer, const char* fmt, ...) {
+void ThrowIllegalAccessError(ObjPtr<mirror::Class> referrer, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
ThrowException("Ljava/lang/IllegalAccessError;", referrer, fmt, &args);
@@ -228,8 +236,8 @@ void ThrowIncompatibleClassChangeError(InvokeType expected_type, InvokeType foun
}
void ThrowIncompatibleClassChangeErrorClassForInterfaceSuper(ArtMethod* method,
- mirror::Class* target_class,
- mirror::Object* this_object,
+ ObjPtr<mirror::Class> target_class,
+ ObjPtr<mirror::Object> this_object,
ArtMethod* referrer) {
// Referrer is calling interface_method on this_object, however, the interface_method isn't
// implemented by this_object.
@@ -244,7 +252,7 @@ void ThrowIncompatibleClassChangeErrorClassForInterfaceSuper(ArtMethod* method,
}
void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(ArtMethod* interface_method,
- mirror::Object* this_object,
+ ObjPtr<mirror::Object> this_object,
ArtMethod* referrer) {
// Referrer is calling interface_method on this_object, however, the interface_method isn't
// implemented by this_object.
@@ -269,7 +277,7 @@ void ThrowIncompatibleClassChangeErrorField(ArtField* resolved_field, bool is_st
msg.str().c_str());
}
-void ThrowIncompatibleClassChangeError(mirror::Class* referrer, const char* fmt, ...) {
+void ThrowIncompatibleClassChangeError(ObjPtr<mirror::Class> referrer, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
ThrowException("Ljava/lang/IncompatibleClassChangeError;", referrer, fmt, &args);
@@ -303,14 +311,14 @@ void ThrowWrappedIOException(const char* fmt, ...) {
// LinkageError
-void ThrowLinkageError(mirror::Class* referrer, const char* fmt, ...) {
+void ThrowLinkageError(ObjPtr<mirror::Class> referrer, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
ThrowException("Ljava/lang/LinkageError;", referrer, fmt, &args);
va_end(args);
}
-void ThrowWrappedLinkageError(mirror::Class* referrer, const char* fmt, ...) {
+void ThrowWrappedLinkageError(ObjPtr<mirror::Class> referrer, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
ThrowWrappedException("Ljava/lang/LinkageError;", referrer, fmt, &args);
@@ -330,7 +338,7 @@ void ThrowNegativeArraySizeException(const char* msg) {
// NoSuchFieldError
-void ThrowNoSuchFieldError(const StringPiece& scope, mirror::Class* c,
+void ThrowNoSuchFieldError(const StringPiece& scope, ObjPtr<mirror::Class> c,
const StringPiece& type, const StringPiece& name) {
std::ostringstream msg;
std::string temp;
@@ -339,7 +347,7 @@ void ThrowNoSuchFieldError(const StringPiece& scope, mirror::Class* c,
ThrowException("Ljava/lang/NoSuchFieldError;", c, msg.str().c_str());
}
-void ThrowNoSuchFieldException(mirror::Class* c, const StringPiece& name) {
+void ThrowNoSuchFieldException(ObjPtr<mirror::Class> c, const StringPiece& name) {
std::ostringstream msg;
std::string temp;
msg << "No field " << name << " in class " << c->GetDescriptor(&temp);
@@ -348,7 +356,7 @@ void ThrowNoSuchFieldException(mirror::Class* c, const StringPiece& name) {
// NoSuchMethodError
-void ThrowNoSuchMethodError(InvokeType type, mirror::Class* c, const StringPiece& name,
+void ThrowNoSuchMethodError(InvokeType type, ObjPtr<mirror::Class> c, const StringPiece& name,
const Signature& signature) {
std::ostringstream msg;
std::string temp;
@@ -378,7 +386,7 @@ static void ThrowNullPointerExceptionForMethodAccessImpl(uint32_t method_idx,
void ThrowNullPointerExceptionForMethodAccess(uint32_t method_idx,
InvokeType type) {
- mirror::DexCache* dex_cache =
+ ObjPtr<mirror::DexCache> dex_cache =
Thread::Current()->GetCurrentMethod(nullptr)->GetDeclaringClass()->GetDexCache();
const DexFile& dex_file = *dex_cache->GetDexFile();
ThrowNullPointerExceptionForMethodAccessImpl(method_idx, dex_file, type);
@@ -386,7 +394,7 @@ void ThrowNullPointerExceptionForMethodAccess(uint32_t method_idx,
void ThrowNullPointerExceptionForMethodAccess(ArtMethod* method,
InvokeType type) {
- mirror::DexCache* dex_cache = method->GetDeclaringClass()->GetDexCache();
+ ObjPtr<mirror::DexCache> dex_cache = method->GetDeclaringClass()->GetDexCache();
const DexFile& dex_file = *dex_cache->GetDexFile();
ThrowNullPointerExceptionForMethodAccessImpl(method->GetDexMethodIndex(),
dex_file, type);
@@ -784,11 +792,24 @@ void ThrowStringIndexOutOfBoundsException(int index, int length) {
// VerifyError
-void ThrowVerifyError(mirror::Class* referrer, const char* fmt, ...) {
+void ThrowVerifyError(ObjPtr<mirror::Class> referrer, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
ThrowException("Ljava/lang/VerifyError;", referrer, fmt, &args);
va_end(args);
}
+// WrongMethodTypeException
+
+void ThrowWrongMethodTypeException(mirror::MethodType* callee_type,
+ mirror::MethodType* callsite_type) {
+ // TODO(narayan): Should we provide more detail here? The RI doesn't bother.
+ UNUSED(callee_type);
+ UNUSED(callsite_type);
+
+ ThrowException("Ljava/lang/invoke/WrongMethodTypeException;",
+ nullptr,
+ "Invalid method type for signature polymorphic call");
+}
+
} // namespace art
diff --git a/runtime/common_throws.h b/runtime/common_throws.h
index 945dc2daba..5d0bc1211e 100644
--- a/runtime/common_throws.h
+++ b/runtime/common_throws.h
@@ -19,11 +19,13 @@
#include "base/mutex.h"
#include "invoke_type.h"
+#include "obj_ptr.h"
namespace art {
namespace mirror {
class Class;
class Object;
+ class MethodType;
} // namespace mirror
class ArtField;
class ArtMethod;
@@ -50,20 +52,21 @@ void ThrowArrayIndexOutOfBoundsException(int index, int length)
// ArrayStoreException
-void ThrowArrayStoreException(mirror::Class* element_class, mirror::Class* array_class)
+void ThrowArrayStoreException(ObjPtr<mirror::Class> element_class,
+ ObjPtr<mirror::Class> array_class)
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// ClassCircularityError
-void ThrowClassCircularityError(mirror::Class* c)
+void ThrowClassCircularityError(ObjPtr<mirror::Class> c)
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
-void ThrowClassCircularityError(mirror::Class* c, const char* fmt, ...)
+void ThrowClassCircularityError(ObjPtr<mirror::Class> c, const char* fmt, ...)
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// ClassCastException
-void ThrowClassCastException(mirror::Class* dest_type, mirror::Class* src_type)
+void ThrowClassCastException(ObjPtr<mirror::Class> dest_type, ObjPtr<mirror::Class> src_type)
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowClassCastException(const char* msg)
@@ -71,30 +74,31 @@ void ThrowClassCastException(const char* msg)
// ClassFormatError
-void ThrowClassFormatError(mirror::Class* referrer, const char* fmt, ...)
+void ThrowClassFormatError(ObjPtr<mirror::Class> referrer, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// IllegalAccessError
-void ThrowIllegalAccessErrorClass(mirror::Class* referrer, mirror::Class* accessed)
+void ThrowIllegalAccessErrorClass(ObjPtr<mirror::Class> referrer, ObjPtr<mirror::Class> accessed)
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
-void ThrowIllegalAccessErrorClassForMethodDispatch(mirror::Class* referrer, mirror::Class* accessed,
+void ThrowIllegalAccessErrorClassForMethodDispatch(ObjPtr<mirror::Class> referrer,
+ ObjPtr<mirror::Class> accessed,
ArtMethod* called,
InvokeType type)
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
-void ThrowIllegalAccessErrorMethod(mirror::Class* referrer, ArtMethod* accessed)
+void ThrowIllegalAccessErrorMethod(ObjPtr<mirror::Class> referrer, ArtMethod* accessed)
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
-void ThrowIllegalAccessErrorField(mirror::Class* referrer, ArtField* accessed)
+void ThrowIllegalAccessErrorField(ObjPtr<mirror::Class> referrer, ArtField* accessed)
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIllegalAccessErrorFinalField(ArtMethod* referrer, ArtField* accessed)
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
-void ThrowIllegalAccessError(mirror::Class* referrer, const char* fmt, ...)
+void ThrowIllegalAccessError(ObjPtr<mirror::Class> referrer, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
@@ -110,26 +114,29 @@ void ThrowIllegalArgumentException(const char* msg)
// IncompatibleClassChangeError
-void ThrowIncompatibleClassChangeError(InvokeType expected_type, InvokeType found_type,
- ArtMethod* method, ArtMethod* referrer)
+void ThrowIncompatibleClassChangeError(InvokeType expected_type,
+ InvokeType found_type,
+ ArtMethod* method,
+ ArtMethod* referrer)
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIncompatibleClassChangeErrorClassForInterfaceSuper(ArtMethod* method,
- mirror::Class* target_class,
- mirror::Object* this_object,
+ ObjPtr<mirror::Class> target_class,
+ ObjPtr<mirror::Object> this_object,
ArtMethod* referrer)
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(ArtMethod* interface_method,
- mirror::Object* this_object,
+ ObjPtr<mirror::Object> this_object,
ArtMethod* referrer)
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
-void ThrowIncompatibleClassChangeErrorField(ArtField* resolved_field, bool is_static,
+void ThrowIncompatibleClassChangeErrorField(ArtField* resolved_field,
+ bool is_static,
ArtMethod* referrer)
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
-void ThrowIncompatibleClassChangeError(mirror::Class* referrer, const char* fmt, ...)
+void ThrowIncompatibleClassChangeError(ObjPtr<mirror::Class> referrer, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
@@ -146,11 +153,11 @@ void ThrowWrappedIOException(const char* fmt, ...) __attribute__((__format__(__p
// LinkageError
-void ThrowLinkageError(mirror::Class* referrer, const char* fmt, ...)
+void ThrowLinkageError(ObjPtr<mirror::Class> referrer, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
-void ThrowWrappedLinkageError(mirror::Class* referrer, const char* fmt, ...)
+void ThrowWrappedLinkageError(ObjPtr<mirror::Class> referrer, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
@@ -165,16 +172,20 @@ void ThrowNegativeArraySizeException(const char* msg)
// NoSuchFieldError
-void ThrowNoSuchFieldError(const StringPiece& scope, mirror::Class* c,
- const StringPiece& type, const StringPiece& name)
+void ThrowNoSuchFieldError(const StringPiece& scope,
+ ObjPtr<mirror::Class> c,
+ const StringPiece& type,
+ const StringPiece& name)
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
-void ThrowNoSuchFieldException(mirror::Class* c, const StringPiece& name)
+void ThrowNoSuchFieldException(ObjPtr<mirror::Class> c, const StringPiece& name)
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// NoSuchMethodError
-void ThrowNoSuchMethodError(InvokeType type, mirror::Class* c, const StringPiece& name,
+void ThrowNoSuchMethodError(InvokeType type,
+ ObjPtr<mirror::Class> c,
+ const StringPiece& name,
const Signature& signature)
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
@@ -215,10 +226,15 @@ void ThrowStringIndexOutOfBoundsException(int index, int length)
// VerifyError
-void ThrowVerifyError(mirror::Class* referrer, const char* fmt, ...)
+void ThrowVerifyError(ObjPtr<mirror::Class> referrer, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
+// WrongMethodTypeException
+void ThrowWrongMethodTypeException(mirror::MethodType* callee_type,
+ mirror::MethodType* callsite_type)
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
+
} // namespace art
#endif // ART_RUNTIME_COMMON_THROWS_H_
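
The dominant change in these headers is mechanical: raw mirror::Class* and mirror::Object* parameters become ObjPtr<>, ART's small value-type wrapper around a decoded reference that, in debug builds, can detect use of a reference that went stale across a thread suspension point. A minimal standalone sketch of the idea, using toy names rather than ART's real implementation:

    #include <cassert>
    #include <cstdint>

    // Epoch counter standing in for "this thread crossed a suspension point".
    static uint32_t g_current_epoch = 0;

    // Toy stand-in for ObjPtr<T>: remember the epoch at which the raw pointer
    // was captured, and assert on use if the epoch has advanced since.
    template <typename T>
    class ToyObjPtr {
     public:
      explicit ToyObjPtr(T* ptr) : ptr_(ptr), epoch_(g_current_epoch) {}
      T* Ptr() const {  // Explicit unwrap, as in the .Ptr() calls in the hunks.
        assert(epoch_ == g_current_epoch && "stale reference used after suspension");
        return ptr_;
      }
     private:
      T* ptr_;
      uint32_t epoch_;
    };

Call sites that must hand the reference to code still expecting a raw pointer unwrap it explicitly, which is why many hunks in this patch merely add or remove .Ptr() without changing behavior.
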
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index ada1a237d6..3977e4926a 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -39,6 +39,7 @@
#include "handle_scope.h"
#include "jdwp/jdwp_priv.h"
#include "jdwp/object_registry.h"
+#include "jvalue-inl.h"
#include "mirror/class.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
@@ -1289,7 +1290,7 @@ JDWP::JdwpError Dbg::CreateObject(JDWP::RefTypeId class_id, JDWP::ObjectId* new_
return error;
}
Thread* self = Thread::Current();
- mirror::Object* new_object;
+ ObjPtr<mirror::Object> new_object;
if (c->IsStringClass()) {
// Special case for java.lang.String.
gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
@@ -1304,7 +1305,7 @@ JDWP::JdwpError Dbg::CreateObject(JDWP::RefTypeId class_id, JDWP::ObjectId* new_
*new_object_id = 0;
return JDWP::ERR_OUT_OF_MEMORY;
}
- *new_object_id = gRegistry->Add(new_object);
+ *new_object_id = gRegistry->Add(new_object.Ptr());
return JDWP::ERR_NONE;
}
diff --git a/runtime/dex_file_annotations.cc b/runtime/dex_file_annotations.cc
index f0d3909bff..576c4aa849 100644
--- a/runtime/dex_file_annotations.cc
+++ b/runtime/dex_file_annotations.cc
@@ -22,7 +22,7 @@
#include "art_method-inl.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
-#include "jvalue.h"
+#include "jvalue-inl.h"
#include "mirror/field.h"
#include "mirror/method.h"
#include "reflection.h"
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index e37db7dd92..8077c21bdc 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -231,10 +231,10 @@ inline mirror::Object* AllocObjectFromCode(uint32_t type_idx,
// CheckObjectAlloc can cause thread suspension which means we may now be instrumented.
return klass->Alloc</*kInstrumented*/true>(
self,
- Runtime::Current()->GetHeap()->GetCurrentAllocator());
+ Runtime::Current()->GetHeap()->GetCurrentAllocator()).Ptr();
}
DCHECK(klass != nullptr);
- return klass->Alloc<kInstrumented>(self, allocator_type);
+ return klass->Alloc<kInstrumented>(self, allocator_type).Ptr();
}
// Given the context of a calling Method and a resolved class, create an instance.
@@ -254,10 +254,10 @@ inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
// Pass in false since the object cannot be finalizable.
// CheckClassInitializedForObjectAlloc can cause thread suspension which means we may now be
// instrumented.
- return klass->Alloc</*kInstrumented*/true, false>(self, heap->GetCurrentAllocator());
+ return klass->Alloc</*kInstrumented*/true, false>(self, heap->GetCurrentAllocator()).Ptr();
}
// Pass in false since the object cannot be finalizable.
- return klass->Alloc<kInstrumented, false>(self, allocator_type);
+ return klass->Alloc<kInstrumented, false>(self, allocator_type).Ptr();
}
// Given the context of a calling Method and an initialized class, create an instance.
@@ -268,7 +268,7 @@ inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klass,
gc::AllocatorType allocator_type) {
DCHECK(klass != nullptr);
// Pass in false since the object cannot be finalizable.
- return klass->Alloc<kInstrumented, false>(self, allocator_type);
+ return klass->Alloc<kInstrumented, false>(self, allocator_type).Ptr();
}
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index 20fa0d8e6b..383cdd256d 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -24,6 +24,15 @@ namespace art {
extern void ReadBarrierJni(mirror::CompressedReference<mirror::Object>* handle_on_stack,
Thread* self ATTRIBUTE_UNUSED) {
+ DCHECK(kUseReadBarrier);
+ if (kUseBakerReadBarrier) {
+ DCHECK(handle_on_stack->AsMirrorPtr() != nullptr)
+ << "The class of a static jni call must not be null";
+ // Check the mark bit and return early if it's already marked.
+ if (LIKELY(handle_on_stack->AsMirrorPtr()->GetMarkBit() != 0)) {
+ return;
+ }
+ }
// Call the read barrier and update the handle.
mirror::Object* to_ref = ReadBarrier::BarrierForRoot(handle_on_stack);
handle_on_stack->Assign(to_ref);
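
The lines added above give ReadBarrierJni a fast path: under the Baker read barrier, an object whose mark bit is already set needs no further work this GC cycle, so the comparatively expensive BarrierForRoot call can be skipped. A simplified standalone model of the pattern (illustrative names, not ART's real entry points):

    // Toy read barrier with a mark-bit fast path.
    struct ToyObject {
      bool mark_bit = false;  // In ART this bit lives in the object's lock word.
    };

    // Slow path: mark the object (a real collector may also relocate it).
    static ToyObject* BarrierSlowPath(ToyObject* ref) {
      if (ref != nullptr) {
        ref->mark_bit = true;
      }
      return ref;
    }

    static ToyObject* ReadBarrierForRoot(ToyObject* ref) {
      if (ref != nullptr && ref->mark_bit) {
        return ref;  // Already marked this cycle: nothing left to do.
      }
      return BarrierSlowPath(ref);
    }
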
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 126e26ccf5..750efac977 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -834,7 +834,7 @@ void BuildQuickArgumentVisitor::Visit() {
void BuildQuickArgumentVisitor::FixupReferences() {
// Fixup any references which may have changed.
for (const auto& pair : references_) {
- pair.second->Assign(soa_->Decode<mirror::Object>(pair.first).Ptr());
+ pair.second->Assign(soa_->Decode<mirror::Object>(pair.first));
soa_->Env()->DeleteLocalRef(pair.first);
}
}
@@ -926,7 +926,7 @@ void RememberForGcArgumentVisitor::Visit() {
void RememberForGcArgumentVisitor::FixupReferences() {
// Fixup any references which may have changed.
for (const auto& pair : references_) {
- pair.second->Assign(soa_->Decode<mirror::Object>(pair.first).Ptr());
+ pair.second->Assign(soa_->Decode<mirror::Object>(pair.first));
soa_->Env()->DeleteLocalRef(pair.first);
}
}
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index db90a2a616..bf5af8ee7e 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -3702,7 +3702,7 @@ void Heap::AddFinalizerReference(Thread* self, ObjPtr<mirror::Object>* object) {
args[0].l = arg.get();
InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_FinalizerReference_add, args);
// Restore object in case it gets moved.
- *object = soa.Decode<mirror::Object>(arg.get()).Ptr();
+ *object = soa.Decode<mirror::Object>(arg.get());
}
void Heap::RequestConcurrentGCAndSaveObject(Thread* self,
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index 96945978af..4b8f38d709 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -60,12 +60,13 @@ void ReferenceProcessor::BroadcastForSlowPath(Thread* self) {
condition_.Broadcast(self);
}
-mirror::Object* ReferenceProcessor::GetReferent(Thread* self, mirror::Reference* reference) {
+ObjPtr<mirror::Object> ReferenceProcessor::GetReferent(Thread* self,
+ ObjPtr<mirror::Reference> reference) {
if (!kUseReadBarrier || self->GetWeakRefAccessEnabled()) {
// Under read barrier / concurrent copying collector, it's not safe to call GetReferent() when
// weak ref access is disabled as the call includes a read barrier which may push a ref onto the
// mark stack and interfere with termination of marking.
- mirror::Object* const referent = reference->GetReferent();
+ ObjPtr<mirror::Object> const referent = reference->GetReferent();
// If the referent is null then it is already cleared; we can just return null since there is no
// scenario where it becomes non-null during the reference processing phase.
if (UNLIKELY(!SlowPathEnabled()) || referent == nullptr) {
@@ -116,7 +117,8 @@ void ReferenceProcessor::StopPreservingReferences(Thread* self) {
}
// Process reference class instances and schedule finalizations.
-void ReferenceProcessor::ProcessReferences(bool concurrent, TimingLogger* timings,
+void ReferenceProcessor::ProcessReferences(bool concurrent,
+ TimingLogger* timings,
bool clear_soft_references,
collector::GarbageCollector* collector) {
TimingLogger::ScopedTiming t(concurrent ? __FUNCTION__ : "(Paused)ProcessReferences", timings);
@@ -188,7 +190,8 @@ void ReferenceProcessor::ProcessReferences(bool concurrent, TimingLogger* timing
// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
-void ReferenceProcessor::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
+void ReferenceProcessor::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
+ ObjPtr<mirror::Reference> ref,
collector::GarbageCollector* collector) {
// klass can be the class of the old object if the visitor already updated the class of ref.
DCHECK(klass != nullptr);
@@ -260,7 +263,8 @@ void ReferenceProcessor::EnqueueClearedReferences(Thread* self) {
}
}
-bool ReferenceProcessor::MakeCircularListIfUnenqueued(mirror::FinalizerReference* reference) {
+bool ReferenceProcessor::MakeCircularListIfUnenqueued(
+ ObjPtr<mirror::FinalizerReference> reference) {
Thread* self = Thread::Current();
MutexLock mu(self, *Locks::reference_processor_lock_);
// Wait until we are done processing references.
diff --git a/runtime/gc/reference_processor.h b/runtime/gc/reference_processor.h
index 4788f8a3c0..759b7e129c 100644
--- a/runtime/gc/reference_processor.h
+++ b/runtime/gc/reference_processor.h
@@ -46,7 +46,9 @@ class Heap;
class ReferenceProcessor {
public:
explicit ReferenceProcessor();
- void ProcessReferences(bool concurrent, TimingLogger* timings, bool clear_soft_references,
+ void ProcessReferences(bool concurrent,
+ TimingLogger* timings,
+ bool clear_soft_references,
gc::collector::GarbageCollector* collector)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_)
@@ -57,16 +59,17 @@ class ReferenceProcessor {
void EnableSlowPath() REQUIRES_SHARED(Locks::mutator_lock_);
void BroadcastForSlowPath(Thread* self);
// Decode the referent, may block if references are being processed.
- mirror::Object* GetReferent(Thread* self, mirror::Reference* reference)
+ ObjPtr<mirror::Object> GetReferent(Thread* self, ObjPtr<mirror::Reference> reference)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::reference_processor_lock_);
void EnqueueClearedReferences(Thread* self) REQUIRES(!Locks::mutator_lock_);
- void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
+ void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
+ ObjPtr<mirror::Reference> ref,
collector::GarbageCollector* collector)
REQUIRES_SHARED(Locks::mutator_lock_);
void UpdateRoots(IsMarkedVisitor* visitor)
REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
// Make a circular list with reference if it is not enqueued. Uses the finalizer queue lock.
- bool MakeCircularListIfUnenqueued(mirror::FinalizerReference* reference)
+ bool MakeCircularListIfUnenqueued(ObjPtr<mirror::FinalizerReference> reference)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::reference_processor_lock_,
!Locks::reference_queue_finalizer_references_lock_);
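
GetReferent above and the JNI weak-global methods later in this patch share one synchronization pattern: while the concurrent collector has weak-reference access disabled, readers block on a condition variable until access is re-enabled and broadcast. A standalone sketch of that gate using standard-library primitives (ART uses its own Mutex and ConditionVariable types):

    #include <condition_variable>
    #include <mutex>

    class WeakAccessGate {
     public:
      // Reader side: block while the collector forbids weak-ref access.
      template <typename Fn>
      auto Read(Fn fn) {
        std::unique_lock<std::mutex> lock(mutex_);
        cond_.wait(lock, [this] { return access_enabled_; });
        return fn();  // Safe to decode the weak reference now.
      }

      // Collector side: toggle access and wake any blocked readers.
      void SetAccessEnabled(bool enabled) {
        {
          std::lock_guard<std::mutex> lock(mutex_);
          access_enabled_ = enabled;
        }
        if (enabled) {
          cond_.notify_all();
        }
      }

     private:
      std::mutex mutex_;
      std::condition_variable cond_;
      bool access_enabled_ = true;
    };
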
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index 62625c41b4..4e6f7da5f0 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -29,7 +29,7 @@ namespace gc {
ReferenceQueue::ReferenceQueue(Mutex* lock) : lock_(lock), list_(nullptr) {
}
-void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref) {
+void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, ObjPtr<mirror::Reference> ref) {
DCHECK(ref != nullptr);
MutexLock mu(self, *lock_);
if (ref->IsUnprocessed()) {
@@ -37,16 +37,16 @@ void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference*
}
}
-void ReferenceQueue::EnqueueReference(mirror::Reference* ref) {
+void ReferenceQueue::EnqueueReference(ObjPtr<mirror::Reference> ref) {
DCHECK(ref != nullptr);
CHECK(ref->IsUnprocessed());
if (IsEmpty()) {
// 1 element cyclic queue, i.e.: Reference ref = ..; ref.pendingNext = ref;
- list_ = ref;
+ list_ = ref.Ptr();
} else {
// The list is owned by the GC, everything that has been inserted must already be at least
// gray.
- mirror::Reference* head = list_->GetPendingNext<kWithoutReadBarrier>();
+ ObjPtr<mirror::Reference> head = list_->GetPendingNext<kWithoutReadBarrier>();
DCHECK(head != nullptr);
ref->SetPendingNext(head);
}
@@ -54,16 +54,16 @@ void ReferenceQueue::EnqueueReference(mirror::Reference* ref) {
list_->SetPendingNext(ref);
}
-mirror::Reference* ReferenceQueue::DequeuePendingReference() {
+ObjPtr<mirror::Reference> ReferenceQueue::DequeuePendingReference() {
DCHECK(!IsEmpty());
- mirror::Reference* ref = list_->GetPendingNext<kWithoutReadBarrier>();
+ ObjPtr<mirror::Reference> ref = list_->GetPendingNext<kWithoutReadBarrier>();
DCHECK(ref != nullptr);
// Note: the following code is thread-safe because it is only called from ProcessReferences which
// is single threaded.
if (list_ == ref) {
list_ = nullptr;
} else {
- mirror::Reference* next = ref->GetPendingNext<kWithoutReadBarrier>();
+ ObjPtr<mirror::Reference> next = ref->GetPendingNext<kWithoutReadBarrier>();
list_->SetPendingNext(next);
}
ref->SetPendingNext(nullptr);
@@ -83,10 +83,10 @@ mirror::Reference* ReferenceQueue::DequeuePendingReference() {
// In ConcurrentCopying::ProcessMarkStackRef() we may leave a white reference in the queue and
// find it here, which is OK.
CHECK_EQ(rb_ptr, ReadBarrier::WhitePtr()) << "ref=" << ref << " rb_ptr=" << rb_ptr;
- mirror::Object* referent = ref->GetReferent<kWithoutReadBarrier>();
+ ObjPtr<mirror::Object> referent = ref->GetReferent<kWithoutReadBarrier>();
// The referent could be null if it's cleared by a mutator (Reference.clear()).
if (referent != nullptr) {
- CHECK(concurrent_copying->IsInToSpace(referent))
+ CHECK(concurrent_copying->IsInToSpace(referent.Ptr()))
<< "ref=" << ref << " rb_ptr=" << ref->GetReadBarrierPointer()
<< " referent=" << referent;
}
@@ -96,13 +96,13 @@ mirror::Reference* ReferenceQueue::DequeuePendingReference() {
}
void ReferenceQueue::Dump(std::ostream& os) const {
- mirror::Reference* cur = list_;
+ ObjPtr<mirror::Reference> cur = list_;
os << "Reference starting at list_=" << list_ << "\n";
if (cur == nullptr) {
return;
}
do {
- mirror::Reference* pending_next = cur->GetPendingNext();
+ ObjPtr<mirror::Reference> pending_next = cur->GetPendingNext();
os << "Reference= " << cur << " PendingNext=" << pending_next;
if (cur->IsFinalizerReferenceInstance()) {
os << " Zombie=" << cur->AsFinalizerReference()->GetZombie();
@@ -114,7 +114,7 @@ void ReferenceQueue::Dump(std::ostream& os) const {
size_t ReferenceQueue::GetLength() const {
size_t count = 0;
- mirror::Reference* cur = list_;
+ ObjPtr<mirror::Reference> cur = list_;
if (cur != nullptr) {
do {
++count;
@@ -127,7 +127,7 @@ size_t ReferenceQueue::GetLength() const {
void ReferenceQueue::ClearWhiteReferences(ReferenceQueue* cleared_references,
collector::GarbageCollector* collector) {
while (!IsEmpty()) {
- mirror::Reference* ref = DequeuePendingReference();
+ ObjPtr<mirror::Reference> ref = DequeuePendingReference();
mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
if (referent_addr->AsMirrorPtr() != nullptr &&
!collector->IsMarkedHeapReference(referent_addr)) {
@@ -145,11 +145,11 @@ void ReferenceQueue::ClearWhiteReferences(ReferenceQueue* cleared_references,
void ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue* cleared_references,
collector::GarbageCollector* collector) {
while (!IsEmpty()) {
- mirror::FinalizerReference* ref = DequeuePendingReference()->AsFinalizerReference();
+ ObjPtr<mirror::FinalizerReference> ref = DequeuePendingReference()->AsFinalizerReference();
mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
if (referent_addr->AsMirrorPtr() != nullptr &&
!collector->IsMarkedHeapReference(referent_addr)) {
- mirror::Object* forward_address = collector->MarkObject(referent_addr->AsMirrorPtr());
+ ObjPtr<mirror::Object> forward_address = collector->MarkObject(referent_addr->AsMirrorPtr());
// Move the updated referent to the zombie field.
if (Runtime::Current()->IsActiveTransaction()) {
ref->SetZombie<true>(forward_address);
@@ -167,8 +167,8 @@ void ReferenceQueue::ForwardSoftReferences(MarkObjectVisitor* visitor) {
if (UNLIKELY(IsEmpty())) {
return;
}
- mirror::Reference* const head = list_;
- mirror::Reference* ref = head;
+ ObjPtr<mirror::Reference> const head = list_;
+ ObjPtr<mirror::Reference> ref = head;
do {
mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
if (referent_addr->AsMirrorPtr() != nullptr) {
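
For context on the hunks above: list_ is one node of a circular, singly linked list threaded through Reference.pendingNext, where list_->pendingNext is the dequeue position and a single element points at itself. A standalone toy version of the enqueue/dequeue invariants (plain pointers instead of mirror::Reference):

    struct Node {
      Node* next = nullptr;  // Stands in for Reference.pendingNext.
    };

    struct ToyReferenceQueue {
      Node* list_ = nullptr;

      bool IsEmpty() const { return list_ == nullptr; }

      void Enqueue(Node* ref) {
        if (IsEmpty()) {
          list_ = ref;              // One-element cycle: ref->next == ref below.
        } else {
          ref->next = list_->next;  // Splice in ahead of the current head.
        }
        list_->next = ref;
      }

      // Precondition: !IsEmpty(), matching the DCHECK in DequeuePendingReference.
      Node* Dequeue() {
        Node* ref = list_->next;    // Head of the cycle.
        if (ref == list_) {
          list_ = nullptr;          // Removed the last element.
        } else {
          list_->next = ref->next;  // Unlink the head.
        }
        ref->next = nullptr;        // Mirrors ref->SetPendingNext(nullptr).
        return ref;
      }
    };
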
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index 1de1aa11db..b5ec1e5341 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -26,6 +26,7 @@
#include "base/timing_logger.h"
#include "globals.h"
#include "jni.h"
+#include "obj_ptr.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "thread_pool.h"
@@ -54,15 +55,15 @@ class ReferenceQueue {
// Enqueue a reference if it is unprocessed. Thread safe to call from multiple
// threads since it uses a lock to avoid a race between checking for the reference's presence and
// adding it.
- void AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref)
+ void AtomicEnqueueIfNotEnqueued(Thread* self, ObjPtr<mirror::Reference> ref)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*lock_);
// Enqueue a reference. The reference must be unprocessed.
// Not thread safe, used when mutators are paused to minimize lock overhead.
- void EnqueueReference(mirror::Reference* ref) REQUIRES_SHARED(Locks::mutator_lock_);
+ void EnqueueReference(ObjPtr<mirror::Reference> ref) REQUIRES_SHARED(Locks::mutator_lock_);
// Dequeue a reference from the queue and return that dequeued reference.
- mirror::Reference* DequeuePendingReference() REQUIRES_SHARED(Locks::mutator_lock_);
+ ObjPtr<mirror::Reference> DequeuePendingReference() REQUIRES_SHARED(Locks::mutator_lock_);
// Enqueues finalizer references with white referents. White referents are blackened, moved to
// the zombie field, and the referent field is cleared.
@@ -104,7 +105,7 @@ class ReferenceQueue {
// calling AtomicEnqueueIfNotEnqueued.
Mutex* const lock_;
// The actual reference list. Only a root for the mark compact GC since it will be null for other
- // GC types.
+ // GC types. Not an ObjPtr since it is accessed from multiple threads.
mirror::Reference* list_;
DISALLOW_IMPLICIT_CONSTRUCTORS(ReferenceQueue);
diff --git a/runtime/gc/reference_queue_test.cc b/runtime/gc/reference_queue_test.cc
index 5b8a3c2963..3ca3353562 100644
--- a/runtime/gc/reference_queue_test.cc
+++ b/runtime/gc/reference_queue_test.cc
@@ -52,10 +52,10 @@ TEST_F(ReferenceQueueTest, EnqueueDequeue) {
std::set<mirror::Reference*> refs = {ref1.Get(), ref2.Get()};
std::set<mirror::Reference*> dequeued;
- dequeued.insert(queue.DequeuePendingReference());
+ dequeued.insert(queue.DequeuePendingReference().Ptr());
ASSERT_TRUE(!queue.IsEmpty());
ASSERT_EQ(queue.GetLength(), 1U);
- dequeued.insert(queue.DequeuePendingReference());
+ dequeued.insert(queue.DequeuePendingReference().Ptr());
ASSERT_EQ(queue.GetLength(), 0U);
ASSERT_TRUE(queue.IsEmpty());
ASSERT_EQ(refs, dequeued);
diff --git a/runtime/indirect_reference_table_test.cc b/runtime/indirect_reference_table_test.cc
index 0380f3ee7c..169911077e 100644
--- a/runtime/indirect_reference_table_test.cc
+++ b/runtime/indirect_reference_table_test.cc
@@ -53,15 +53,16 @@ TEST_F(IndirectReferenceTableTest, BasicTest) {
IndirectReferenceTable irt(kTableInitial, kTableMax, kGlobal);
mirror::Class* c = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;");
+ StackHandleScope<4> hs(soa.Self());
ASSERT_TRUE(c != nullptr);
- mirror::Object* obj0 = c->AllocObject(soa.Self());
- ASSERT_TRUE(obj0 != nullptr);
- mirror::Object* obj1 = c->AllocObject(soa.Self());
- ASSERT_TRUE(obj1 != nullptr);
- mirror::Object* obj2 = c->AllocObject(soa.Self());
- ASSERT_TRUE(obj2 != nullptr);
- mirror::Object* obj3 = c->AllocObject(soa.Self());
- ASSERT_TRUE(obj3 != nullptr);
+ Handle<mirror::Object> obj0 = hs.NewHandle(c->AllocObject(soa.Self()));
+ ASSERT_TRUE(obj0.Get() != nullptr);
+ Handle<mirror::Object> obj1 = hs.NewHandle(c->AllocObject(soa.Self()));
+ ASSERT_TRUE(obj1.Get() != nullptr);
+ Handle<mirror::Object> obj2 = hs.NewHandle(c->AllocObject(soa.Self()));
+ ASSERT_TRUE(obj2.Get() != nullptr);
+ Handle<mirror::Object> obj3 = hs.NewHandle(c->AllocObject(soa.Self()));
+ ASSERT_TRUE(obj3.Get() != nullptr);
const uint32_t cookie = IRT_FIRST_SEGMENT;
@@ -71,19 +72,19 @@ TEST_F(IndirectReferenceTableTest, BasicTest) {
EXPECT_FALSE(irt.Remove(cookie, iref0)) << "unexpectedly successful removal";
// Add three, check, remove in the order in which they were added.
- iref0 = irt.Add(cookie, obj0);
+ iref0 = irt.Add(cookie, obj0.Get());
EXPECT_TRUE(iref0 != nullptr);
CheckDump(&irt, 1, 1);
- IndirectRef iref1 = irt.Add(cookie, obj1);
+ IndirectRef iref1 = irt.Add(cookie, obj1.Get());
EXPECT_TRUE(iref1 != nullptr);
CheckDump(&irt, 2, 2);
- IndirectRef iref2 = irt.Add(cookie, obj2);
+ IndirectRef iref2 = irt.Add(cookie, obj2.Get());
EXPECT_TRUE(iref2 != nullptr);
CheckDump(&irt, 3, 3);
- EXPECT_OBJ_PTR_EQ(obj0, irt.Get(iref0));
- EXPECT_OBJ_PTR_EQ(obj1, irt.Get(iref1));
- EXPECT_OBJ_PTR_EQ(obj2, irt.Get(iref2));
+ EXPECT_OBJ_PTR_EQ(obj0.Get(), irt.Get(iref0));
+ EXPECT_OBJ_PTR_EQ(obj1.Get(), irt.Get(iref1));
+ EXPECT_OBJ_PTR_EQ(obj2.Get(), irt.Get(iref2));
EXPECT_TRUE(irt.Remove(cookie, iref0));
CheckDump(&irt, 2, 2);
@@ -99,11 +100,11 @@ TEST_F(IndirectReferenceTableTest, BasicTest) {
EXPECT_TRUE(irt.Get(iref0) == nullptr);
// Add three, remove in the opposite order.
- iref0 = irt.Add(cookie, obj0);
+ iref0 = irt.Add(cookie, obj0.Get());
EXPECT_TRUE(iref0 != nullptr);
- iref1 = irt.Add(cookie, obj1);
+ iref1 = irt.Add(cookie, obj1.Get());
EXPECT_TRUE(iref1 != nullptr);
- iref2 = irt.Add(cookie, obj2);
+ iref2 = irt.Add(cookie, obj2.Get());
EXPECT_TRUE(iref2 != nullptr);
CheckDump(&irt, 3, 3);
@@ -119,11 +120,11 @@ TEST_F(IndirectReferenceTableTest, BasicTest) {
// Add three, remove middle / middle / bottom / top. (Second attempt
// to remove middle should fail.)
- iref0 = irt.Add(cookie, obj0);
+ iref0 = irt.Add(cookie, obj0.Get());
EXPECT_TRUE(iref0 != nullptr);
- iref1 = irt.Add(cookie, obj1);
+ iref1 = irt.Add(cookie, obj1.Get());
EXPECT_TRUE(iref1 != nullptr);
- iref2 = irt.Add(cookie, obj2);
+ iref2 = irt.Add(cookie, obj2.Get());
EXPECT_TRUE(iref2 != nullptr);
CheckDump(&irt, 3, 3);
@@ -148,20 +149,20 @@ TEST_F(IndirectReferenceTableTest, BasicTest) {
// Add four entries. Remove #1, add new entry, verify that table size
// is still 4 (i.e. holes are getting filled). Remove #1 and #3, verify
// that we delete one and don't hole-compact the other.
- iref0 = irt.Add(cookie, obj0);
+ iref0 = irt.Add(cookie, obj0.Get());
EXPECT_TRUE(iref0 != nullptr);
- iref1 = irt.Add(cookie, obj1);
+ iref1 = irt.Add(cookie, obj1.Get());
EXPECT_TRUE(iref1 != nullptr);
- iref2 = irt.Add(cookie, obj2);
+ iref2 = irt.Add(cookie, obj2.Get());
EXPECT_TRUE(iref2 != nullptr);
- IndirectRef iref3 = irt.Add(cookie, obj3);
+ IndirectRef iref3 = irt.Add(cookie, obj3.Get());
EXPECT_TRUE(iref3 != nullptr);
CheckDump(&irt, 4, 4);
ASSERT_TRUE(irt.Remove(cookie, iref1));
CheckDump(&irt, 3, 3);
- iref1 = irt.Add(cookie, obj1);
+ iref1 = irt.Add(cookie, obj1.Get());
EXPECT_TRUE(iref1 != nullptr);
ASSERT_EQ(4U, irt.Capacity()) << "hole not filled";
@@ -184,12 +185,12 @@ TEST_F(IndirectReferenceTableTest, BasicTest) {
// Add an entry, remove it, add a new entry, and try to use the original
// iref. They have the same slot number but are for different objects.
// With the extended checks in place, this should fail.
- iref0 = irt.Add(cookie, obj0);
+ iref0 = irt.Add(cookie, obj0.Get());
EXPECT_TRUE(iref0 != nullptr);
CheckDump(&irt, 1, 1);
ASSERT_TRUE(irt.Remove(cookie, iref0));
CheckDump(&irt, 0, 0);
- iref1 = irt.Add(cookie, obj1);
+ iref1 = irt.Add(cookie, obj1.Get());
EXPECT_TRUE(iref1 != nullptr);
CheckDump(&irt, 1, 1);
ASSERT_FALSE(irt.Remove(cookie, iref0)) << "mismatched del succeeded";
@@ -200,12 +201,12 @@ TEST_F(IndirectReferenceTableTest, BasicTest) {
// Same as above, but with the same object. A more rigorous checker
// (e.g. with slot serialization) will catch this.
- iref0 = irt.Add(cookie, obj0);
+ iref0 = irt.Add(cookie, obj0.Get());
EXPECT_TRUE(iref0 != nullptr);
CheckDump(&irt, 1, 1);
ASSERT_TRUE(irt.Remove(cookie, iref0));
CheckDump(&irt, 0, 0);
- iref1 = irt.Add(cookie, obj0);
+ iref1 = irt.Add(cookie, obj0.Get());
EXPECT_TRUE(iref1 != nullptr);
CheckDump(&irt, 1, 1);
if (iref0 != iref1) {
@@ -220,7 +221,7 @@ TEST_F(IndirectReferenceTableTest, BasicTest) {
ASSERT_TRUE(irt.Get(nullptr) == nullptr);
// Stale lookup.
- iref0 = irt.Add(cookie, obj0);
+ iref0 = irt.Add(cookie, obj0.Get());
EXPECT_TRUE(iref0 != nullptr);
CheckDump(&irt, 1, 1);
ASSERT_TRUE(irt.Remove(cookie, iref0));
@@ -231,12 +232,12 @@ TEST_F(IndirectReferenceTableTest, BasicTest) {
// These ones fit...
IndirectRef manyRefs[kTableInitial];
for (size_t i = 0; i < kTableInitial; i++) {
- manyRefs[i] = irt.Add(cookie, obj0);
+ manyRefs[i] = irt.Add(cookie, obj0.Get());
ASSERT_TRUE(manyRefs[i] != nullptr) << "Failed adding " << i;
CheckDump(&irt, i + 1, 1);
}
// ...this one causes overflow.
- iref0 = irt.Add(cookie, obj0);
+ iref0 = irt.Add(cookie, obj0.Get());
ASSERT_TRUE(iref0 != nullptr);
ASSERT_EQ(kTableInitial + 1, irt.Capacity());
CheckDump(&irt, kTableInitial + 1, 1);
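
The rewrite of this test matters because AllocObject can trigger a collection and a moving collector relocates objects: a raw mirror::Object* held across a later allocation may dangle, whereas a Handle<> occupies a GC-visible root slot that the collector updates. A standalone illustration of the difference (toy types, not ART's Handle machinery):

    // Toy moving "GC": it knows about registered root slots and fixes them up.
    struct ToyObject {
      int payload = 0;
    };

    struct ToyHandle {
      ToyObject** slot;  // Root slot the collector can rewrite.
      ToyObject* Get() const { return *slot; }
    };

    // Relocate the object a handle refers to. The handle keeps working; any
    // raw ToyObject* copied out before the move now points at the old address.
    static void MoveObject(ToyHandle handle, ToyObject* new_location) {
      *new_location = *handle.Get();  // Copy contents to the new address.
      *handle.slot = new_location;    // Fix up the root slot.
    }
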
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index d283a50234..f3cd25cd73 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -22,6 +22,7 @@
#include "interpreter_common.h"
#include "interpreter_mterp_impl.h"
#include "interpreter_switch_impl.h"
+#include "jvalue-inl.h"
#include "mirror/string-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "ScopedLocalRef.h"
@@ -51,7 +52,7 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
ScopedThreadStateChange tsc(self, kNative);
jresult = fn(soa.Env(), klass.get());
}
- result->SetL(soa.Decode<Object>(jresult).Ptr());
+ result->SetL(soa.Decode<Object>(jresult));
} else if (shorty == "V") {
typedef void (fntype)(JNIEnv*, jclass);
fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
@@ -93,7 +94,7 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
ScopedThreadStateChange tsc(self, kNative);
jresult = fn(soa.Env(), klass.get(), arg0.get());
}
- result->SetL(soa.Decode<Object>(jresult).Ptr());
+ result->SetL(soa.Decode<Object>(jresult));
} else if (shorty == "IIZ") {
typedef jint (fntype)(JNIEnv*, jclass, jint, jboolean);
fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
@@ -191,7 +192,7 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
ScopedThreadStateChange tsc(self, kNative);
jresult = fn(soa.Env(), rcvr.get());
}
- result->SetL(soa.Decode<Object>(jresult).Ptr());
+ result->SetL(soa.Decode<Object>(jresult));
} else if (shorty == "V") {
typedef void (fntype)(JNIEnv*, jobject);
fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
@@ -212,7 +213,7 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
ScopedThreadStateChange tsc(self, kNative);
jresult = fn(soa.Env(), rcvr.get(), arg0.get());
}
- result->SetL(soa.Decode<Object>(jresult).Ptr());
+ result->SetL(soa.Decode<Object>(jresult));
ScopedThreadStateChange tsc(self, kNative);
} else if (shorty == "III") {
typedef jint (fntype)(JNIEnv*, jobject, jint, jint);
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index db7ebb4f00..191ffcc776 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -24,6 +24,7 @@
#include "jit/jit.h"
#include "jvalue.h"
#include "method_handles.h"
+#include "method_handles-inl.h"
#include "mirror/array-inl.h"
#include "mirror/class.h"
#include "mirror/method_handle_impl.h"
@@ -474,24 +475,6 @@ void UnexpectedOpcode(const Instruction* inst, const ShadowFrame& shadow_frame)
UNREACHABLE();
}
-// Assign register 'src_reg' from shadow_frame to register 'dest_reg' into new_shadow_frame.
-static inline void AssignRegister(ShadowFrame* new_shadow_frame, const ShadowFrame& shadow_frame,
- size_t dest_reg, size_t src_reg)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- // Uint required, so that sign extension does not make this wrong on 64b systems
- uint32_t src_value = shadow_frame.GetVReg(src_reg);
- mirror::Object* o = shadow_frame.GetVRegReference<kVerifyNone>(src_reg);
-
- // If both register locations contains the same value, the register probably holds a reference.
- // Note: As an optimization, non-moving collectors leave a stale reference value
- // in the references array even after the original vreg was overwritten to a non-reference.
- if (src_value == reinterpret_cast<uintptr_t>(o)) {
- new_shadow_frame->SetVRegReference(dest_reg, o);
- } else {
- new_shadow_frame->SetVReg(dest_reg, src_value);
- }
-}
-
void AbortTransactionF(Thread* self, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
@@ -519,6 +502,17 @@ static inline bool DoCallCommon(ArtMethod* called_method,
uint32_t (&arg)[Instruction::kMaxVarArgRegs],
uint32_t vregC) ALWAYS_INLINE;
+// Separate declaration is required solely for the attributes.
+template <bool is_range> REQUIRES_SHARED(Locks::mutator_lock_)
+static inline bool DoCallPolymorphic(ArtMethod* called_method,
+ Handle<mirror::MethodType> callsite_type,
+ Handle<mirror::MethodType> target_type,
+ Thread* self,
+ ShadowFrame& shadow_frame,
+ JValue* result,
+ uint32_t (&arg)[Instruction::kMaxVarArgRegs],
+ uint32_t vregC) ALWAYS_INLINE;
+
void ArtInterpreterToCompiledCodeBridge(Thread* self,
ArtMethod* caller,
const DexFile::CodeItem* code_item,
@@ -597,9 +591,10 @@ inline bool DoInvokePolymorphic(Thread* self, ShadowFrame& shadow_frame,
// signature polymorphic method so that we disallow calls via invoke-polymorphic
// to non sig-poly methods. This would also have the side effect of verifying
// that vRegC really is a reference type.
- mirror::MethodHandleImpl* const method_handle =
- reinterpret_cast<mirror::MethodHandleImpl*>(shadow_frame.GetVRegReference(vRegC));
- if (UNLIKELY(method_handle == nullptr)) {
+ StackHandleScope<6> hs(self);
+ Handle<mirror::MethodHandleImpl> method_handle(hs.NewHandle(
+ reinterpret_cast<mirror::MethodHandleImpl*>(shadow_frame.GetVRegReference(vRegC))));
+ if (UNLIKELY(method_handle.Get() == nullptr)) {
const int method_idx = (is_range) ? inst->VRegB_4rcc() : inst->VRegB_45cc();
// Note that the invoke type is kVirtual here because a call to a signature
// polymorphic method is shaped like a virtual call at the bytecode level.
@@ -616,32 +611,29 @@ inline bool DoInvokePolymorphic(Thread* self, ShadowFrame& shadow_frame,
// Call through to the classlinker and ask it to resolve the static type associated
// with the callsite. This information is stored in the dex cache so it's
// guaranteed to be fast after the first resolution.
- StackHandleScope<2> hs(self);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- mirror::Class* caller_class = shadow_frame.GetMethod()->GetDeclaringClass();
- mirror::MethodType* callsite_type = class_linker->ResolveMethodType(
+ Handle<mirror::Class> caller_class(hs.NewHandle(shadow_frame.GetMethod()->GetDeclaringClass()));
+ Handle<mirror::MethodType> callsite_type(hs.NewHandle(class_linker->ResolveMethodType(
caller_class->GetDexFile(), callsite_proto_id,
hs.NewHandle<mirror::DexCache>(caller_class->GetDexCache()),
- hs.NewHandle<mirror::ClassLoader>(caller_class->GetClassLoader()));
+ hs.NewHandle<mirror::ClassLoader>(caller_class->GetClassLoader()))));
// This implies we couldn't resolve one or more types in this method handle.
- if (UNLIKELY(callsite_type == nullptr)) {
+ if (UNLIKELY(callsite_type.Get() == nullptr)) {
CHECK(self->IsExceptionPending());
result->SetJ(0);
return false;
}
- const char* old_cause = self->StartAssertNoThreadSuspension("DoInvokePolymorphic");
-
// Get the method we're actually invoking along with the kind of
// invoke that is desired. We don't need to perform access checks at this
// point because they would have been performed on our behalf at the point
// of creation of the method handle.
ArtMethod* called_method = method_handle->GetTargetMethod();
const MethodHandleKind handle_kind = method_handle->GetHandleKind();
- mirror::MethodType* const handle_type = method_handle->GetMethodType();
+ Handle<mirror::MethodType> handle_type(hs.NewHandle(method_handle->GetMethodType()));
CHECK(called_method != nullptr);
- CHECK(handle_type != nullptr);
+ CHECK(handle_type.Get() != nullptr);
// We now have to massage the number of inputs to the target function.
// It's always one less than the number of inputs to the signature polymorphic
@@ -672,14 +664,12 @@ inline bool DoInvokePolymorphic(Thread* self, ShadowFrame& shadow_frame,
called_method = receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(
called_method, kRuntimePointerSize);
if (!VerifyObjectIsClass(receiver, declaring_class)) {
- self->EndAssertNoThreadSuspension(old_cause);
return false;
}
} else if (handle_kind == kInvokeDirect) {
// TODO(narayan): We need to handle the case where the target method is a
// constructor here. Also the case where we don't want to dynamically
// dispatch based on the type of the receiver.
- self->EndAssertNoThreadSuspension(old_cause);
UNIMPLEMENTED(FATAL) << "Direct invokes are not implemented yet.";
return false;
}
@@ -687,24 +677,123 @@ inline bool DoInvokePolymorphic(Thread* self, ShadowFrame& shadow_frame,
// NOTE: handle_kind == kInvokeStatic needs no special treatment here. We
// can directly make the call. handle_kind == kInvokeSuper doesn't have any
// particular use and can probably be dropped.
- if (callsite_type->IsExactMatch(handle_type)) {
- self->EndAssertNoThreadSuspension(old_cause);
+
+ if (callsite_type->IsExactMatch(handle_type.Get())) {
return DoCallCommon<is_range, do_access_check>(
called_method, self, shadow_frame, result, number_of_inputs,
arg, receiver_vregC);
+ } else {
+ return DoCallPolymorphic<is_range>(
+ called_method, callsite_type, handle_type, self, shadow_frame,
+ result, arg, receiver_vregC);
}
-
- self->EndAssertNoThreadSuspension(old_cause);
- UNIMPLEMENTED(FATAL) << "Non exact invokes are not implemented yet.";
- return false;
} else {
// TODO(narayan): Implement field getters and setters.
- self->EndAssertNoThreadSuspension(old_cause);
UNIMPLEMENTED(FATAL) << "Field references in method handles are not implemented yet.";
return false;
}
}
+// Calculate the number of ins for a proxy or native method, where we
+// can't just look at the code item.
+static inline size_t GetInsForProxyOrNativeMethod(ArtMethod* method)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(method->IsNative() || method->IsProxyMethod());
+
+ method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
+ size_t num_ins = 0;
+ // Separate accounting for the receiver, which isn't a part of the
+ // shorty.
+ if (!method->IsStatic()) {
+ ++num_ins;
+ }
+
+ uint32_t shorty_len = 0;
+ const char* shorty = method->GetShorty(&shorty_len);
+ for (size_t i = 1; i < shorty_len; ++i) {
+ const char c = shorty[i];
+ ++num_ins;
+ if (c == 'J' || c == 'D') {
+ ++num_ins;
+ }
+ }
+
+ return num_ins;
+}
+
+template <bool is_range>
+static inline bool DoCallPolymorphic(ArtMethod* called_method,
+ Handle<mirror::MethodType> callsite_type,
+ Handle<mirror::MethodType> target_type,
+ Thread* self,
+ ShadowFrame& shadow_frame,
+ JValue* result,
+ uint32_t (&arg)[Instruction::kMaxVarArgRegs],
+ uint32_t vregC) {
+ // TODO(narayan): Wire in the String.init hacks.
+
+ // Compute method information.
+ const DexFile::CodeItem* code_item = called_method->GetCodeItem();
+
+ // Number of registers for the callee's call frame. Note that for non-exact
+ // invokes, we always derive this information from the callee method. We
+ // cannot guarantee during verification that the number of registers encoded
+ // in the invoke is equal to the number of ins for the callee. This is because
+  // some transformations (such as boxing a long -> Long or widening an
+  // int -> long) will change that number.
+ uint16_t num_regs;
+ size_t first_dest_reg;
+ if (LIKELY(code_item != nullptr)) {
+ num_regs = code_item->registers_size_;
+ first_dest_reg = num_regs - code_item->ins_size_;
+ // Parameter registers go at the end of the shadow frame.
+    DCHECK_NE(first_dest_reg, static_cast<size_t>(-1));
+ } else {
+ // No local regs for proxy and native methods.
+ DCHECK(called_method->IsNative() || called_method->IsProxyMethod());
+ num_regs = GetInsForProxyOrNativeMethod(called_method);
+ first_dest_reg = 0;
+ }
+
+ // Allocate shadow frame on the stack.
+ ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
+ CREATE_SHADOW_FRAME(num_regs, &shadow_frame, called_method, /* dex pc */ 0);
+ ShadowFrame* new_shadow_frame = shadow_frame_unique_ptr.get();
+
+ // Thread might be suspended during PerformArgumentConversions due to the
+ // allocations performed during boxing.
+ {
+ ScopedStackedShadowFramePusher pusher(
+ self, new_shadow_frame, StackedShadowFrameType::kShadowFrameUnderConstruction);
+ if (!PerformArgumentConversions<is_range>(self, callsite_type, target_type,
+ shadow_frame, vregC, first_dest_reg,
+ arg, new_shadow_frame, result)) {
+ DCHECK(self->IsExceptionPending());
+ result->SetL(0);
+ return false;
+ }
+ }
+
+ // Do the call now.
+ if (LIKELY(Runtime::Current()->IsStarted())) {
+ ArtMethod* target = new_shadow_frame->GetMethod();
+ if (ClassLinker::ShouldUseInterpreterEntrypoint(
+ target,
+ target->GetEntryPointFromQuickCompiledCode())) {
+ ArtInterpreterToInterpreterBridge(self, code_item, new_shadow_frame, result);
+ } else {
+ ArtInterpreterToCompiledCodeBridge(
+ self, shadow_frame.GetMethod(), code_item, new_shadow_frame, result);
+ }
+ } else {
+ UnstartedRuntime::Invoke(self, code_item, new_shadow_frame, result, first_dest_reg);
+ }
+
+ // TODO(narayan): Perform return value conversions.
+
+ return !self->IsExceptionPending();
+}
+
template <bool is_range,
bool do_assignability_check>
static inline bool DoCallCommon(ArtMethod* called_method,
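
GetInsForProxyOrNativeMethod above derives the argument-register count from the method shorty because proxy and native methods carry no code item: shorty[0] is the return type, each parameter contributes one slot, 'J' (long) and 'D' (double) contribute two, and a non-static method reserves one extra slot for the receiver. The same rule restated standalone, with a worked example:

    #include <cstddef>
    #include <cstring>

    static size_t CountIns(const char* shorty, bool is_static) {
      size_t num_ins = is_static ? 0 : 1;  // Receiver is not part of the shorty.
      const size_t shorty_len = strlen(shorty);
      for (size_t i = 1; i < shorty_len; ++i) {  // shorty[0] is the return type.
        ++num_ins;
        if (shorty[i] == 'J' || shorty[i] == 'D') {
          ++num_ins;  // Longs and doubles occupy a register pair.
        }
      }
      return num_ins;
    }

    // Example: a virtual method with shorty "JLD" (returns long, takes an
    // Object and a double) needs 1 (receiver) + 1 ('L') + 2 ('D') = 4 ins.
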
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 7d54d0a802..bdb6bd39a1 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -463,6 +463,24 @@ static inline bool IsBackwardBranch(int32_t branch_offset) {
return branch_offset <= 0;
}
+// Assign register 'src_reg' from shadow_frame to register 'dest_reg' into new_shadow_frame.
+static inline void AssignRegister(ShadowFrame* new_shadow_frame, const ShadowFrame& shadow_frame,
+ size_t dest_reg, size_t src_reg)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+  // Unsigned type required so that sign extension does not make this wrong on 64-bit systems.
+ uint32_t src_value = shadow_frame.GetVReg(src_reg);
+ mirror::Object* o = shadow_frame.GetVRegReference<kVerifyNone>(src_reg);
+
+  // If both register locations contain the same value, the register probably holds a reference.
+ // Note: As an optimization, non-moving collectors leave a stale reference value
+ // in the references array even after the original vreg was overwritten to a non-reference.
+ if (src_value == reinterpret_cast<uintptr_t>(o)) {
+ new_shadow_frame->SetVRegReference(dest_reg, o);
+ } else {
+ new_shadow_frame->SetVReg(dest_reg, src_value);
+ }
+}
+
void ArtInterpreterToCompiledCodeBridge(Thread* self,
ArtMethod* caller,
const DexFile::CodeItem* code_item,
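
AssignRegister is moved into this header so the new method-handle call path can reuse it. It depends on the shadow frame storing each vreg twice, once as a raw 32-bit value and once in a parallel reference array: when the two slots agree, the vreg most likely holds a reference and must be copied through the reference slot so the GC keeps seeing (and updating) it. A standalone model of the heuristic:

    #include <cstddef>
    #include <cstdint>

    struct ToyShadowFrame {
      uint32_t vregs[8] = {};  // Raw 32-bit values.
      void* refs[8] = {};      // Parallel reference slots the GC scans.
    };

    static void ToyAssignRegister(ToyShadowFrame* dst, const ToyShadowFrame& src,
                                  size_t dest_reg, size_t src_reg) {
      // Unsigned compare so sign extension cannot fake a match on 64-bit hosts
      // (ART references are 32-bit compressed pointers).
      uint32_t value = src.vregs[src_reg];
      void* ref = src.refs[src_reg];
      if (value == reinterpret_cast<uintptr_t>(ref)) {
        dst->refs[dest_reg] = ref;     // Keep it visible as a reference.
        dst->vregs[dest_reg] = value;
      } else {
        dst->vregs[dest_reg] = value;  // Plain non-reference value.
      }
    }
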
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index 6cff1da357..295cdec9b9 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -20,6 +20,7 @@
#include "experimental_flags.h"
#include "interpreter_common.h"
#include "jit/jit.h"
+#include "jvalue-inl.h"
#include "safe_math.h"
namespace art {
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 845fc60b12..4a3654be3e 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -38,6 +38,7 @@
#include "gc/reference_processor.h"
#include "handle_scope-inl.h"
#include "interpreter/interpreter_common.h"
+#include "jvalue-inl.h"
#include "mirror/array-inl.h"
#include "mirror/class.h"
#include "mirror/field-inl.h"
@@ -340,7 +341,7 @@ void UnstartedRuntime::UnstartedClassGetDeclaredMethod(
Runtime* runtime = Runtime::Current();
bool transaction = runtime->IsActiveTransaction();
PointerSize pointer_size = runtime->GetClassLinker()->GetImagePointerSize();
- mirror::Method* method;
+ ObjPtr<mirror::Method> method;
if (transaction) {
if (pointer_size == PointerSize::k64) {
method = mirror::Class::GetDeclaredMethodInternal<PointerSize::k64, true>(
@@ -374,7 +375,7 @@ void UnstartedRuntime::UnstartedClassGetDeclaredConstructor(
Runtime* runtime = Runtime::Current();
bool transaction = runtime->IsActiveTransaction();
PointerSize pointer_size = runtime->GetClassLinker()->GetImagePointerSize();
- mirror::Constructor* constructor;
+ ObjPtr<mirror::Constructor> constructor;
if (transaction) {
if (pointer_size == PointerSize::k64) {
constructor = mirror::Class::GetDeclaredConstructorInternal<PointerSize::k64,
@@ -952,7 +953,7 @@ void UnstartedRuntime::UnstartedDexCacheGetDexNative(
ObjPtr<mirror::Object> dex = GetDexFromDexCache(self, src->AsDexCache());
if (dex != nullptr) {
have_dex = true;
- result->SetL(dex.Ptr());
+ result->SetL(dex);
}
}
if (!have_dex) {
@@ -1185,13 +1186,13 @@ void UnstartedRuntime::UnstartedStringToCharArray(
// This allows statically initializing ConcurrentHashMap and SynchronousQueue.
void UnstartedRuntime::UnstartedReferenceGetReferent(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
- mirror::Reference* const ref = down_cast<mirror::Reference*>(
+ ObjPtr<mirror::Reference> const ref = down_cast<mirror::Reference*>(
shadow_frame->GetVRegReference(arg_offset));
if (ref == nullptr) {
AbortTransactionOrFail(self, "Reference.getReferent() with null object");
return;
}
- mirror::Object* const referent =
+ ObjPtr<mirror::Object> const referent =
Runtime::Current()->GetHeap()->GetReferenceProcessor()->GetReferent(self, ref);
result->SetL(referent);
}
@@ -1456,7 +1457,7 @@ void UnstartedRuntime::UnstartedMethodInvoke(
ScopedLocalRef<jobject> result_jobj(env,
InvokeMethod(soa, java_method.get(), java_receiver.get(), java_args.get()));
- result->SetL(self->DecodeJObject(result_jobj.get()).Ptr());
+ result->SetL(self->DecodeJObject(result_jobj.get()));
// Conservatively flag all exceptions as transaction aborts. This way we don't need to unwrap
// InvocationTargetExceptions.
@@ -1619,9 +1620,9 @@ void UnstartedRuntime::UnstartedJNIThrowableNativeFillInStackTrace(
uint32_t* args ATTRIBUTE_UNUSED, JValue* result) {
ScopedObjectAccessUnchecked soa(self);
if (Runtime::Current()->IsActiveTransaction()) {
- result->SetL(soa.Decode<mirror::Object>(self->CreateInternalStackTrace<true>(soa)).Ptr());
+ result->SetL(soa.Decode<mirror::Object>(self->CreateInternalStackTrace<true>(soa)));
} else {
- result->SetL(soa.Decode<mirror::Object>(self->CreateInternalStackTrace<false>(soa)).Ptr());
+ result->SetL(soa.Decode<mirror::Object>(self->CreateInternalStackTrace<false>(soa)));
}
}
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index c5f95eb6dc..f2bda05b94 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -422,14 +422,14 @@ JavaVMExt::JavaVMExt(Runtime* runtime, const RuntimeArgumentMap& runtime_options
tracing_enabled_(runtime_options.Exists(RuntimeArgumentMap::JniTrace)
|| VLOG_IS_ON(third_party_jni)),
trace_(runtime_options.GetOrDefault(RuntimeArgumentMap::JniTrace)),
- globals_lock_("JNI global reference table lock"),
globals_(gGlobalsInitial, gGlobalsMax, kGlobal),
libraries_(new Libraries),
unchecked_functions_(&gJniInvokeInterface),
- weak_globals_lock_("JNI weak global reference table lock", kJniWeakGlobalsLock),
weak_globals_(kWeakGlobalsInitial, kWeakGlobalsMax, kWeakGlobal),
allow_accessing_weak_globals_(true),
- weak_globals_add_condition_("weak globals add condition", weak_globals_lock_),
+ weak_globals_add_condition_("weak globals add condition",
+ (CHECK(Locks::jni_weak_globals_lock_ != nullptr),
+ *Locks::jni_weak_globals_lock_)),
env_hooks_() {
functions = unchecked_functions_;
SetCheckJniEnabled(runtime_options.Exists(RuntimeArgumentMap::CheckJni));
@@ -537,7 +537,7 @@ jobject JavaVMExt::AddGlobalRef(Thread* self, ObjPtr<mirror::Object> obj) {
if (obj == nullptr) {
return nullptr;
}
- WriterMutexLock mu(self, globals_lock_);
+ WriterMutexLock mu(self, *Locks::jni_globals_lock_);
IndirectRef ref = globals_.Add(IRT_FIRST_SEGMENT, obj);
return reinterpret_cast<jobject>(ref);
}
@@ -546,7 +546,7 @@ jweak JavaVMExt::AddWeakGlobalRef(Thread* self, ObjPtr<mirror::Object> obj) {
if (obj == nullptr) {
return nullptr;
}
- MutexLock mu(self, weak_globals_lock_);
+ MutexLock mu(self, *Locks::jni_weak_globals_lock_);
while (UNLIKELY(!MayAccessWeakGlobals(self))) {
weak_globals_add_condition_.WaitHoldingLocks(self);
}
@@ -558,7 +558,7 @@ void JavaVMExt::DeleteGlobalRef(Thread* self, jobject obj) {
if (obj == nullptr) {
return;
}
- WriterMutexLock mu(self, globals_lock_);
+ WriterMutexLock mu(self, *Locks::jni_globals_lock_);
if (!globals_.Remove(IRT_FIRST_SEGMENT, obj)) {
LOG(WARNING) << "JNI WARNING: DeleteGlobalRef(" << obj << ") "
<< "failed to find entry";
@@ -569,7 +569,7 @@ void JavaVMExt::DeleteWeakGlobalRef(Thread* self, jweak obj) {
if (obj == nullptr) {
return;
}
- MutexLock mu(self, weak_globals_lock_);
+ MutexLock mu(self, *Locks::jni_weak_globals_lock_);
if (!weak_globals_.Remove(IRT_FIRST_SEGMENT, obj)) {
LOG(WARNING) << "JNI WARNING: DeleteWeakGlobalRef(" << obj << ") "
<< "failed to find entry";
@@ -597,11 +597,11 @@ void JavaVMExt::DumpForSigQuit(std::ostream& os) {
}
Thread* self = Thread::Current();
{
- ReaderMutexLock mu(self, globals_lock_);
+ ReaderMutexLock mu(self, *Locks::jni_globals_lock_);
os << "; globals=" << globals_.Capacity();
}
{
- MutexLock mu(self, weak_globals_lock_);
+ MutexLock mu(self, *Locks::jni_weak_globals_lock_);
if (weak_globals_.Capacity() > 0) {
os << " (plus " << weak_globals_.Capacity() << " weak)";
}
@@ -617,7 +617,7 @@ void JavaVMExt::DumpForSigQuit(std::ostream& os) {
void JavaVMExt::DisallowNewWeakGlobals() {
CHECK(!kUseReadBarrier);
Thread* const self = Thread::Current();
- MutexLock mu(self, weak_globals_lock_);
+ MutexLock mu(self, *Locks::jni_weak_globals_lock_);
// DisallowNewWeakGlobals is only called by CMS during the pause. It is required to have the
// mutator lock exclusively held so that we don't have any threads in the middle of
// DecodeWeakGlobal.
@@ -628,7 +628,7 @@ void JavaVMExt::DisallowNewWeakGlobals() {
void JavaVMExt::AllowNewWeakGlobals() {
CHECK(!kUseReadBarrier);
Thread* self = Thread::Current();
- MutexLock mu(self, weak_globals_lock_);
+ MutexLock mu(self, *Locks::jni_weak_globals_lock_);
allow_accessing_weak_globals_.StoreSequentiallyConsistent(true);
weak_globals_add_condition_.Broadcast(self);
}
@@ -636,7 +636,7 @@ void JavaVMExt::AllowNewWeakGlobals() {
void JavaVMExt::BroadcastForNewWeakGlobals() {
CHECK(kUseReadBarrier);
Thread* self = Thread::Current();
- MutexLock mu(self, weak_globals_lock_);
+ MutexLock mu(self, *Locks::jni_weak_globals_lock_);
weak_globals_add_condition_.Broadcast(self);
}
@@ -645,7 +645,7 @@ ObjPtr<mirror::Object> JavaVMExt::DecodeGlobal(IndirectRef ref) {
}
void JavaVMExt::UpdateGlobal(Thread* self, IndirectRef ref, ObjPtr<mirror::Object> result) {
- WriterMutexLock mu(self, globals_lock_);
+ WriterMutexLock mu(self, *Locks::jni_globals_lock_);
globals_.Update(ref, result);
}
@@ -671,13 +671,13 @@ ObjPtr<mirror::Object> JavaVMExt::DecodeWeakGlobal(Thread* self, IndirectRef ref
if (LIKELY(MayAccessWeakGlobalsUnlocked(self))) {
return weak_globals_.SynchronizedGet(ref);
}
- MutexLock mu(self, weak_globals_lock_);
+ MutexLock mu(self, *Locks::jni_weak_globals_lock_);
return DecodeWeakGlobalLocked(self, ref);
}
ObjPtr<mirror::Object> JavaVMExt::DecodeWeakGlobalLocked(Thread* self, IndirectRef ref) {
if (kDebugLocking) {
- weak_globals_lock_.AssertHeld(self);
+ Locks::jni_weak_globals_lock_->AssertHeld(self);
}
while (UNLIKELY(!MayAccessWeakGlobals(self))) {
weak_globals_add_condition_.WaitHoldingLocks(self);
@@ -700,7 +700,7 @@ ObjPtr<mirror::Object> JavaVMExt::DecodeWeakGlobalDuringShutdown(Thread* self, I
bool JavaVMExt::IsWeakGlobalCleared(Thread* self, IndirectRef ref) {
DCHECK_EQ(GetIndirectRefKind(ref), kWeakGlobal);
- MutexLock mu(self, weak_globals_lock_);
+ MutexLock mu(self, *Locks::jni_weak_globals_lock_);
while (UNLIKELY(!MayAccessWeakGlobals(self))) {
weak_globals_add_condition_.WaitHoldingLocks(self);
}
@@ -712,18 +712,18 @@ bool JavaVMExt::IsWeakGlobalCleared(Thread* self, IndirectRef ref) {
}
void JavaVMExt::UpdateWeakGlobal(Thread* self, IndirectRef ref, ObjPtr<mirror::Object> result) {
- MutexLock mu(self, weak_globals_lock_);
+ MutexLock mu(self, *Locks::jni_weak_globals_lock_);
weak_globals_.Update(ref, result);
}
void JavaVMExt::DumpReferenceTables(std::ostream& os) {
Thread* self = Thread::Current();
{
- ReaderMutexLock mu(self, globals_lock_);
+ ReaderMutexLock mu(self, *Locks::jni_globals_lock_);
globals_.Dump(os);
}
{
- MutexLock mu(self, weak_globals_lock_);
+ MutexLock mu(self, *Locks::jni_weak_globals_lock_);
weak_globals_.Dump(os);
}
}
@@ -920,7 +920,7 @@ void* JavaVMExt::FindCodeForNativeMethod(ArtMethod* m) {
}
void JavaVMExt::SweepJniWeakGlobals(IsMarkedVisitor* visitor) {
- MutexLock mu(Thread::Current(), weak_globals_lock_);
+ MutexLock mu(Thread::Current(), *Locks::jni_weak_globals_lock_);
Runtime* const runtime = Runtime::Current();
for (auto* entry : weak_globals_) {
// Need to skip null here to distinguish between null entries and cleared weak ref entries.
@@ -937,13 +937,13 @@ void JavaVMExt::SweepJniWeakGlobals(IsMarkedVisitor* visitor) {
}
void JavaVMExt::TrimGlobals() {
- WriterMutexLock mu(Thread::Current(), globals_lock_);
+ WriterMutexLock mu(Thread::Current(), *Locks::jni_globals_lock_);
globals_.Trim();
}
void JavaVMExt::VisitRoots(RootVisitor* visitor) {
Thread* self = Thread::Current();
- ReaderMutexLock mu(self, globals_lock_);
+ ReaderMutexLock mu(self, *Locks::jni_globals_lock_);
globals_.VisitRoots(visitor, RootInfo(kRootJNIGlobal));
// The weak_globals table is visited by the GC itself (because it mutates the table).
}
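
One subtlety in this file's constructor change: weak_globals_add_condition_ must now be built from the global Locks::jni_weak_globals_lock_, and the initializer uses the comma operator to run a CHECK before dereferencing, because a member-initializer list cannot contain statements. A standalone illustration of the idiom (toy types, assert instead of CHECK):

    #include <cassert>

    struct ToyLock {};
    static ToyLock* g_weak_globals_lock = nullptr;  // Created during startup.

    struct ToyCondition {
      explicit ToyCondition(ToyLock& guard) : guard_(guard) {}
      ToyLock& guard_;
    };

    struct ToyVm {
      // The comma expression evaluates the assert first, then yields the
      // dereferenced lock as the single constructor argument.
      ToyVm()
          : cond_((assert(g_weak_globals_lock != nullptr),
                   *g_weak_globals_lock)) {}
      ToyCondition cond_;
    };
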
diff --git a/runtime/java_vm_ext.h b/runtime/java_vm_ext.h
index 2e59a9d65f..05717f41e7 100644
--- a/runtime/java_vm_ext.h
+++ b/runtime/java_vm_ext.h
@@ -109,72 +109,81 @@ class JavaVMExt : public JavaVM {
REQUIRES_SHARED(Locks::mutator_lock_);
void DumpForSigQuit(std::ostream& os)
- REQUIRES(!Locks::jni_libraries_lock_, !globals_lock_, !weak_globals_lock_);
+ REQUIRES(!Locks::jni_libraries_lock_,
+ !Locks::jni_globals_lock_,
+ !Locks::jni_weak_globals_lock_);
void DumpReferenceTables(std::ostream& os)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!globals_lock_, !weak_globals_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::jni_globals_lock_, !Locks::jni_weak_globals_lock_);
bool SetCheckJniEnabled(bool enabled);
void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!globals_lock_);
+ REQUIRES(!Locks::jni_globals_lock_);
- void DisallowNewWeakGlobals() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
- void AllowNewWeakGlobals() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
- void BroadcastForNewWeakGlobals() REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!weak_globals_lock_);
+ void DisallowNewWeakGlobals()
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::jni_weak_globals_lock_);
+ void AllowNewWeakGlobals()
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::jni_weak_globals_lock_);
+ void BroadcastForNewWeakGlobals()
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::jni_weak_globals_lock_);
jobject AddGlobalRef(Thread* self, ObjPtr<mirror::Object> obj)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!globals_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::jni_globals_lock_);
jweak AddWeakGlobalRef(Thread* self, ObjPtr<mirror::Object> obj)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::jni_weak_globals_lock_);
- void DeleteGlobalRef(Thread* self, jobject obj) REQUIRES(!globals_lock_);
+ void DeleteGlobalRef(Thread* self, jobject obj) REQUIRES(!Locks::jni_globals_lock_);
- void DeleteWeakGlobalRef(Thread* self, jweak obj) REQUIRES(!weak_globals_lock_);
+ void DeleteWeakGlobalRef(Thread* self, jweak obj) REQUIRES(!Locks::jni_weak_globals_lock_);
void SweepJniWeakGlobals(IsMarkedVisitor* visitor)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::jni_weak_globals_lock_);
ObjPtr<mirror::Object> DecodeGlobal(IndirectRef ref)
REQUIRES_SHARED(Locks::mutator_lock_);
void UpdateGlobal(Thread* self, IndirectRef ref, ObjPtr<mirror::Object> result)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!globals_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::jni_globals_lock_);
ObjPtr<mirror::Object> DecodeWeakGlobal(Thread* self, IndirectRef ref)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!weak_globals_lock_);
+ REQUIRES(!Locks::jni_weak_globals_lock_);
ObjPtr<mirror::Object> DecodeWeakGlobalLocked(Thread* self, IndirectRef ref)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(weak_globals_lock_);
+ REQUIRES(Locks::jni_weak_globals_lock_);
// Like DecodeWeakGlobal() but to be used only during a runtime shutdown where self may be
// null.
ObjPtr<mirror::Object> DecodeWeakGlobalDuringShutdown(Thread* self, IndirectRef ref)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!weak_globals_lock_);
+ REQUIRES(!Locks::jni_weak_globals_lock_);
// Checks if the weak global ref has been cleared by the GC without decoding it (no read barrier).
bool IsWeakGlobalCleared(Thread* self, IndirectRef ref)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!weak_globals_lock_);
-
- Mutex& WeakGlobalsLock() RETURN_CAPABILITY(weak_globals_lock_) {
- return weak_globals_lock_;
- }
+ REQUIRES(!Locks::jni_weak_globals_lock_);
void UpdateWeakGlobal(Thread* self, IndirectRef ref, ObjPtr<mirror::Object> result)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::jni_weak_globals_lock_);
const JNIInvokeInterface* GetUncheckedFunctions() const {
return unchecked_functions_;
}
void TrimGlobals() REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!globals_lock_);
+ REQUIRES(!Locks::jni_globals_lock_);
jint HandleGetEnv(/*out*/void** env, jint version);
@@ -187,7 +196,7 @@ class JavaVMExt : public JavaVM {
bool MayAccessWeakGlobalsUnlocked(Thread* self) const REQUIRES_SHARED(Locks::mutator_lock_);
bool MayAccessWeakGlobals(Thread* self) const
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(weak_globals_lock_);
+ REQUIRES(Locks::jni_weak_globals_lock_);
Runtime* const runtime_;
@@ -203,8 +212,6 @@ class JavaVMExt : public JavaVM {
// Extra diagnostics.
const std::string trace_;
- // JNI global references.
- ReaderWriterMutex globals_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
// Not guarded by Locks::jni_globals_lock_ since we sometimes use SynchronizedGet in Thread::DecodeJObject.
IndirectReferenceTable globals_;
@@ -215,8 +222,6 @@ class JavaVMExt : public JavaVM {
// Used by -Xcheck:jni.
const JNIInvokeInterface* const unchecked_functions_;
- // JNI weak global references.
- Mutex weak_globals_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
// Since weak_globals_ contain weak roots, be careful not to
// directly access the object references in it. Use Get() with the
// read barrier enabled.
@@ -224,7 +229,7 @@ class JavaVMExt : public JavaVM {
IndirectReferenceTable weak_globals_;
// Not guarded by Locks::jni_weak_globals_lock_ since we may use SynchronizedGet in DecodeWeakGlobal.
Atomic<bool> allow_accessing_weak_globals_;
- ConditionVariable weak_globals_add_condition_ GUARDED_BY(weak_globals_lock_);
+ ConditionVariable weak_globals_add_condition_ GUARDED_BY(Locks::jni_weak_globals_lock_);
// TODO Maybe move this to Runtime.
std::vector<GetEnvHook> env_hooks_;
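
The header side of the change is what makes Clang's -Wthread-safety analysis work across translation units: REQUIRES(!lock) declares that the caller must not hold the capability, REQUIRES(lock) that it must, and GUARDED_BY ties the condition variable to its lock. A small sketch of how the analysis reads these annotations (illustrative functions, not from this patch):

void NeedsLock(Thread* self) REQUIRES(Locks::jni_weak_globals_lock_);  // callable only when held

void TakesLock(Thread* self) REQUIRES(!Locks::jni_weak_globals_lock_) {
  MutexLock mu(self, *Locks::jni_weak_globals_lock_);  // acquires the capability
  NeedsLock(self);                                     // OK: analysis sees the lock held here
}
// Calling NeedsLock() without the lock, or TakesLock() while already holding it,
// produces a compile-time -Wthread-safety warning.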
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 273c67d167..8eebe56da7 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -88,13 +88,19 @@ static std::string NormalizeJniClassDescriptor(const char* name) {
return result;
}
-static void ThrowNoSuchMethodError(ScopedObjectAccess& soa, mirror::Class* c,
- const char* name, const char* sig, const char* kind)
+static void ThrowNoSuchMethodError(ScopedObjectAccess& soa,
+ ObjPtr<mirror::Class> c,
+ const char* name,
+ const char* sig,
+ const char* kind)
REQUIRES_SHARED(Locks::mutator_lock_) {
std::string temp;
soa.Self()->ThrowNewExceptionF("Ljava/lang/NoSuchMethodError;",
"no %s method \"%s.%s%s\"",
- kind, c->GetDescriptor(&temp), name, sig);
+ kind,
+ c->GetDescriptor(&temp),
+ name,
+ sig);
}
static void ReportInvalidJNINativeMethod(const ScopedObjectAccess& soa,
@@ -148,7 +154,7 @@ static jmethodID FindMethodID(ScopedObjectAccess& soa, jclass jni_class,
}
}
if (method == nullptr || method->IsStatic() != is_static) {
- ThrowNoSuchMethodError(soa, c.Ptr(), name, sig, is_static ? "static" : "non-static");
+ ThrowNoSuchMethodError(soa, c, name, sig, is_static ? "static" : "non-static");
return nullptr;
}
return soa.EncodeMethod(method);
@@ -373,7 +379,7 @@ class JNI {
// Not even a java.lang.reflect.Field; return null. TODO: is this check necessary?
return nullptr;
}
- ObjPtr<mirror::Field> field = down_cast<mirror::Field*>(obj_field.Ptr());
+ ObjPtr<mirror::Field> field = ObjPtr<mirror::Field>::DownCast(obj_field);
return soa.EncodeField(field->GetArtField());
}
@@ -629,7 +635,7 @@ class JNI {
WellKnownClasses::StringInitToStringFactory(soa.DecodeMethod(mid)));
return CallStaticObjectMethodV(env, WellKnownClasses::java_lang_StringFactory, sf_mid, args);
}
- mirror::Object* result = c->AllocObject(soa.Self());
+ ObjPtr<mirror::Object> result = c->AllocObject(soa.Self());
if (result == nullptr) {
return nullptr;
}
@@ -656,7 +662,7 @@ class JNI {
WellKnownClasses::StringInitToStringFactory(soa.DecodeMethod(mid)));
return CallStaticObjectMethodA(env, WellKnownClasses::java_lang_StringFactory, sf_mid, args);
}
- mirror::Object* result = c->AllocObject(soa.Self());
+ ObjPtr<mirror::Object> result = c->AllocObject(soa.Self());
if (result == nullptr) {
return nullptr;
}
@@ -2245,14 +2251,14 @@ class JNI {
<< "Failed to register native method "
<< PrettyDescriptor(c) << "." << name << sig << " in "
<< c->GetDexCache()->GetLocation()->ToModifiedUtf8();
- ThrowNoSuchMethodError(soa, c.Ptr(), name, sig, "static or non-static");
+ ThrowNoSuchMethodError(soa, c, name, sig, "static or non-static");
return JNI_ERR;
} else if (!m->IsNative()) {
LOG(return_errors ? ::android::base::ERROR : ::android::base::FATAL)
<< "Failed to register non-native method "
<< PrettyDescriptor(c) << "." << name << sig
<< " as native";
- ThrowNoSuchMethodError(soa, c.Ptr(), name, sig, "native");
+ ThrowNoSuchMethodError(soa, c, name, sig, "native");
return JNI_ERR;
}
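
In jni_internal.cc the helper now takes ObjPtr<mirror::Class> by value, so call sites drop the .Ptr() unwrapping, and the raw down_cast<mirror::Field*> becomes ObjPtr's own checked cast. A hedged sketch of the new idiom; the Decode helper is assumed from the surrounding code:

// ObjPtr is a lightweight, debug-checked wrapper around a mirror pointer;
// DownCast performs the same debug-only type check that down_cast<> did.
ObjPtr<mirror::Object> obj_field = soa.Decode<mirror::Object>(jlr_field);  // assumed decode
ObjPtr<mirror::Field> field = ObjPtr<mirror::Field>::DownCast(obj_field);
ArtField* art_field = field->GetArtField();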
diff --git a/runtime/jvalue-inl.h b/runtime/jvalue-inl.h
new file mode 100644
index 0000000000..b33686c6c5
--- /dev/null
+++ b/runtime/jvalue-inl.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_JVALUE_INL_H_
+#define ART_RUNTIME_JVALUE_INL_H_
+
+#include "jvalue.h"
+
+#include "obj_ptr.h"
+
+namespace art {
+
+inline void JValue::SetL(ObjPtr<mirror::Object> new_l) {
+ l = new_l.Ptr();
+}
+
+} // namespace art
+
+#endif // ART_RUNTIME_JVALUE_INL_H_
diff --git a/runtime/jvalue.h b/runtime/jvalue.h
index 7b91b0b2b6..52a0f23361 100644
--- a/runtime/jvalue.h
+++ b/runtime/jvalue.h
@@ -18,9 +18,12 @@
#define ART_RUNTIME_JVALUE_H_
#include "base/macros.h"
+#include "base/mutex.h"
#include <stdint.h>
+#include "obj_ptr.h"
+
namespace art {
namespace mirror {
class Object;
@@ -52,8 +55,10 @@ union PACKED(4) JValue {
int64_t GetJ() const { return j; }
void SetJ(int64_t new_j) { j = new_j; }
- mirror::Object* GetL() const { return l; }
- void SetL(mirror::Object* new_l) { l = new_l; }
+ mirror::Object* GetL() const REQUIRES_SHARED(Locks::mutator_lock_) {
+ return l;
+ }
+ void SetL(ObjPtr<mirror::Object> new_l) REQUIRES_SHARED(Locks::mutator_lock_);
int16_t GetS() const { return s; }
void SetS(int16_t new_s) {
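
Annotating GetL() and SetL() with REQUIRES_SHARED(Locks::mutator_lock_) makes explicit that the union's l slot holds a raw mirror::Object*, which is only stable while the mutator lock is held; defining SetL() in the new jvalue-inl.h keeps jvalue.h from pulling in the ObjPtr implementation details. A usage sketch under those assumptions:

// Must run with the mutator lock held (shared); obj is an ObjPtr<mirror::Object>.
JValue v;
v.SetL(obj);                           // stores obj.Ptr() into the union slot
ObjPtr<mirror::Object> out(v.GetL());  // re-wrap the raw pointer when reading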
diff --git a/runtime/method_handles-inl.h b/runtime/method_handles-inl.h
new file mode 100644
index 0000000000..5f9824c079
--- /dev/null
+++ b/runtime/method_handles-inl.h
@@ -0,0 +1,260 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_METHOD_HANDLES_INL_H_
+#define ART_RUNTIME_METHOD_HANDLES_INL_H_
+
+#include "method_handles.h"
+
+#include "common_throws.h"
+#include "dex_instruction.h"
+#include "interpreter/interpreter_common.h"
+#include "jvalue.h"
+#include "mirror/class.h"
+#include "mirror/method_type.h"
+#include "mirror/object.h"
+#include "reflection.h"
+#include "stack.h"
+
+namespace art {
+
+// Sets |type| to the primitive type associated with |dst_class|. Returns
+// true if |dst_class| was a boxed type (Integer, Long etc.), false otherwise.
+REQUIRES_SHARED(Locks::mutator_lock_)
+static inline bool GetPrimitiveType(ObjPtr<mirror::Class> dst_class, Primitive::Type* type) {
+ if (dst_class->DescriptorEquals("Ljava/lang/Boolean;")) {
+ (*type) = Primitive::kPrimBoolean;
+ return true;
+ } else if (dst_class->DescriptorEquals("Ljava/lang/Byte;")) {
+ (*type) = Primitive::kPrimByte;
+ return true;
+ } else if (dst_class->DescriptorEquals("Ljava/lang/Character;")) {
+ (*type) = Primitive::kPrimChar;
+ return true;
+ } else if (dst_class->DescriptorEquals("Ljava/lang/Float;")) {
+ (*type) = Primitive::kPrimFloat;
+ return true;
+ } else if (dst_class->DescriptorEquals("Ljava/lang/Double;")) {
+ (*type) = Primitive::kPrimDouble;
+ return true;
+ } else if (dst_class->DescriptorEquals("Ljava/lang/Integer;")) {
+ (*type) = Primitive::kPrimInt;
+ return true;
+ } else if (dst_class->DescriptorEquals("Ljava/lang/Long;")) {
+ (*type) = Primitive::kPrimLong;
+ return true;
+ } else if (dst_class->DescriptorEquals("Ljava/lang/Short;")) {
+ (*type) = Primitive::kPrimShort;
+ return true;
+ } else {
+ return false;
+ }
+}
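+// For instance (illustrative, not part of this change): with |dst_class| =
+// java/lang/Integer this stores Primitive::kPrimInt in |type| and returns
+// true; a non-boxed class such as java/lang/String returns false and leaves
+// |type| untouched.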
+
+// A convenience class that allows for iteration through a list of
+// input argument registers |arg| for non-range invokes, or through a list of
+// consecutive registers starting at a given base for range invokes.
+template <bool is_range> class ArgIterator {
+ public:
+ ArgIterator(size_t first_src_reg,
+ const uint32_t (&arg)[Instruction::kMaxVarArgRegs]) :
+ first_src_reg_(first_src_reg),
+ arg_(arg),
+ arg_index_(0) {
+ }
+
+ uint32_t Next() {
+ const uint32_t next = (is_range ? first_src_reg_ + arg_index_ : arg_[arg_index_]);
+ ++arg_index_;
+
+ return next;
+ }
+
+ uint32_t NextPair() {
+ const uint32_t next = (is_range ? first_src_reg_ + arg_index_ : arg_[arg_index_]);
+ arg_index_ += 2;
+
+ return next;
+ }
+
+ private:
+ const size_t first_src_reg_;
+ const uint32_t (&arg_)[Instruction::kMaxVarArgRegs];
+ size_t arg_index_;
+};
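+// Usage sketch (illustrative): for a non-range invoke with |arg| = {1, 5, 6},
+// Next() yields registers v1, v5, v6 in turn. For a range invoke based at
+// v10, Next() yields v10, v11, v12, ... and NextPair() consumes two
+// consecutive registers for a wide (long/double) value:
+//
+//   ArgIterator<true> it(10, args);
+//   uint32_t pair = it.NextPair();  // v10 (the value occupies v10/v11)
+//   uint32_t next = it.Next();      // v12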
+
+template <bool is_range>
+bool PerformArgumentConversions(Thread* self,
+ Handle<mirror::MethodType> callsite_type,
+ Handle<mirror::MethodType> callee_type,
+ const ShadowFrame& caller_frame,
+ uint32_t first_src_reg,
+ uint32_t first_dest_reg,
+ const uint32_t (&arg)[Instruction::kMaxVarArgRegs],
+ ShadowFrame* callee_frame,
+ JValue* result) {
+ StackHandleScope<4> hs(self);
+ Handle<mirror::ObjectArray<mirror::Class>> from_types(hs.NewHandle(callsite_type->GetPTypes()));
+ Handle<mirror::ObjectArray<mirror::Class>> to_types(hs.NewHandle(callee_type->GetPTypes()));
+
+ const int32_t num_method_params = from_types->GetLength();
+ if (to_types->GetLength() != num_method_params) {
+ ThrowWrongMethodTypeException(callee_type.Get(), callsite_type.Get());
+ result->SetJ(0);
+ return false;
+ }
+
+ ArgIterator<is_range> input_args(first_src_reg, arg);
+ size_t to_arg_index = 0;
+ MutableHandle<mirror::Class> from(hs.NewHandle<mirror::Class>(nullptr));
+ MutableHandle<mirror::Class> to(hs.NewHandle<mirror::Class>(nullptr));
+ for (int32_t i = 0; i < num_method_params; ++i) {
+ from.Assign(from_types->GetWithoutChecks(i));
+ to.Assign(to_types->GetWithoutChecks(i));
+
+ const Primitive::Type from_type = from->GetPrimitiveType();
+ const Primitive::Type to_type = to->GetPrimitiveType();
+
+ // Easy case: the types are identical. Nothing left to do except to pass
+ // the arguments along verbatim.
+ if (from.Get() == to.Get()) {
+ interpreter::AssignRegister(callee_frame,
+ caller_frame,
+ first_dest_reg + to_arg_index,
+ input_args.Next());
+ ++to_arg_index;
+
+ // This is a wide argument, so we must use the second half of the
+ // register pair as well.
+ if (Primitive::Is64BitType(from_type)) {
+ interpreter::AssignRegister(callee_frame,
+ caller_frame,
+ first_dest_reg + to_arg_index,
+ input_args.Next());
+ ++to_arg_index;
+ }
+
+ continue;
+ } else if ((from_type != Primitive::kPrimNot) && (to_type != Primitive::kPrimNot)) {
+ // They are both primitive types - we should perform any widening or
+ // narrowing conversions as applicable.
+ JValue from_value;
+ JValue to_value;
+
+ if (Primitive::Is64BitType(from_type)) {
+ from_value.SetJ(caller_frame.GetVRegLong(input_args.NextPair()));
+ } else {
+ from_value.SetI(caller_frame.GetVReg(input_args.Next()));
+ }
+
+ // Throws a ClassCastException if we're unable to convert a primitive value.
+ if (!ConvertPrimitiveValue(false, from_type, to_type, from_value, &to_value)) {
+ DCHECK(self->IsExceptionPending());
+ result->SetL(0);
+ return false;
+ }
+
+ if (Primitive::Is64BitType(to_type)) {
+ callee_frame->SetVRegLong(first_dest_reg + to_arg_index, to_value.GetJ());
+ to_arg_index += 2;
+ } else {
+ callee_frame->SetVReg(first_dest_reg + to_arg_index, to_value.GetI());
+ ++to_arg_index;
+ }
+ } else if ((from_type == Primitive::kPrimNot) && (to_type == Primitive::kPrimNot)) {
+ // They're both reference types. If the incoming reference is null, we can
+ // pass it through unchanged; if not, we must throw a ClassCastException
+ // when |to| is not assignable from the dynamic type of |ref|.
+ const size_t next_arg_reg = input_args.Next();
+ mirror::Object* const ref = caller_frame.GetVRegReference(next_arg_reg);
+ if (ref == nullptr || to->IsAssignableFrom(ref->GetClass())) {
+ interpreter::AssignRegister(callee_frame,
+ caller_frame,
+ first_dest_reg + to_arg_index,
+ next_arg_reg);
+ ++to_arg_index;
+ } else {
+ ThrowClassCastException(to.Get(), ref->GetClass());
+ result->SetL(0);
+ return false;
+ }
+ } else {
+ // Exactly one of the source and destination types is a reference type;
+ // we must box or unbox.
+ if (to_type == Primitive::kPrimNot) {
+ // The target type is a reference, we must box.
+ Primitive::Type type;
+ // TODO(narayan): This is a CHECK for now. There might be a few corner cases
+ // here that we might not have handled yet. For example, if |to| is java/lang/Number;,
+ // we will need to box this "naturally".
+ CHECK(GetPrimitiveType(to.Get(), &type));
+
+ JValue from_value;
+ JValue to_value;
+
+ if (Primitive::Is64BitType(from_type)) {
+ from_value.SetJ(caller_frame.GetVRegLong(input_args.NextPair()));
+ } else {
+ from_value.SetI(caller_frame.GetVReg(input_args.Next()));
+ }
+
+ // First perform a primitive conversion to the unboxed equivalent of the target,
+ // if necessary. This covers the rarer cases like (int->Long) etc.
+ if (UNLIKELY(from_type != type)) {
+ if (!ConvertPrimitiveValue(false, from_type, type, from_value, &to_value)) {
+ DCHECK(self->IsExceptionPending());
+ result->SetL(0);
+ return false;
+ }
+ } else {
+ to_value = from_value;
+ }
+
+ // Then perform the actual boxing, and then set the reference.
+ ObjPtr<mirror::Object> boxed = BoxPrimitive(type, to_value);
+ callee_frame->SetVRegReference(first_dest_reg + to_arg_index, boxed.Ptr());
+ ++to_arg_index;
+ } else {
+ // The target type is a primitive, so we must unbox.
+ ObjPtr<mirror::Object> ref(caller_frame.GetVRegReference(input_args.Next()));
+
+ // Note that UnboxPrimitiveForResult already performs all of the type
+ // conversions that we want, based on |to|.
+ JValue unboxed_value;
+ if (!UnboxPrimitiveForResult(ref, to.Get(), &unboxed_value)) {
+ DCHECK(self->IsExceptionPending());
+ result->SetL(0);
+ return false;
+ }
+
+ if (Primitive::Is64BitType(to_type)) {
+ callee_frame->SetVRegLong(first_dest_reg + to_arg_index, unboxed_value.GetJ());
+ to_arg_index += 2;
+ } else {
+ callee_frame->SetVReg(first_dest_reg + to_arg_index, unboxed_value.GetI());
+ ++to_arg_index;
+ }
+ }
+ }
+ }
+
+ return true;
+}
+
+} // namespace art
+
+#endif // ART_RUNTIME_METHOD_HANDLES_INL_H_
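
PerformArgumentConversions() thus handles four cases per parameter: identical types (copy the register, twice for wide values), primitive-to-primitive (widen or narrow via ConvertPrimitiveValue), reference-to-reference (pass through or throw ClassCastException), and mixed (box or unbox). As a worked sketch of the boxing branch, assuming an int argument in register src destined for a java/lang/Integer parameter, with src, to, caller_frame, callee_frame, and first_dest_reg as in the code above:

JValue from_value;
from_value.SetI(caller_frame.GetVReg(src));  // read the primitive argument
Primitive::Type type;
CHECK(GetPrimitiveType(to.Get(), &type));    // java/lang/Integer -> kPrimInt
// from_type == type here, so no ConvertPrimitiveValue() call is needed.
ObjPtr<mirror::Object> boxed = BoxPrimitive(type, from_value);
callee_frame->SetVRegReference(first_dest_reg, boxed.Ptr());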
diff --git a/runtime/method_handles.h b/runtime/method_handles.h
index 5c68a8f1a2..a36b66db9a 100644
--- a/runtime/method_handles.h
+++ b/runtime/method_handles.h
@@ -19,8 +19,17 @@
#include <ostream>
+#include "dex_instruction.h"
+#include "jvalue.h"
+
namespace art {
+namespace mirror {
+ class MethodType;
+}
+
+class ShadowFrame;
+
// Defines the behaviour of a given method handle. The behaviour
// of a handle of a given kind is identical to the dex bytecode behaviour
// of the equivalent instruction.
@@ -46,6 +55,22 @@ inline bool IsInvoke(const MethodHandleKind handle_kind) {
return handle_kind <= kLastInvokeKind;
}
+// Perform argument conversions between |callsite_type| (the type of the
+// incoming arguments) and |callee_type| (the type of the method being
+// invoked). These include widening and narrowing conversions as well as
+// boxing and unboxing. Returns true on success, false on failure. A
+// pending exception will always be set on failure.
+template <bool is_range> REQUIRES_SHARED(Locks::mutator_lock_)
+bool PerformArgumentConversions(Thread* self,
+ Handle<mirror::MethodType> callsite_type,
+ Handle<mirror::MethodType> callee_type,
+ const ShadowFrame& caller_frame,
+ uint32_t first_src_reg,
+ uint32_t first_dest_reg,
+ const uint32_t (&arg)[Instruction::kMaxVarArgRegs],
+ ShadowFrame* callee_frame,
+ JValue* result);
+
} // namespace art
#endif // ART_RUNTIME_METHOD_HANDLES_H_
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 7cbcac8030..d18781a617 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -25,6 +25,7 @@
#include "base/stringprintf.h"
#include "class-inl.h"
#include "gc/heap-inl.h"
+#include "obj_ptr-inl.h"
#include "thread.h"
namespace art {
diff --git a/runtime/mirror/array.cc b/runtime/mirror/array.cc
index aee48ccb8c..1aa38dd6b4 100644
--- a/runtime/mirror/array.cc
+++ b/runtime/mirror/array.cc
@@ -121,7 +121,7 @@ void Array::ThrowArrayIndexOutOfBoundsException(int32_t index) {
art::ThrowArrayIndexOutOfBoundsException(index, GetLength());
}
-void Array::ThrowArrayStoreException(Object* object) {
+void Array::ThrowArrayStoreException(ObjPtr<Object> object) {
art::ThrowArrayStoreException(object->GetClass(), this->GetClass());
}
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index 6c82eb92a1..04d02f774f 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -20,6 +20,7 @@
#include "base/enums.h"
#include "gc_root.h"
#include "gc/allocator_type.h"
+#include "obj_ptr.h"
#include "object.h"
#include "object_callbacks.h"
@@ -89,7 +90,7 @@ class MANAGED Array : public Object {
REQUIRES(!Roles::uninterruptible_);
protected:
- void ThrowArrayStoreException(Object* object) REQUIRES_SHARED(Locks::mutator_lock_)
+ void ThrowArrayStoreException(ObjPtr<Object> object) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
private:
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 98d383dac9..14bd243795 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -306,14 +306,14 @@ inline void Class::SetEmbeddedVTableEntry(uint32_t i, ArtMethod* method, Pointer
SetEmbeddedVTableEntryUnchecked(i, method, pointer_size);
}
-inline bool Class::Implements(Class* klass) {
+inline bool Class::Implements(ObjPtr<Class> klass) {
DCHECK(klass != nullptr);
DCHECK(klass->IsInterface()) << PrettyClass(this);
// All interfaces implemented directly and by our superclass, and
// recursively all super-interfaces of those interfaces, are listed
// in iftable_, so we can just do a linear scan through that.
int32_t iftable_count = GetIfTableCount();
- IfTable* iftable = GetIfTable();
+ ObjPtr<IfTable> iftable = GetIfTable();
for (int32_t i = 0; i < iftable_count; i++) {
if (iftable->GetInterface(i) == klass) {
return true;
@@ -353,7 +353,7 @@ inline bool Class::IsAssignableFromArray(ObjPtr<Class> src) {
if (!IsArrayClass()) {
// If "this" is not also an array, it must be Object.
// src's super should be java_lang_Object, since it is an array.
- Class* java_lang_Object = src->GetSuperClass();
+ ObjPtr<Class> java_lang_Object = src->GetSuperClass();
DCHECK(java_lang_Object != nullptr) << PrettyClass(src);
DCHECK(java_lang_Object->GetSuperClass() == nullptr) << PrettyClass(src);
return this == java_lang_Object;
@@ -384,7 +384,7 @@ inline bool Class::ResolvedFieldAccessTest(ObjPtr<Class> access_to,
DCHECK(dex_access_to != nullptr);
if (UNLIKELY(!this->CanAccess(dex_access_to))) {
if (throw_on_failure) {
- ThrowIllegalAccessErrorClass(this, dex_access_to.Ptr());
+ ThrowIllegalAccessErrorClass(this, dex_access_to);
}
return false;
}
@@ -451,15 +451,20 @@ inline bool Class::CheckResolvedFieldAccess(ObjPtr<Class> access_to,
return ResolvedFieldAccessTest<true, true>(access_to, field, field_idx, nullptr);
}
-inline bool Class::CanAccessResolvedMethod(Class* access_to, ArtMethod* method,
- DexCache* dex_cache, uint32_t method_idx) {
+inline bool Class::CanAccessResolvedMethod(ObjPtr<Class> access_to,
+ ArtMethod* method,
+ ObjPtr<DexCache> dex_cache,
+ uint32_t method_idx) {
return ResolvedMethodAccessTest<false, false, kStatic>(access_to, method, method_idx, dex_cache);
}
template <InvokeType throw_invoke_type>
-inline bool Class::CheckResolvedMethodAccess(Class* access_to, ArtMethod* method,
+inline bool Class::CheckResolvedMethodAccess(ObjPtr<Class> access_to,
+ ArtMethod* method,
uint32_t method_idx) {
- return ResolvedMethodAccessTest<true, true, throw_invoke_type>(access_to, method, method_idx,
+ return ResolvedMethodAccessTest<true, true, throw_invoke_type>(access_to,
+ method,
+ method_idx,
nullptr);
}
@@ -478,13 +483,13 @@ inline bool Class::IsSubClass(ObjPtr<Class> klass) {
inline ArtMethod* Class::FindVirtualMethodForInterface(ArtMethod* method,
PointerSize pointer_size) {
- Class* declaring_class = method->GetDeclaringClass();
+ ObjPtr<Class> declaring_class = method->GetDeclaringClass();
DCHECK(declaring_class != nullptr) << PrettyClass(this);
DCHECK(declaring_class->IsInterface()) << PrettyMethod(method);
DCHECK(!method->IsCopied());
// TODO: Cache to improve lookup speed.
const int32_t iftable_count = GetIfTableCount();
- IfTable* iftable = GetIfTable();
+ ObjPtr<IfTable> iftable = GetIfTable();
for (int32_t i = 0; i < iftable_count; i++) {
if (iftable->GetInterface(i) == declaring_class) {
return iftable->GetMethodArray(i)->GetElementPtrSize<ArtMethod*>(
@@ -526,14 +531,14 @@ inline IfTable* Class::GetIfTable() {
}
inline int32_t Class::GetIfTableCount() {
- IfTable* iftable = GetIfTable();
+ ObjPtr<IfTable> iftable = GetIfTable();
if (iftable == nullptr) {
return 0;
}
return iftable->Count();
}
-inline void Class::SetIfTable(IfTable* new_iftable) {
+inline void Class::SetIfTable(ObjPtr<IfTable> new_iftable) {
SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, iftable_), new_iftable);
}
@@ -544,20 +549,20 @@ inline LengthPrefixedArray<ArtField>* Class::GetIFieldsPtr() {
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline MemberOffset Class::GetFirstReferenceInstanceFieldOffset() {
- Class* super_class = GetSuperClass<kVerifyFlags, kReadBarrierOption>();
+ ObjPtr<Class> super_class = GetSuperClass<kVerifyFlags, kReadBarrierOption>();
return (super_class != nullptr)
? MemberOffset(RoundUp(super_class->GetObjectSize<kVerifyFlags, kReadBarrierOption>(),
- sizeof(mirror::HeapReference<mirror::Object>)))
+ kHeapReferenceSize))
: ClassOffset();
}
template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline MemberOffset Class::GetFirstReferenceStaticFieldOffset(PointerSize pointer_size) {
DCHECK(IsResolved());
- uint32_t base = sizeof(mirror::Class); // Static fields come after the class.
+ uint32_t base = sizeof(Class); // Static fields come after the class.
if (ShouldHaveEmbeddedVTable<kVerifyFlags, kReadBarrierOption>()) {
// Static fields come after the embedded tables.
- base = mirror::Class::ComputeClassSize(
+ base = Class::ComputeClassSize(
true, GetEmbeddedVTableLength(), 0, 0, 0, 0, 0, pointer_size);
}
return MemberOffset(base);
@@ -566,10 +571,10 @@ inline MemberOffset Class::GetFirstReferenceStaticFieldOffset(PointerSize pointe
inline MemberOffset Class::GetFirstReferenceStaticFieldOffsetDuringLinking(
PointerSize pointer_size) {
DCHECK(IsLoaded());
- uint32_t base = sizeof(mirror::Class); // Static fields come after the class.
+ uint32_t base = sizeof(Class); // Static fields come after the class.
if (ShouldHaveEmbeddedVTable()) {
// Static fields come after the embedded tables.
- base = mirror::Class::ComputeClassSize(true, GetVTableDuringLinking()->GetLength(),
+ base = Class::ComputeClassSize(true, GetVTableDuringLinking()->GetLength(),
0, 0, 0, 0, 0, pointer_size);
}
return MemberOffset(base);
@@ -650,7 +655,7 @@ inline String* Class::GetName() {
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(Class, name_));
}
-inline void Class::SetName(String* name) {
+inline void Class::SetName(ObjPtr<String> name) {
if (Runtime::Current()->IsActiveTransaction()) {
SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(Class, name_), name);
} else {
@@ -700,7 +705,7 @@ inline void Class::CheckObjectAlloc() {
}
template<bool kIsInstrumented, bool kCheckAddFinalizer>
-inline Object* Class::Alloc(Thread* self, gc::AllocatorType allocator_type) {
+inline ObjPtr<Object> Class::Alloc(Thread* self, gc::AllocatorType allocator_type) {
CheckObjectAlloc();
gc::Heap* heap = Runtime::Current()->GetHeap();
const bool add_finalizer = kCheckAddFinalizer && IsFinalizable();
@@ -708,7 +713,7 @@ inline Object* Class::Alloc(Thread* self, gc::AllocatorType allocator_type) {
DCHECK(!IsFinalizable());
}
// Note that the this pointer may be invalidated after the allocation.
- ObjPtr<mirror::Object> obj =
+ ObjPtr<Object> obj =
heap->AllocObjectWithAllocator<kIsInstrumented, false>(self,
this,
this->object_size_,
@@ -724,11 +729,11 @@ inline Object* Class::Alloc(Thread* self, gc::AllocatorType allocator_type) {
return obj.Ptr();
}
-inline Object* Class::AllocObject(Thread* self) {
+inline ObjPtr<Object> Class::AllocObject(Thread* self) {
return Alloc<true>(self, Runtime::Current()->GetHeap()->GetCurrentAllocator());
}
-inline Object* Class::AllocNonMovableObject(Thread* self) {
+inline ObjPtr<Object> Class::AllocNonMovableObject(Thread* self) {
return Alloc<true>(self, Runtime::Current()->GetHeap()->GetCurrentNonMovingAllocator());
}
@@ -750,7 +755,7 @@ inline uint32_t Class::ComputeClassSize(bool has_embedded_vtable,
}
// Space used by reference statics.
- size += num_ref_static_fields * sizeof(HeapReference<Object>);
+ size += num_ref_static_fields * kHeapReferenceSize;
if (!IsAligned<8>(size) && num_64bit_static_fields > 0) {
uint32_t gap = 8 - (size & 0x7);
size += gap; // will be padded
@@ -781,8 +786,8 @@ template <bool kVisitNativeRoots,
VerifyObjectFlags kVerifyFlags,
ReadBarrierOption kReadBarrierOption,
typename Visitor>
-inline void Class::VisitReferences(mirror::Class* klass, const Visitor& visitor) {
- VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass, visitor);
+inline void Class::VisitReferences(ObjPtr<Class> klass, const Visitor& visitor) {
+ VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass.Ptr(), visitor);
// Right after a class is allocated, but not yet loaded
// (kStatusNotReady, see ClassLinker::LoadClass()), GC may find it
// and scan it. IsTemp() may call Class::GetAccessFlags() but may
@@ -810,7 +815,7 @@ inline bool Class::IsReferenceClass() const {
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline bool Class::IsClassClass() {
- Class* java_lang_Class = GetClass<kVerifyFlags, kReadBarrierOption>()->
+ ObjPtr<Class> java_lang_Class = GetClass<kVerifyFlags, kReadBarrierOption>()->
template GetClass<kVerifyFlags, kReadBarrierOption>();
return this == java_lang_Class;
}
@@ -883,12 +888,11 @@ inline void Class::SetSlowPath(bool enabled) {
SetFieldBoolean<false, false>(GetSlowPathFlagOffset(), enabled);
}
-inline void Class::InitializeClassVisitor::operator()(ObjPtr<mirror::Object> obj,
+inline void Class::InitializeClassVisitor::operator()(ObjPtr<Object> obj,
size_t usable_size) const {
DCHECK_LE(class_size_, usable_size);
// Avoid AsClass as object is not yet in live bitmap or allocation stack.
- ObjPtr<mirror::Class> klass = ObjPtr<mirror::Class>::DownCast(obj);
- // DCHECK(klass->IsClass());
+ ObjPtr<Class> klass = ObjPtr<Class>::DownCast(obj);
klass->SetClassSize(class_size_);
klass->SetPrimitiveType(Primitive::kPrimNot); // Default to not being primitive.
klass->SetDexClassDefIndex(DexFile::kDexNoIndex16); // Default to no valid class def index.
@@ -920,7 +924,7 @@ inline uint32_t Class::NumDirectInterfaces() {
} else if (IsArrayClass()) {
return 2;
} else if (IsProxyClass()) {
- mirror::ObjectArray<mirror::Class>* interfaces = GetInterfaces();
+ ObjectArray<Class>* interfaces = GetInterfaces();
return interfaces != nullptr ? interfaces->GetLength() : 0;
} else {
const DexFile::TypeList* interfaces = GetInterfaceTypeList();
@@ -941,7 +945,7 @@ inline StringDexCacheType* Class::GetDexCacheStrings() {
}
template<ReadBarrierOption kReadBarrierOption, class Visitor>
-void mirror::Class::VisitNativeRoots(Visitor& visitor, PointerSize pointer_size) {
+void Class::VisitNativeRoots(Visitor& visitor, PointerSize pointer_size) {
for (ArtField& field : GetSFieldsUnchecked()) {
// Visit roots first in case the declaring class gets moved.
field.VisitRoots(visitor);
@@ -1070,7 +1074,7 @@ inline uint32_t Class::NumStaticFields() {
}
template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption, typename Visitor>
-inline void Class::FixupNativePointers(mirror::Class* dest,
+inline void Class::FixupNativePointers(Class* dest,
PointerSize pointer_size,
const Visitor& visitor) {
// Update the field arrays.
@@ -1139,6 +1143,14 @@ inline bool Class::CanAccessMember(ObjPtr<Class> access_to, uint32_t member_flag
return this->IsInSamePackage(access_to);
}
+inline bool Class::CannotBeAssignedFromOtherTypes() {
+ if (!IsArrayClass()) {
+ return IsFinal();
+ }
+ ObjPtr<Class> component = GetComponentType();
+ return component->IsPrimitive() || component->CannotBeAssignedFromOtherTypes();
+}
+
} // namespace mirror
} // namespace art
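
The new CannotBeAssignedFromOtherTypes() at the end of class-inl.h answers whether a type is exact: a final class, or an array whose component type is primitive or itself exact. Expected results for a few types (illustrative, not tests from this patch):

//   String.class    -> true   (final class)
//   Object.class    -> false  (has subclasses)
//   String[].class  -> true   (final component type, checked recursively)
//   Object[].class  -> false  (e.g. an Integer[] is assignable to Object[])
//   int[].class     -> true   (primitive component type)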
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 689dd227c6..f93f72ff8b 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -40,12 +40,12 @@ namespace mirror {
GcRoot<Class> Class::java_lang_Class_;
-void Class::SetClassClass(Class* java_lang_Class) {
+void Class::SetClassClass(ObjPtr<Class> java_lang_Class) {
CHECK(java_lang_Class_.IsNull())
<< java_lang_Class_.Read()
<< " " << java_lang_Class;
CHECK(java_lang_Class != nullptr);
- java_lang_Class->SetClassFlags(mirror::kClassFlagClass);
+ java_lang_Class->SetClassFlags(kClassFlagClass);
java_lang_Class_ = GcRoot<Class>(java_lang_Class);
}
@@ -58,7 +58,7 @@ void Class::VisitRoots(RootVisitor* visitor) {
java_lang_Class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
}
-inline void Class::SetVerifyError(mirror::Object* error) {
+inline void Class::SetVerifyError(ObjPtr<Object> error) {
CHECK(error != nullptr) << PrettyClass(this);
if (Runtime::Current()->IsActiveTransaction()) {
SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(Class, verify_error_), error);
@@ -140,7 +140,7 @@ void Class::SetStatus(Handle<Class> h_this, Status new_status, Thread* self) {
}
}
-void Class::SetDexCache(DexCache* new_dex_cache) {
+void Class::SetDexCache(ObjPtr<DexCache> new_dex_cache) {
SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_), new_dex_cache);
SetDexCacheStrings(new_dex_cache != nullptr ? new_dex_cache->GetStrings() : nullptr);
}
@@ -209,8 +209,8 @@ void Class::DumpClass(std::ostream& os, int flags) {
Thread* const self = Thread::Current();
StackHandleScope<2> hs(self);
- Handle<mirror::Class> h_this(hs.NewHandle(this));
- Handle<mirror::Class> h_super(hs.NewHandle(GetSuperClass()));
+ Handle<Class> h_this(hs.NewHandle(this));
+ Handle<Class> h_super(hs.NewHandle(GetSuperClass()));
auto image_pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
std::string temp;
@@ -231,12 +231,12 @@ void Class::DumpClass(std::ostream& os, int flags) {
if (num_direct_interfaces > 0) {
os << " interfaces (" << num_direct_interfaces << "):\n";
for (size_t i = 0; i < num_direct_interfaces; ++i) {
- Class* interface = GetDirectInterface(self, h_this, i);
+ ObjPtr<Class> interface = GetDirectInterface(self, h_this, i);
if (interface == nullptr) {
os << StringPrintf(" %2zd: nullptr!\n", i);
} else {
- const ClassLoader* cl = interface->GetClassLoader();
- os << StringPrintf(" %2zd: %s (cl=%p)\n", i, PrettyClass(interface).c_str(), cl);
+ ObjPtr<ClassLoader> cl = interface->GetClassLoader();
+ os << StringPrintf(" %2zd: %s (cl=%p)\n", i, PrettyClass(interface).c_str(), cl.Ptr());
}
}
}
@@ -283,7 +283,7 @@ void Class::SetReferenceInstanceOffsets(uint32_t new_reference_offsets) {
// Sanity check that the number of bits set in the reference offset bitmap
// agrees with the number of references
uint32_t count = 0;
- for (Class* c = this; c != nullptr; c = c->GetSuperClass()) {
+ for (ObjPtr<Class> c = this; c != nullptr; c = c->GetSuperClass()) {
count += c->NumReferenceInstanceFieldsDuringLinking();
}
// +1 for the Class in Object.
@@ -338,7 +338,7 @@ bool Class::IsThrowableClass() {
return WellKnownClasses::ToClass(WellKnownClasses::java_lang_Throwable)->IsAssignableFrom(this);
}
-void Class::SetClassLoader(ClassLoader* new_class_loader) {
+void Class::SetClassLoader(ObjPtr<ClassLoader> new_class_loader) {
if (Runtime::Current()->IsActiveTransaction()) {
SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(Class, class_loader_), new_class_loader);
} else {
@@ -356,7 +356,7 @@ ArtMethod* Class::FindInterfaceMethod(const StringPiece& name,
}
int32_t iftable_count = GetIfTableCount();
- IfTable* iftable = GetIfTable();
+ ObjPtr<IfTable> iftable = GetIfTable();
for (int32_t i = 0; i < iftable_count; ++i) {
method = iftable->GetInterface(i)->FindDeclaredVirtualMethod(name, signature, pointer_size);
if (method != nullptr) {
@@ -376,7 +376,7 @@ ArtMethod* Class::FindInterfaceMethod(const StringPiece& name,
}
int32_t iftable_count = GetIfTableCount();
- IfTable* iftable = GetIfTable();
+ ObjPtr<IfTable> iftable = GetIfTable();
for (int32_t i = 0; i < iftable_count; ++i) {
method = iftable->GetInterface(i)->FindDeclaredVirtualMethod(name, signature, pointer_size);
if (method != nullptr) {
@@ -386,7 +386,7 @@ ArtMethod* Class::FindInterfaceMethod(const StringPiece& name,
return nullptr;
}
-ArtMethod* Class::FindInterfaceMethod(const DexCache* dex_cache,
+ArtMethod* Class::FindInterfaceMethod(ObjPtr<DexCache> dex_cache,
uint32_t dex_method_idx,
PointerSize pointer_size) {
// Check the current class before checking the interfaces.
@@ -396,7 +396,7 @@ ArtMethod* Class::FindInterfaceMethod(const DexCache* dex_cache,
}
int32_t iftable_count = GetIfTableCount();
- IfTable* iftable = GetIfTable();
+ ObjPtr<IfTable> iftable = GetIfTable();
for (int32_t i = 0; i < iftable_count; ++i) {
method = iftable->GetInterface(i)->FindDeclaredVirtualMethod(
dex_cache, dex_method_idx, pointer_size);
@@ -429,7 +429,7 @@ ArtMethod* Class::FindDeclaredDirectMethod(const StringPiece& name,
return nullptr;
}
-ArtMethod* Class::FindDeclaredDirectMethod(const DexCache* dex_cache,
+ArtMethod* Class::FindDeclaredDirectMethod(ObjPtr<DexCache> dex_cache,
uint32_t dex_method_idx,
PointerSize pointer_size) {
if (GetDexCache() == dex_cache) {
@@ -445,7 +445,7 @@ ArtMethod* Class::FindDeclaredDirectMethod(const DexCache* dex_cache,
ArtMethod* Class::FindDirectMethod(const StringPiece& name,
const StringPiece& signature,
PointerSize pointer_size) {
- for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
+ for (ObjPtr<Class> klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
ArtMethod* method = klass->FindDeclaredDirectMethod(name, signature, pointer_size);
if (method != nullptr) {
return method;
@@ -457,7 +457,7 @@ ArtMethod* Class::FindDirectMethod(const StringPiece& name,
ArtMethod* Class::FindDirectMethod(const StringPiece& name,
const Signature& signature,
PointerSize pointer_size) {
- for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
+ for (ObjPtr<Class> klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
ArtMethod* method = klass->FindDeclaredDirectMethod(name, signature, pointer_size);
if (method != nullptr) {
return method;
@@ -466,9 +466,10 @@ ArtMethod* Class::FindDirectMethod(const StringPiece& name,
return nullptr;
}
-ArtMethod* Class::FindDirectMethod(
- const DexCache* dex_cache, uint32_t dex_method_idx, PointerSize pointer_size) {
- for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
+ArtMethod* Class::FindDirectMethod(ObjPtr<DexCache> dex_cache,
+ uint32_t dex_method_idx,
+ PointerSize pointer_size) {
+ for (ObjPtr<Class> klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
ArtMethod* method = klass->FindDeclaredDirectMethod(dex_cache, dex_method_idx, pointer_size);
if (method != nullptr) {
return method;
@@ -516,7 +517,7 @@ ArtMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name,
return nullptr;
}
-ArtMethod* Class::FindDeclaredVirtualMethod(const DexCache* dex_cache,
+ArtMethod* Class::FindDeclaredVirtualMethod(ObjPtr<DexCache> dex_cache,
uint32_t dex_method_idx,
PointerSize pointer_size) {
if (GetDexCache() == dex_cache) {
@@ -540,9 +541,10 @@ ArtMethod* Class::FindDeclaredVirtualMethodByName(const StringPiece& name,
return nullptr;
}
-ArtMethod* Class::FindVirtualMethod(
- const StringPiece& name, const StringPiece& signature, PointerSize pointer_size) {
- for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
+ArtMethod* Class::FindVirtualMethod(const StringPiece& name,
+ const StringPiece& signature,
+ PointerSize pointer_size) {
+ for (ObjPtr<Class> klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
ArtMethod* method = klass->FindDeclaredVirtualMethod(name, signature, pointer_size);
if (method != nullptr) {
return method;
@@ -551,9 +553,10 @@ ArtMethod* Class::FindVirtualMethod(
return nullptr;
}
-ArtMethod* Class::FindVirtualMethod(
- const StringPiece& name, const Signature& signature, PointerSize pointer_size) {
- for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
+ArtMethod* Class::FindVirtualMethod(const StringPiece& name,
+ const Signature& signature,
+ PointerSize pointer_size) {
+ for (ObjPtr<Class> klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
ArtMethod* method = klass->FindDeclaredVirtualMethod(name, signature, pointer_size);
if (method != nullptr) {
return method;
@@ -562,9 +565,10 @@ ArtMethod* Class::FindVirtualMethod(
return nullptr;
}
-ArtMethod* Class::FindVirtualMethod(
- const DexCache* dex_cache, uint32_t dex_method_idx, PointerSize pointer_size) {
- for (Class* klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
+ArtMethod* Class::FindVirtualMethod(ObjPtr<DexCache> dex_cache,
+ uint32_t dex_method_idx,
+ PointerSize pointer_size) {
+ for (ObjPtr<Class> klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
ArtMethod* method = klass->FindDeclaredVirtualMethod(dex_cache, dex_method_idx, pointer_size);
if (method != nullptr) {
return method;
@@ -591,8 +595,8 @@ ArtMethod* Class::FindVirtualMethodForInterfaceSuper(ArtMethod* method, PointerS
Thread* self = Thread::Current();
StackHandleScope<2> hs(self);
- MutableHandle<mirror::IfTable> iftable(hs.NewHandle(GetIfTable()));
- MutableHandle<mirror::Class> iface(hs.NewHandle<mirror::Class>(nullptr));
+ MutableHandle<IfTable> iftable(hs.NewHandle(GetIfTable()));
+ MutableHandle<Class> iface(hs.NewHandle<Class>(nullptr));
size_t iftable_count = GetIfTableCount();
// Find the method. We don't need to check for conflicts because they would have been in the
// copied virtuals of this interface. Order matters, traverse in reverse topological order; most
@@ -696,7 +700,7 @@ ArtField* Class::FindDeclaredInstanceField(const StringPiece& name, const String
return FindFieldByNameAndType(GetIFieldsPtr(), name, type);
}
-ArtField* Class::FindDeclaredInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx) {
+ArtField* Class::FindDeclaredInstanceField(ObjPtr<DexCache> dex_cache, uint32_t dex_field_idx) {
if (GetDexCache() == dex_cache) {
for (ArtField& field : GetIFields()) {
if (field.GetDexFieldIndex() == dex_field_idx) {
@@ -710,7 +714,7 @@ ArtField* Class::FindDeclaredInstanceField(const DexCache* dex_cache, uint32_t d
ArtField* Class::FindInstanceField(const StringPiece& name, const StringPiece& type) {
// Is the field in this class, or any of its superclasses?
// Interfaces are not relevant because they can't contain instance fields.
- for (Class* c = this; c != nullptr; c = c->GetSuperClass()) {
+ for (ObjPtr<Class> c = this; c != nullptr; c = c->GetSuperClass()) {
ArtField* f = c->FindDeclaredInstanceField(name, type);
if (f != nullptr) {
return f;
@@ -719,10 +723,10 @@ ArtField* Class::FindInstanceField(const StringPiece& name, const StringPiece& t
return nullptr;
}
-ArtField* Class::FindInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx) {
+ArtField* Class::FindInstanceField(ObjPtr<DexCache> dex_cache, uint32_t dex_field_idx) {
// Is the field in this class, or any of its superclasses?
// Interfaces are not relevant because they can't contain instance fields.
- for (Class* c = this; c != nullptr; c = c->GetSuperClass()) {
+ for (ObjPtr<Class> c = this; c != nullptr; c = c->GetSuperClass()) {
ArtField* f = c->FindDeclaredInstanceField(dex_cache, dex_field_idx);
if (f != nullptr) {
return f;
@@ -736,7 +740,7 @@ ArtField* Class::FindDeclaredStaticField(const StringPiece& name, const StringPi
return FindFieldByNameAndType(GetSFieldsPtr(), name, type);
}
-ArtField* Class::FindDeclaredStaticField(const DexCache* dex_cache, uint32_t dex_field_idx) {
+ArtField* Class::FindDeclaredStaticField(ObjPtr<DexCache> dex_cache, uint32_t dex_field_idx) {
if (dex_cache == GetDexCache()) {
for (ArtField& field : GetSFields()) {
if (field.GetDexFieldIndex() == dex_field_idx) {
@@ -747,11 +751,13 @@ ArtField* Class::FindDeclaredStaticField(const DexCache* dex_cache, uint32_t dex
return nullptr;
}
-ArtField* Class::FindStaticField(Thread* self, Handle<Class> klass, const StringPiece& name,
+ArtField* Class::FindStaticField(Thread* self,
+ Handle<Class> klass,
+ const StringPiece& name,
const StringPiece& type) {
// Is the field in this class (or its interfaces), or any of its
// superclasses (or their interfaces)?
- for (Class* k = klass.Get(); k != nullptr; k = k->GetSuperClass()) {
+ for (ObjPtr<Class> k = klass.Get(); k != nullptr; k = k->GetSuperClass()) {
// Is the field in this class?
ArtField* f = k->FindDeclaredStaticField(name, type);
if (f != nullptr) {
@@ -759,11 +765,11 @@ ArtField* Class::FindStaticField(Thread* self, Handle<Class> klass, const String
}
// Wrap k in case it moves during GetDirectInterface.
StackHandleScope<1> hs(self);
- HandleWrapper<mirror::Class> h_k(hs.NewHandleWrapper(&k));
+ HandleWrapperObjPtr<Class> h_k(hs.NewHandleWrapper(&k));
// Is this field in any of this class' interfaces?
for (uint32_t i = 0; i < h_k->NumDirectInterfaces(); ++i) {
StackHandleScope<1> hs2(self);
- Handle<mirror::Class> interface(hs2.NewHandle(GetDirectInterface(self, h_k, i)));
+ Handle<Class> interface(hs2.NewHandle(GetDirectInterface(self, h_k, i)));
f = FindStaticField(self, interface, name, type);
if (f != nullptr) {
return f;
@@ -774,10 +780,10 @@ ArtField* Class::FindStaticField(Thread* self, Handle<Class> klass, const String
}
ArtField* Class::FindStaticField(Thread* self,
- Class* klass,
- const DexCache* dex_cache,
+ ObjPtr<Class> klass,
+ ObjPtr<DexCache> dex_cache,
uint32_t dex_field_idx) {
- for (Class* k = klass; k != nullptr; k = k->GetSuperClass()) {
+ for (ObjPtr<Class> k = klass; k != nullptr; k = k->GetSuperClass()) {
// Is the field in this class?
ArtField* f = k->FindDeclaredStaticField(dex_cache, dex_field_idx);
if (f != nullptr) {
@@ -787,10 +793,10 @@ ArtField* Class::FindStaticField(Thread* self,
// from here, it takes a Handle as an argument, so we need to wrap `k`.
ScopedAssertNoThreadSuspension ants(__FUNCTION__);
StackHandleScope<1> hs(self);
- Handle<mirror::Class> h_k(hs.NewHandle(k));
+ Handle<Class> h_k(hs.NewHandle(k));
// Is this field in any of this class' interfaces?
for (uint32_t i = 0; i < h_k->NumDirectInterfaces(); ++i) {
- mirror::Class* interface = GetDirectInterface(self, h_k, i);
+ ObjPtr<Class> interface = GetDirectInterface(self, h_k, i);
f = FindStaticField(self, interface, dex_cache, dex_field_idx);
if (f != nullptr) {
return f;
@@ -800,10 +806,12 @@ ArtField* Class::FindStaticField(Thread* self,
return nullptr;
}
-ArtField* Class::FindField(Thread* self, Handle<Class> klass, const StringPiece& name,
+ArtField* Class::FindField(Thread* self,
+ Handle<Class> klass,
+ const StringPiece& name,
const StringPiece& type) {
// Find a field using the JLS field resolution order
- for (Class* k = klass.Get(); k != nullptr; k = k->GetSuperClass()) {
+ for (ObjPtr<Class> k = klass.Get(); k != nullptr; k = k->GetSuperClass()) {
// Is the field in this class?
ArtField* f = k->FindDeclaredInstanceField(name, type);
if (f != nullptr) {
@@ -815,10 +823,10 @@ ArtField* Class::FindField(Thread* self, Handle<Class> klass, const StringPiece&
}
// Is this field in any of this class' interfaces?
StackHandleScope<1> hs(self);
- HandleWrapper<mirror::Class> h_k(hs.NewHandleWrapper(&k));
+ HandleWrapperObjPtr<Class> h_k(hs.NewHandleWrapper(&k));
for (uint32_t i = 0; i < h_k->NumDirectInterfaces(); ++i) {
StackHandleScope<1> hs2(self);
- Handle<mirror::Class> interface(hs2.NewHandle(GetDirectInterface(self, h_k, i)));
+ Handle<Class> interface(hs2.NewHandle(GetDirectInterface(self, h_k, i)));
f = interface->FindStaticField(self, interface, name, type);
if (f != nullptr) {
return f;
@@ -874,8 +882,9 @@ uint16_t Class::GetDirectInterfaceTypeIdx(uint32_t idx) {
return GetInterfaceTypeList()->GetTypeItem(idx).type_idx_;
}
-mirror::Class* Class::GetDirectInterface(Thread* self, Handle<mirror::Class> klass,
- uint32_t idx) {
+ObjPtr<Class> Class::GetDirectInterface(Thread* self,
+ Handle<Class> klass,
+ uint32_t idx) {
DCHECK(klass.Get() != nullptr);
DCHECK(!klass->IsPrimitive());
if (klass->IsArrayClass()) {
@@ -887,12 +896,12 @@ mirror::Class* Class::GetDirectInterface(Thread* self, Handle<mirror::Class> kla
return class_linker->FindSystemClass(self, "Ljava/io/Serializable;");
}
} else if (klass->IsProxyClass()) {
- mirror::ObjectArray<mirror::Class>* interfaces = klass.Get()->GetInterfaces();
+ ObjPtr<ObjectArray<Class>> interfaces = klass.Get()->GetInterfaces();
DCHECK(interfaces != nullptr);
return interfaces->Get(idx);
} else {
uint16_t type_idx = klass->GetDirectInterfaceTypeIdx(idx);
- mirror::Class* interface = klass->GetDexCache()->GetResolvedType(type_idx);
+ ObjPtr<Class> interface = klass->GetDexCache()->GetResolvedType(type_idx);
if (interface == nullptr) {
interface = Runtime::Current()->GetClassLinker()->ResolveType(klass->GetDexFile(), type_idx,
klass.Get());
@@ -902,13 +911,13 @@ mirror::Class* Class::GetDirectInterface(Thread* self, Handle<mirror::Class> kla
}
}
-mirror::Class* Class::GetCommonSuperClass(Handle<Class> klass) {
+ObjPtr<Class> Class::GetCommonSuperClass(Handle<Class> klass) {
DCHECK(klass.Get() != nullptr);
DCHECK(!klass->IsInterface());
DCHECK(!IsInterface());
- mirror::Class* common_super_class = this;
+ ObjPtr<Class> common_super_class = this;
while (!common_super_class->IsAssignableFrom(klass.Get())) {
- mirror::Class* old_common = common_super_class;
+ ObjPtr<Class> old_common = common_super_class;
common_super_class = old_common->GetSuperClass();
DCHECK(common_super_class != nullptr) << PrettyClass(old_common);
}
@@ -926,7 +935,7 @@ const char* Class::GetSourceFile() {
}
std::string Class::GetLocation() {
- mirror::DexCache* dex_cache = GetDexCache();
+ ObjPtr<DexCache> dex_cache = GetDexCache();
if (dex_cache != nullptr && !IsProxyClass()) {
return dex_cache->GetLocation()->ToModifiedUtf8();
}
@@ -959,28 +968,28 @@ void Class::PopulateEmbeddedVTable(PointerSize pointer_size) {
class ReadBarrierOnNativeRootsVisitor {
public:
- void operator()(mirror::Object* obj ATTRIBUTE_UNUSED,
+ void operator()(ObjPtr<Object> obj ATTRIBUTE_UNUSED,
MemberOffset offset ATTRIBUTE_UNUSED,
bool is_static ATTRIBUTE_UNUSED) const {}
- void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ void VisitRootIfNonNull(CompressedReference<Object>* root) const
REQUIRES_SHARED(Locks::mutator_lock_) {
if (!root->IsNull()) {
VisitRoot(root);
}
}
- void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ void VisitRoot(CompressedReference<Object>* root) const
REQUIRES_SHARED(Locks::mutator_lock_) {
- mirror::Object* old_ref = root->AsMirrorPtr();
- mirror::Object* new_ref = ReadBarrier::BarrierForRoot(root);
+ ObjPtr<Object> old_ref = root->AsMirrorPtr();
+ ObjPtr<Object> new_ref = ReadBarrier::BarrierForRoot(root);
if (old_ref != new_ref) {
// Update the field atomically. This may fail if the mutator updates before us, but that's OK.
auto* atomic_root =
- reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
+ reinterpret_cast<Atomic<CompressedReference<Object>>*>(root);
atomic_root->CompareExchangeStrongSequentiallyConsistent(
- mirror::CompressedReference<mirror::Object>::FromMirrorPtr(old_ref),
- mirror::CompressedReference<mirror::Object>::FromMirrorPtr(new_ref));
+ CompressedReference<Object>::FromMirrorPtr(old_ref.Ptr()),
+ CompressedReference<Object>::FromMirrorPtr(new_ref.Ptr()));
}
}
};
@@ -989,7 +998,7 @@ class ReadBarrierOnNativeRootsVisitor {
class CopyClassVisitor {
public:
CopyClassVisitor(Thread* self,
- Handle<mirror::Class>* orig,
+ Handle<Class>* orig,
size_t new_length,
size_t copy_bytes,
ImTable* imt,
@@ -998,24 +1007,24 @@ class CopyClassVisitor {
copy_bytes_(copy_bytes), imt_(imt), pointer_size_(pointer_size) {
}
- void operator()(ObjPtr<mirror::Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
+ void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
REQUIRES_SHARED(Locks::mutator_lock_) {
StackHandleScope<1> hs(self_);
Handle<mirror::Class> h_new_class_obj(hs.NewHandle(obj->AsClass()));
- mirror::Object::CopyObject(h_new_class_obj.Get(), orig_->Get(), copy_bytes_);
- mirror::Class::SetStatus(h_new_class_obj, Class::kStatusResolving, self_);
+ Object::CopyObject(h_new_class_obj.Get(), orig_->Get(), copy_bytes_);
+ Class::SetStatus(h_new_class_obj, Class::kStatusResolving, self_);
h_new_class_obj->PopulateEmbeddedVTable(pointer_size_);
h_new_class_obj->SetImt(imt_, pointer_size_);
h_new_class_obj->SetClassSize(new_length_);
// Visit all of the references to make sure there is no from space references in the native
// roots.
- static_cast<mirror::Object*>(h_new_class_obj.Get())->VisitReferences(
+ ObjPtr<Object>(h_new_class_obj.Get())->VisitReferences(
ReadBarrierOnNativeRootsVisitor(), VoidFunctor());
}
private:
Thread* const self_;
- Handle<mirror::Class>* const orig_;
+ Handle<Class>* const orig_;
const size_t new_length_;
const size_t copy_bytes_;
ImTable* imt_;
@@ -1027,12 +1036,12 @@ Class* Class::CopyOf(Thread* self, int32_t new_length, ImTable* imt, PointerSize
DCHECK_GE(new_length, static_cast<int32_t>(sizeof(Class)));
// We may get copied by a compacting GC.
StackHandleScope<1> hs(self);
- Handle<mirror::Class> h_this(hs.NewHandle(this));
+ Handle<Class> h_this(hs.NewHandle(this));
gc::Heap* heap = Runtime::Current()->GetHeap();
// The num_bytes (3rd param) is sizeof(Class) as opposed to SizeOf()
// to skip copying the tail part that we will overwrite here.
CopyClassVisitor visitor(self, &h_this, new_length, sizeof(Class), imt, pointer_size);
- mirror::Object* new_class = kMovingClasses ?
+ ObjPtr<Object> new_class = kMovingClasses ?
heap->AllocObject<true>(self, java_lang_Class_.Read(), new_length, visitor) :
heap->AllocNonMovableObject<true>(self, java_lang_Class_.Read(), new_length, visitor);
if (UNLIKELY(new_class == nullptr)) {
@@ -1049,7 +1058,7 @@ bool Class::ProxyDescriptorEquals(const char* match) {
// TODO: Move this to java_lang_Class.cc?
ArtMethod* Class::GetDeclaredConstructor(
- Thread* self, Handle<mirror::ObjectArray<mirror::Class>> args, PointerSize pointer_size) {
+ Thread* self, Handle<ObjectArray<Class>> args, PointerSize pointer_size) {
for (auto& m : GetDirectMethods(pointer_size)) {
// Skip <clinit> which is a static constructor, as well as non constructors.
if (m.IsStatic() || !m.IsConstructor()) {
@@ -1068,7 +1077,7 @@ ArtMethod* Class::GetDeclaredConstructor(
uint32_t Class::Depth() {
uint32_t depth = 0;
- for (Class* klass = this; klass->GetSuperClass() != nullptr; klass = klass->GetSuperClass()) {
+ for (ObjPtr<Class> klass = this;
+      klass->GetSuperClass() != nullptr;
+      klass = klass->GetSuperClass()) {
depth++;
}
return depth;
@@ -1081,10 +1090,11 @@ uint32_t Class::FindTypeIndexInOtherDexFile(const DexFile& dex_file) {
}
template <PointerSize kPointerSize, bool kTransactionActive>
-mirror::Method* Class::GetDeclaredMethodInternal(Thread* self,
- mirror::Class* klass,
- mirror::String* name,
- mirror::ObjectArray<mirror::Class>* args) {
+ObjPtr<Method> Class::GetDeclaredMethodInternal(
+ Thread* self,
+ ObjPtr<Class> klass,
+ ObjPtr<String> name,
+ ObjPtr<ObjectArray<Class>> args) {
// Covariant return types permit the class to define multiple
// methods with the same name and parameter types. Prefer to
// return a non-synthetic method in such situations. We may
@@ -1099,12 +1109,12 @@ mirror::Method* Class::GetDeclaredMethodInternal(Thread* self,
return nullptr;
}
auto h_args = hs.NewHandle(args);
- Handle<mirror::Class> h_klass = hs.NewHandle(klass);
+ Handle<Class> h_klass = hs.NewHandle(klass);
ArtMethod* result = nullptr;
for (auto& m : h_klass->GetDeclaredVirtualMethods(kPointerSize)) {
auto* np_method = m.GetInterfaceMethodIfProxy(kPointerSize);
// May cause thread suspension.
- mirror::String* np_name = np_method->GetNameAsString(self);
+ ObjPtr<String> np_name = np_method->GetNameAsString(self);
if (!np_name->Equals(h_method_name.Get()) || !np_method->EqualParameters(h_args)) {
if (UNLIKELY(self->IsExceptionPending())) {
return nullptr;
@@ -1113,7 +1123,7 @@ mirror::Method* Class::GetDeclaredMethodInternal(Thread* self,
}
auto modifiers = m.GetAccessFlags();
if ((modifiers & kSkipModifiers) == 0) {
- return mirror::Method::CreateFromArtMethod<kPointerSize, kTransactionActive>(self, &m);
+ return Method::CreateFromArtMethod<kPointerSize, kTransactionActive>(self, &m);
}
if ((modifiers & kAccMiranda) == 0) {
result = &m; // Remember as potential result if it's not a miranda method.
@@ -1127,7 +1137,7 @@ mirror::Method* Class::GetDeclaredMethodInternal(Thread* self,
}
auto* np_method = m.GetInterfaceMethodIfProxy(kPointerSize);
// May cause thread suspension.
- mirror::String* np_name = np_method->GetNameAsString(self);
+ ObjPtr<String> np_name = np_method->GetNameAsString(self);
if (np_name == nullptr) {
self->AssertPendingException();
return nullptr;
@@ -1139,76 +1149,76 @@ mirror::Method* Class::GetDeclaredMethodInternal(Thread* self,
continue;
}
if ((modifiers & kSkipModifiers) == 0) {
- return mirror::Method::CreateFromArtMethod<kPointerSize, kTransactionActive>(self, &m);
+ return Method::CreateFromArtMethod<kPointerSize, kTransactionActive>(self, &m);
}
// Direct methods cannot be miranda methods, so this potential result must be synthetic.
result = &m;
}
}
return result != nullptr
- ? mirror::Method::CreateFromArtMethod<kPointerSize, kTransactionActive>(self, result)
+ ? Method::CreateFromArtMethod<kPointerSize, kTransactionActive>(self, result)
: nullptr;
}
template
-mirror::Method* Class::GetDeclaredMethodInternal<PointerSize::k32, false>(
+ObjPtr<Method> Class::GetDeclaredMethodInternal<PointerSize::k32, false>(
Thread* self,
- mirror::Class* klass,
- mirror::String* name,
- mirror::ObjectArray<mirror::Class>* args);
+ ObjPtr<Class> klass,
+ ObjPtr<String> name,
+ ObjPtr<ObjectArray<Class>> args);
template
-mirror::Method* Class::GetDeclaredMethodInternal<PointerSize::k32, true>(
+ObjPtr<Method> Class::GetDeclaredMethodInternal<PointerSize::k32, true>(
Thread* self,
- mirror::Class* klass,
- mirror::String* name,
- mirror::ObjectArray<mirror::Class>* args);
+ ObjPtr<Class> klass,
+ ObjPtr<String> name,
+ ObjPtr<ObjectArray<Class>> args);
template
-mirror::Method* Class::GetDeclaredMethodInternal<PointerSize::k64, false>(
+ObjPtr<Method> Class::GetDeclaredMethodInternal<PointerSize::k64, false>(
Thread* self,
- mirror::Class* klass,
- mirror::String* name,
- mirror::ObjectArray<mirror::Class>* args);
+ ObjPtr<Class> klass,
+ ObjPtr<String> name,
+ ObjPtr<ObjectArray<Class>> args);
template
-mirror::Method* Class::GetDeclaredMethodInternal<PointerSize::k64, true>(
+ObjPtr<Method> Class::GetDeclaredMethodInternal<PointerSize::k64, true>(
Thread* self,
- mirror::Class* klass,
- mirror::String* name,
- mirror::ObjectArray<mirror::Class>* args);
+ ObjPtr<Class> klass,
+ ObjPtr<String> name,
+ ObjPtr<ObjectArray<Class>> args);
template <PointerSize kPointerSize, bool kTransactionActive>
-mirror::Constructor* Class::GetDeclaredConstructorInternal(
+ObjPtr<Constructor> Class::GetDeclaredConstructorInternal(
Thread* self,
- mirror::Class* klass,
- mirror::ObjectArray<mirror::Class>* args) {
+ ObjPtr<Class> klass,
+ ObjPtr<ObjectArray<Class>> args) {
StackHandleScope<1> hs(self);
ArtMethod* result = klass->GetDeclaredConstructor(self, hs.NewHandle(args), kPointerSize);
return result != nullptr
- ? mirror::Constructor::CreateFromArtMethod<kPointerSize, kTransactionActive>(self, result)
+ ? Constructor::CreateFromArtMethod<kPointerSize, kTransactionActive>(self, result)
: nullptr;
}
-// mirror::Constructor::CreateFromArtMethod<kTransactionActive>(self, result)
+// Constructor::CreateFromArtMethod<kTransactionActive>(self, result)
template
-mirror::Constructor* Class::GetDeclaredConstructorInternal<PointerSize::k32, false>(
+ObjPtr<Constructor> Class::GetDeclaredConstructorInternal<PointerSize::k32, false>(
Thread* self,
- mirror::Class* klass,
- mirror::ObjectArray<mirror::Class>* args);
+ ObjPtr<Class> klass,
+ ObjPtr<ObjectArray<Class>> args);
template
-mirror::Constructor* Class::GetDeclaredConstructorInternal<PointerSize::k32, true>(
+ObjPtr<Constructor> Class::GetDeclaredConstructorInternal<PointerSize::k32, true>(
Thread* self,
- mirror::Class* klass,
- mirror::ObjectArray<mirror::Class>* args);
+ ObjPtr<Class> klass,
+ ObjPtr<ObjectArray<Class>> args);
template
-mirror::Constructor* Class::GetDeclaredConstructorInternal<PointerSize::k64, false>(
+ObjPtr<Constructor> Class::GetDeclaredConstructorInternal<PointerSize::k64, false>(
Thread* self,
- mirror::Class* klass,
- mirror::ObjectArray<mirror::Class>* args);
+ ObjPtr<Class> klass,
+ ObjPtr<ObjectArray<Class>> args);
template
-mirror::Constructor* Class::GetDeclaredConstructorInternal<PointerSize::k64, true>(
+ObjPtr<Constructor> Class::GetDeclaredConstructorInternal<PointerSize::k64, true>(
Thread* self,
- mirror::Class* klass,
- mirror::ObjectArray<mirror::Class>* args);
+ ObjPtr<Class> klass,
+ ObjPtr<ObjectArray<Class>> args);
int32_t Class::GetInnerClassFlags(Handle<Class> h_this, int32_t default_value) {
if (h_this->IsProxyClass() || h_this->GetDexCache() == nullptr) {
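
The class.cc hunks above rely on three properties of the ObjPtr wrapper that are visible in the diff itself: implicit construction from a raw pointer, an explicit Ptr() to get one back, and DownCast for narrowing. A standalone sketch of that conversion surface; TinyObjPtr is an illustrative stand-in, not ART's ObjPtr (which additionally carries debug-build validity checking):

#include <cassert>

template <typename T>
class TinyObjPtr {
 public:
  TinyObjPtr(T* ptr = nullptr) : ptr_(ptr) {}  // implicit: call sites keep passing raw pointers
  T* Ptr() const { return ptr_; }              // explicit unwrap at raw-pointer boundaries
  T* operator->() const { return ptr_; }
  template <typename U>
  static TinyObjPtr<T> DownCast(TinyObjPtr<U> other) {
    return TinyObjPtr<T>(static_cast<T*>(other.Ptr()));  // cast assumed valid in this sketch
  }
 private:
  T* ptr_;
};

struct Object { virtual ~Object() = default; };
struct Method : Object { int index = 0; };

int main() {
  Method m;
  TinyObjPtr<Object> o(&m);                                   // raw to wrapper, implicit
  TinyObjPtr<Method> back = TinyObjPtr<Method>::DownCast(o);  // narrowing, explicit
  assert(back->index == 0);
  assert(back.Ptr() == &m);
  return 0;
}
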
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 725939a7c4..a5b61fdad3 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -56,7 +56,7 @@ class IfTable;
class Method;
template <typename T> struct PACKED(8) DexCachePair;
-using StringDexCachePair = DexCachePair<mirror::String>;
+using StringDexCachePair = DexCachePair<String>;
using StringDexCacheType = std::atomic<StringDexCachePair>;
// C++ mirror of java.lang.Class
@@ -337,18 +337,7 @@ class MANAGED Class FINAL : public Object {
// For array classes: all array classes are final since arrays cannot be subclassed, yet an
// Object[] may still be assigned to by a String[]; a String[], in turn, may not be assigned
// to by other types because its component type is final.
- bool CannotBeAssignedFromOtherTypes() REQUIRES_SHARED(Locks::mutator_lock_) {
- if (!IsArrayClass()) {
- return IsFinal();
- } else {
- Class* component = GetComponentType();
- if (component->IsPrimitive()) {
- return true;
- } else {
- return component->CannotBeAssignedFromOtherTypes();
- }
- }
- }
+ bool CannotBeAssignedFromOtherTypes() REQUIRES_SHARED(Locks::mutator_lock_);
// Returns true if this class is the placeholder and should retire and
// be replaced with a class with the right size for embedded imt/vtable.
@@ -358,7 +347,7 @@ class MANAGED Class FINAL : public Object {
}
String* GetName() REQUIRES_SHARED(Locks::mutator_lock_); // Returns the cached name.
- void SetName(String* name) REQUIRES_SHARED(Locks::mutator_lock_); // Sets the cached name.
+ void SetName(ObjPtr<String> name) REQUIRES_SHARED(Locks::mutator_lock_); // Sets the cached name.
// Computes the name, then sets the cached value.
static String* ComputeName(Handle<Class> h_this) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
@@ -473,7 +462,7 @@ class MANAGED Class FINAL : public Object {
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
Class* GetComponentType() REQUIRES_SHARED(Locks::mutator_lock_);
- void SetComponentType(Class* new_component_type) REQUIRES_SHARED(Locks::mutator_lock_) {
+ void SetComponentType(ObjPtr<Class> new_component_type) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(GetComponentType() == nullptr);
DCHECK(new_component_type != nullptr);
// Component type is invariant: use non-transactional mode without check.
@@ -508,7 +497,7 @@ class MANAGED Class FINAL : public Object {
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
bool IsObjectArrayClass() REQUIRES_SHARED(Locks::mutator_lock_) {
- mirror::Class* const component_type = GetComponentType<kVerifyFlags, kReadBarrierOption>();
+ ObjPtr<Class> const component_type = GetComponentType<kVerifyFlags, kReadBarrierOption>();
return component_type != nullptr && !component_type->IsPrimitive();
}
@@ -528,12 +517,12 @@ class MANAGED Class FINAL : public Object {
// Creates a raw object instance but does not invoke the default constructor.
template<bool kIsInstrumented, bool kCheckAddFinalizer = true>
- ALWAYS_INLINE Object* Alloc(Thread* self, gc::AllocatorType allocator_type)
+ ALWAYS_INLINE ObjPtr<Object> Alloc(Thread* self, gc::AllocatorType allocator_type)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
- Object* AllocObject(Thread* self)
+ ObjPtr<Object> AllocObject(Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
- Object* AllocNonMovableObject(Thread* self)
+ ObjPtr<Object> AllocNonMovableObject(Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
@@ -638,11 +627,14 @@ class MANAGED Class FINAL : public Object {
// Can this class access a resolved method?
// Note that access to the method's class is checked and this may require looking up the class
// referenced by the MethodId in the DexFile in case the declaring class is inaccessible.
- bool CanAccessResolvedMethod(Class* access_to, ArtMethod* resolved_method,
- DexCache* dex_cache, uint32_t method_idx)
+ bool CanAccessResolvedMethod(ObjPtr<Class> access_to,
+ ArtMethod* resolved_method,
+ ObjPtr<DexCache> dex_cache,
+ uint32_t method_idx)
REQUIRES_SHARED(Locks::mutator_lock_);
template <InvokeType throw_invoke_type>
- bool CheckResolvedMethodAccess(Class* access_to, ArtMethod* resolved_method,
+ bool CheckResolvedMethodAccess(ObjPtr<Class> access_to,
+ ArtMethod* resolved_method,
uint32_t method_idx)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -661,11 +653,12 @@ class MANAGED Class FINAL : public Object {
// Get first common super class. It will never return null.
// `This` and `klass` must be classes.
- Class* GetCommonSuperClass(Handle<Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
+ ObjPtr<Class> GetCommonSuperClass(Handle<Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
- void SetSuperClass(Class* new_super_class) REQUIRES_SHARED(Locks::mutator_lock_) {
+ void SetSuperClass(ObjPtr<Class> new_super_class) REQUIRES_SHARED(Locks::mutator_lock_) {
// Super class is assigned once, except during class linker initialization.
- Class* old_super_class = GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_));
+ ObjPtr<Class> old_super_class =
+ GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_));
DCHECK(old_super_class == nullptr || old_super_class == new_super_class);
DCHECK(new_super_class != nullptr);
SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_), new_super_class);
@@ -681,7 +674,7 @@ class MANAGED Class FINAL : public Object {
ClassLoader* GetClassLoader() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_);
- void SetClassLoader(ClassLoader* new_cl) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetClassLoader(ObjPtr<ClassLoader> new_cl) REQUIRES_SHARED(Locks::mutator_lock_);
static MemberOffset DexCacheOffset() {
return MemberOffset(OFFSETOF_MEMBER(Class, dex_cache_));
@@ -699,7 +692,7 @@ class MANAGED Class FINAL : public Object {
DexCache* GetDexCache() REQUIRES_SHARED(Locks::mutator_lock_);
// Also updates the dex_cache_strings_ variable from new_dex_cache.
- void SetDexCache(DexCache* new_dex_cache) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetDexCache(ObjPtr<DexCache> new_dex_cache) REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetDirectMethods(PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -753,15 +746,16 @@ class MANAGED Class FINAL : public Object {
REQUIRES_SHARED(Locks::mutator_lock_);
template <PointerSize kPointerSize, bool kTransactionActive>
- static Method* GetDeclaredMethodInternal(Thread* self,
- mirror::Class* klass,
- mirror::String* name,
- mirror::ObjectArray<mirror::Class>* args)
+ static ObjPtr<Method> GetDeclaredMethodInternal(Thread* self,
+ ObjPtr<Class> klass,
+ ObjPtr<String> name,
+ ObjPtr<ObjectArray<Class>> args)
REQUIRES_SHARED(Locks::mutator_lock_);
+
template <PointerSize kPointerSize, bool kTransactionActive>
- static Constructor* GetDeclaredConstructorInternal(Thread* self,
- mirror::Class* klass,
- mirror::ObjectArray<mirror::Class>* args)
+ static ObjPtr<Constructor> GetDeclaredConstructorInternal(Thread* self,
+ ObjPtr<Class> klass,
+ ObjPtr<ObjectArray<Class>> args)
REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
@@ -894,69 +888,86 @@ class MANAGED Class FINAL : public Object {
ArtMethod* FindVirtualMethodForVirtualOrInterface(ArtMethod* method, PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ArtMethod* FindInterfaceMethod(const StringPiece& name, const StringPiece& signature,
+ ArtMethod* FindInterfaceMethod(const StringPiece& name,
+ const StringPiece& signature,
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ArtMethod* FindInterfaceMethod(const StringPiece& name, const Signature& signature,
+ ArtMethod* FindInterfaceMethod(const StringPiece& name,
+ const Signature& signature,
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ArtMethod* FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
+ ArtMethod* FindInterfaceMethod(ObjPtr<DexCache> dex_cache,
+ uint32_t dex_method_idx,
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ArtMethod* FindDeclaredDirectMethod(const StringPiece& name, const StringPiece& signature,
+ ArtMethod* FindDeclaredDirectMethod(const StringPiece& name,
+ const StringPiece& signature,
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ArtMethod* FindDeclaredDirectMethod(const StringPiece& name, const Signature& signature,
+ ArtMethod* FindDeclaredDirectMethod(const StringPiece& name,
+ const Signature& signature,
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ArtMethod* FindDeclaredDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
+ ArtMethod* FindDeclaredDirectMethod(ObjPtr<DexCache> dex_cache,
+ uint32_t dex_method_idx,
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ArtMethod* FindDirectMethod(const StringPiece& name, const StringPiece& signature,
+ ArtMethod* FindDirectMethod(const StringPiece& name,
+ const StringPiece& signature,
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ArtMethod* FindDirectMethod(const StringPiece& name, const Signature& signature,
+ ArtMethod* FindDirectMethod(const StringPiece& name,
+ const Signature& signature,
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ArtMethod* FindDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
+ ArtMethod* FindDirectMethod(ObjPtr<DexCache> dex_cache,
+ uint32_t dex_method_idx,
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature,
+ ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name,
+ const StringPiece& signature,
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name, const Signature& signature,
+ ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name,
+ const Signature& signature,
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ArtMethod* FindDeclaredVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
+ ArtMethod* FindDeclaredVirtualMethod(ObjPtr<DexCache> dex_cache,
+ uint32_t dex_method_idx,
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ArtMethod* FindDeclaredVirtualMethodByName(const StringPiece& name, PointerSize pointer_size)
+ ArtMethod* FindDeclaredVirtualMethodByName(const StringPiece& name,
+ PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ArtMethod* FindDeclaredDirectMethodByName(const StringPiece& name, PointerSize pointer_size)
+ ArtMethod* FindDeclaredDirectMethodByName(const StringPiece& name,
+ PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ArtMethod* FindVirtualMethod(const StringPiece& name, const StringPiece& signature,
+ ArtMethod* FindVirtualMethod(const StringPiece& name,
+ const StringPiece& signature,
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ArtMethod* FindVirtualMethod(const StringPiece& name, const Signature& signature,
+ ArtMethod* FindVirtualMethod(const StringPiece& name,
+ const Signature& signature,
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ArtMethod* FindVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
+ ArtMethod* FindVirtualMethod(ObjPtr<DexCache> dex_cache,
+ uint32_t dex_method_idx,
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -976,7 +987,8 @@ class MANAGED Class FINAL : public Object {
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ALWAYS_INLINE IfTable* GetIfTable() REQUIRES_SHARED(Locks::mutator_lock_);
- ALWAYS_INLINE void SetIfTable(IfTable* new_iftable) REQUIRES_SHARED(Locks::mutator_lock_);
+ ALWAYS_INLINE void SetIfTable(ObjPtr<IfTable> new_iftable)
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Get instance fields of the class (See also GetSFields).
LengthPrefixedArray<ArtField>* GetIFieldsPtr() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -1077,32 +1089,34 @@ class MANAGED Class FINAL : public Object {
// Finds the given instance field in this class or a superclass; only searches classes that
// have the same dex cache.
- ArtField* FindInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx)
+ ArtField* FindInstanceField(ObjPtr<DexCache> dex_cache, uint32_t dex_field_idx)
REQUIRES_SHARED(Locks::mutator_lock_);
ArtField* FindDeclaredInstanceField(const StringPiece& name, const StringPiece& type)
REQUIRES_SHARED(Locks::mutator_lock_);
- ArtField* FindDeclaredInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx)
+ ArtField* FindDeclaredInstanceField(ObjPtr<DexCache> dex_cache, uint32_t dex_field_idx)
REQUIRES_SHARED(Locks::mutator_lock_);
// Finds the given static field in this class or a superclass.
- static ArtField* FindStaticField(Thread* self, Handle<Class> klass, const StringPiece& name,
+ static ArtField* FindStaticField(Thread* self,
+ Handle<Class> klass,
+ const StringPiece& name,
const StringPiece& type)
REQUIRES_SHARED(Locks::mutator_lock_);
// Finds the given static field in this class or superclass; only searches classes that
// have the same dex cache.
static ArtField* FindStaticField(Thread* self,
- Class* klass,
- const DexCache* dex_cache,
+ ObjPtr<Class> klass,
+ ObjPtr<DexCache> dex_cache,
uint32_t dex_field_idx)
REQUIRES_SHARED(Locks::mutator_lock_);
ArtField* FindDeclaredStaticField(const StringPiece& name, const StringPiece& type)
REQUIRES_SHARED(Locks::mutator_lock_);
- ArtField* FindDeclaredStaticField(const DexCache* dex_cache, uint32_t dex_field_idx)
+ ArtField* FindDeclaredStaticField(ObjPtr<DexCache> dex_cache, uint32_t dex_field_idx)
REQUIRES_SHARED(Locks::mutator_lock_);
pid_t GetClinitThreadId() REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -1148,7 +1162,7 @@ class MANAGED Class FINAL : public Object {
}
// Can't call this SetClass or else it gets called instead of Object::SetClass in places.
- static void SetClassClass(Class* java_lang_Class) REQUIRES_SHARED(Locks::mutator_lock_);
+ static void SetClassClass(ObjPtr<Class> java_lang_Class) REQUIRES_SHARED(Locks::mutator_lock_);
static void ResetClass();
static void VisitRoots(RootVisitor* visitor)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -1178,8 +1192,9 @@ class MANAGED Class FINAL : public Object {
uint16_t GetDirectInterfaceTypeIdx(uint32_t idx) REQUIRES_SHARED(Locks::mutator_lock_);
- static mirror::Class* GetDirectInterface(Thread* self, Handle<mirror::Class> klass,
- uint32_t idx)
+ static ObjPtr<Class> GetDirectInterface(Thread* self,
+ Handle<Class> klass,
+ uint32_t idx)
REQUIRES_SHARED(Locks::mutator_lock_);
const char* GetSourceFile() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -1194,7 +1209,9 @@ class MANAGED Class FINAL : public Object {
void AssertInitializedOrInitializingInThread(Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_);
- Class* CopyOf(Thread* self, int32_t new_length, ImTable* imt,
+ Class* CopyOf(Thread* self,
+ int32_t new_length,
+ ImTable* imt,
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
@@ -1218,8 +1235,9 @@ class MANAGED Class FINAL : public Object {
}
// May cause thread suspension due to EqualParameters.
- ArtMethod* GetDeclaredConstructor(
- Thread* self, Handle<mirror::ObjectArray<mirror::Class>> args, PointerSize pointer_size)
+ ArtMethod* GetDeclaredConstructor(Thread* self,
+ Handle<ObjectArray<Class>> args,
+ PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
static int32_t GetInnerClassFlags(Handle<Class> h_this, int32_t default_value)
@@ -1232,7 +1250,7 @@ class MANAGED Class FINAL : public Object {
explicit InitializeClassVisitor(uint32_t class_size) : class_size_(class_size) {
}
- void operator()(ObjPtr<mirror::Object> obj, size_t usable_size) const
+ void operator()(ObjPtr<Object> obj, size_t usable_size) const
REQUIRES_SHARED(Locks::mutator_lock_);
private:
@@ -1277,14 +1295,14 @@ class MANAGED Class FINAL : public Object {
template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
typename Visitor>
- void FixupNativePointers(mirror::Class* dest, PointerSize pointer_size, const Visitor& visitor)
+ void FixupNativePointers(Class* dest, PointerSize pointer_size, const Visitor& visitor)
REQUIRES_SHARED(Locks::mutator_lock_);
private:
ALWAYS_INLINE void SetMethodsPtrInternal(LengthPrefixedArray<ArtMethod>* new_methods)
REQUIRES_SHARED(Locks::mutator_lock_);
- void SetVerifyError(Object* klass) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetVerifyError(ObjPtr<Object> klass) REQUIRES_SHARED(Locks::mutator_lock_);
template <bool throw_on_failure, bool use_referrers_cache>
bool ResolvedFieldAccessTest(ObjPtr<Class> access_to,
@@ -1300,7 +1318,7 @@ class MANAGED Class FINAL : public Object {
ObjPtr<DexCache> dex_cache)
REQUIRES_SHARED(Locks::mutator_lock_);
- bool Implements(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
+ bool Implements(ObjPtr<Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
bool IsArrayAssignableFromArray(ObjPtr<Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
bool IsAssignableFromArray(ObjPtr<Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
@@ -1333,7 +1351,7 @@ class MANAGED Class FINAL : public Object {
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
typename Visitor>
- void VisitReferences(mirror::Class* klass, const Visitor& visitor)
+ void VisitReferences(ObjPtr<Class> klass, const Visitor& visitor)
REQUIRES_SHARED(Locks::mutator_lock_);
// 'Class' Object Fields
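
The signatures above encode the rule that recurs throughout the class.cc hunks: an ObjPtr<> is only trusted while the thread cannot suspend, so before any call documented as "may cause thread suspension" the value is promoted into a Handle<> rooted in a StackHandleScope and re-read from the handle afterwards. The shape, reduced to an ART-style fragment (not code from this patch):

ObjPtr<Method> Example(Thread* self, ObjPtr<Class> klass, ObjPtr<String> name)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  StackHandleScope<2> hs(self);
  Handle<Class> h_klass = hs.NewHandle(klass);  // survives a moving GC
  Handle<String> h_name = hs.NewHandle(name);
  // ... calls that may suspend; use h_klass.Get() / h_name.Get() afterwards ...
  return nullptr;
}
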
diff --git a/runtime/mirror/field-inl.h b/runtime/mirror/field-inl.h
index 445f23fa08..adc5107570 100644
--- a/runtime/mirror/field-inl.h
+++ b/runtime/mirror/field-inl.h
@@ -48,7 +48,7 @@ inline mirror::Field* Field::CreateFromArtField(Thread* self, ArtField* field, b
self->ClearException();
}
}
- auto ret = hs.NewHandle(static_cast<Field*>(StaticClass()->AllocObject(self)));
+ auto ret = hs.NewHandle(ObjPtr<Field>::DownCast(StaticClass()->AllocObject(self)));
if (UNLIKELY(ret.Get() == nullptr)) {
self->AssertPendingOOMException();
return nullptr;
@@ -80,7 +80,7 @@ inline mirror::Field* Field::CreateFromArtField(Thread* self, ArtField* field, b
template<bool kTransactionActive>
void Field::SetDeclaringClass(ObjPtr<mirror::Class> c) {
- SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, declaring_class_), c.Ptr());
+ SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, declaring_class_), c);
}
} // namespace mirror
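
The allocation pattern above repeats in method.cc, method_type.cc and stack_trace_element.cc below: AllocObject() returns null only with an OOM already pending, so each factory asserts that and bails out instead of raising a second error. Condensed from the hunks (not a new API):

ObjPtr<Field> ret = ObjPtr<Field>::DownCast(StaticClass()->AllocObject(self));
if (UNLIKELY(ret == nullptr)) {
  self->AssertPendingOOMException();
  return nullptr;
}
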
diff --git a/runtime/mirror/method.cc b/runtime/mirror/method.cc
index 71bac7e3d6..7ddadda06b 100644
--- a/runtime/mirror/method.cc
+++ b/runtime/mirror/method.cc
@@ -54,12 +54,12 @@ void Method::ResetArrayClass() {
template <PointerSize kPointerSize, bool kTransactionActive>
Method* Method::CreateFromArtMethod(Thread* self, ArtMethod* method) {
DCHECK(!method->IsConstructor()) << PrettyMethod(method);
- auto* ret = down_cast<Method*>(StaticClass()->AllocObject(self));
+ ObjPtr<Method> ret = ObjPtr<Method>::DownCast(StaticClass()->AllocObject(self));
if (LIKELY(ret != nullptr)) {
- static_cast<Executable*>(ret)->
+ ObjPtr<Executable>(ret)->
CreateFromArtMethod<kPointerSize, kTransactionActive>(method);
}
- return ret;
+ return ret.Ptr();
}
template Method* Method::CreateFromArtMethod<PointerSize::k32, false>(Thread* self,
@@ -106,12 +106,12 @@ void Constructor::VisitRoots(RootVisitor* visitor) {
template <PointerSize kPointerSize, bool kTransactionActive>
Constructor* Constructor::CreateFromArtMethod(Thread* self, ArtMethod* method) {
DCHECK(method->IsConstructor()) << PrettyMethod(method);
- auto* ret = down_cast<Constructor*>(StaticClass()->AllocObject(self));
+ ObjPtr<Constructor> ret = ObjPtr<Constructor>::DownCast(StaticClass()->AllocObject(self));
if (LIKELY(ret != nullptr)) {
- static_cast<Executable*>(ret)->
+ ObjPtr<Executable>(ret)->
CreateFromArtMethod<kPointerSize, kTransactionActive>(method);
}
- return ret;
+ return ret.Ptr();
}
template Constructor* Constructor::CreateFromArtMethod<PointerSize::k32, false>(
diff --git a/runtime/mirror/method_type.cc b/runtime/mirror/method_type.cc
index ba6ea5e4ff..0b52931b76 100644
--- a/runtime/mirror/method_type.cc
+++ b/runtime/mirror/method_type.cc
@@ -29,7 +29,7 @@ mirror::MethodType* MethodType::Create(Thread* const self,
Handle<ObjectArray<Class>> param_types) {
StackHandleScope<1> hs(self);
Handle<mirror::MethodType> mt(
- hs.NewHandle(static_cast<MethodType*>(StaticClass()->AllocObject(self))));
+ hs.NewHandle(ObjPtr<MethodType>::DownCast(StaticClass()->AllocObject(self))));
// TODO: Do we ever create a MethodType during a transaction? There doesn't
// seem to be a good reason to do a polymorphic invoke that results in the
diff --git a/runtime/mirror/object_array-inl.h b/runtime/mirror/object_array-inl.h
index d5bc2564bd..3c2390b08d 100644
--- a/runtime/mirror/object_array-inl.h
+++ b/runtime/mirror/object_array-inl.h
@@ -25,6 +25,7 @@
#include "base/stringprintf.h"
#include "gc/heap.h"
#include "mirror/class.h"
+#include "obj_ptr-inl.h"
#include "runtime.h"
#include "handle_scope-inl.h"
#include "thread.h"
@@ -34,24 +35,29 @@ namespace art {
namespace mirror {
template<class T>
-inline ObjectArray<T>* ObjectArray<T>::Alloc(Thread* self, Class* object_array_class,
+inline ObjectArray<T>* ObjectArray<T>::Alloc(Thread* self,
+ ObjPtr<Class> object_array_class,
int32_t length, gc::AllocatorType allocator_type) {
- Array* array = Array::Alloc<true>(self, object_array_class, length,
- ComponentSizeShiftWidth(sizeof(HeapReference<Object>)),
+ Array* array = Array::Alloc<true>(self,
+ object_array_class.Ptr(),
+ length,
+ ComponentSizeShiftWidth(kHeapReferenceSize),
allocator_type);
if (UNLIKELY(array == nullptr)) {
return nullptr;
- } else {
- DCHECK_EQ(array->GetClass()->GetComponentSizeShift(),
- ComponentSizeShiftWidth(sizeof(HeapReference<Object>)));
- return array->AsObjectArray<T>();
}
+ DCHECK_EQ(array->GetClass()->GetComponentSizeShift(),
+ ComponentSizeShiftWidth(kHeapReferenceSize));
+ return array->AsObjectArray<T>();
}
template<class T>
-inline ObjectArray<T>* ObjectArray<T>::Alloc(Thread* self, Class* object_array_class,
+inline ObjectArray<T>* ObjectArray<T>::Alloc(Thread* self,
+ ObjPtr<Class> object_array_class,
int32_t length) {
- return Alloc(self, object_array_class, length,
+ return Alloc(self,
+ object_array_class,
+ length,
Runtime::Current()->GetHeap()->GetCurrentAllocator());
}
@@ -65,7 +71,7 @@ inline T* ObjectArray<T>::Get(int32_t i) {
}
template<class T> template<VerifyObjectFlags kVerifyFlags>
-inline bool ObjectArray<T>::CheckAssignable(T* object) {
+inline bool ObjectArray<T>::CheckAssignable(ObjPtr<T> object) {
if (object != nullptr) {
Class* element_class = GetClass<kVerifyFlags>()->GetComponentType();
if (UNLIKELY(!object->InstanceOf(element_class))) {
@@ -77,7 +83,7 @@ inline bool ObjectArray<T>::CheckAssignable(T* object) {
}
template<class T>
-inline void ObjectArray<T>::Set(int32_t i, T* object) {
+inline void ObjectArray<T>::Set(int32_t i, ObjPtr<T> object) {
if (Runtime::Current()->IsActiveTransaction()) {
Set<true>(i, object);
} else {
@@ -87,7 +93,7 @@ inline void ObjectArray<T>::Set(int32_t i, T* object) {
template<class T>
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
-inline void ObjectArray<T>::Set(int32_t i, T* object) {
+inline void ObjectArray<T>::Set(int32_t i, ObjPtr<T> object) {
if (CheckIsValidIndex(i) && CheckAssignable<kVerifyFlags>(object)) {
SetFieldObject<kTransactionActive, kCheckTransaction, kVerifyFlags>(OffsetOfElement(i), object);
} else {
@@ -97,7 +103,7 @@ inline void ObjectArray<T>::Set(int32_t i, T* object) {
template<class T>
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
-inline void ObjectArray<T>::SetWithoutChecks(int32_t i, T* object) {
+inline void ObjectArray<T>::SetWithoutChecks(int32_t i, ObjPtr<T> object) {
DCHECK(CheckIsValidIndex<kVerifyFlags>(i));
DCHECK(CheckAssignable<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>(object));
SetFieldObject<kTransactionActive, kCheckTransaction, kVerifyFlags>(OffsetOfElement(i), object);
@@ -105,7 +111,7 @@ inline void ObjectArray<T>::SetWithoutChecks(int32_t i, T* object) {
template<class T>
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
-inline void ObjectArray<T>::SetWithoutChecksAndWriteBarrier(int32_t i, T* object) {
+inline void ObjectArray<T>::SetWithoutChecksAndWriteBarrier(int32_t i, ObjPtr<T> object) {
DCHECK(CheckIsValidIndex<kVerifyFlags>(i));
// TODO: enable this check. It fails when writing the image in ImageWriter::FixupObjectArray.
// DCHECK(CheckAssignable(object));
@@ -120,8 +126,10 @@ inline T* ObjectArray<T>::GetWithoutChecks(int32_t i) {
}
template<class T>
-inline void ObjectArray<T>::AssignableMemmove(int32_t dst_pos, ObjectArray<T>* src,
- int32_t src_pos, int32_t count) {
+inline void ObjectArray<T>::AssignableMemmove(int32_t dst_pos,
+ ObjPtr<ObjectArray<T>> src,
+ int32_t src_pos,
+ int32_t count) {
if (kIsDebugBuild) {
for (int i = 0; i < count; ++i) {
// The get will perform the VerifyObject.
@@ -160,8 +168,10 @@ inline void ObjectArray<T>::AssignableMemmove(int32_t dst_pos, ObjectArray<T>* s
}
template<class T>
-inline void ObjectArray<T>::AssignableMemcpy(int32_t dst_pos, ObjectArray<T>* src,
- int32_t src_pos, int32_t count) {
+inline void ObjectArray<T>::AssignableMemcpy(int32_t dst_pos,
+ ObjPtr<ObjectArray<T>> src,
+ int32_t src_pos,
+ int32_t count) {
if (kIsDebugBuild) {
for (int i = 0; i < count; ++i) {
// The get will perform the VerifyObject.
@@ -190,8 +200,10 @@ inline void ObjectArray<T>::AssignableMemcpy(int32_t dst_pos, ObjectArray<T>* sr
template<class T>
template<bool kTransactionActive>
-inline void ObjectArray<T>::AssignableCheckingMemcpy(int32_t dst_pos, ObjectArray<T>* src,
- int32_t src_pos, int32_t count,
+inline void ObjectArray<T>::AssignableCheckingMemcpy(int32_t dst_pos,
+ ObjPtr<ObjectArray<T>> src,
+ int32_t src_pos,
+ int32_t count,
bool throw_exception) {
DCHECK_NE(this, src)
<< "This case should be handled with memmove that handles overlaps correctly";
@@ -258,8 +270,7 @@ inline ObjectArray<T>* ObjectArray<T>::CopyOf(Thread* self, int32_t new_length)
template<class T>
inline MemberOffset ObjectArray<T>::OffsetOfElement(int32_t i) {
- return MemberOffset(DataOffset(sizeof(HeapReference<Object>)).Int32Value() +
- (i * sizeof(HeapReference<Object>)));
+ return MemberOffset(DataOffset(kHeapReferenceSize).Int32Value() + (i * kHeapReferenceSize));
}
template<class T> template<typename Visitor>
diff --git a/runtime/mirror/object_array.h b/runtime/mirror/object_array.h
index 19b9d87ece..e4e954e4c1 100644
--- a/runtime/mirror/object_array.h
+++ b/runtime/mirror/object_array.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_MIRROR_OBJECT_ARRAY_H_
#include "array.h"
+#include "obj_ptr.h"
namespace art {
namespace mirror {
@@ -30,11 +31,15 @@ class MANAGED ObjectArray: public Array {
return Array::ClassSize(pointer_size);
}
- static ObjectArray<T>* Alloc(Thread* self, Class* object_array_class, int32_t length,
+ static ObjectArray<T>* Alloc(Thread* self,
+ ObjPtr<Class> object_array_class,
+ int32_t length,
gc::AllocatorType allocator_type)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
- static ObjectArray<T>* Alloc(Thread* self, Class* object_array_class, int32_t length)
+ static ObjectArray<T>* Alloc(Thread* self,
+ ObjPtr<Class> object_array_class,
+ int32_t length)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
@@ -45,13 +50,13 @@ class MANAGED ObjectArray: public Array {
// an ArrayStoreException and returns false.
// TODO fix thread safety analysis: should be REQUIRES_SHARED(Locks::mutator_lock_).
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool CheckAssignable(T* object) NO_THREAD_SAFETY_ANALYSIS;
+ bool CheckAssignable(ObjPtr<T> object) NO_THREAD_SAFETY_ANALYSIS;
- ALWAYS_INLINE void Set(int32_t i, T* object) REQUIRES_SHARED(Locks::mutator_lock_);
+ ALWAYS_INLINE void Set(int32_t i, ObjPtr<T> object) REQUIRES_SHARED(Locks::mutator_lock_);
// TODO fix thread safety analysis: should be REQUIRES_SHARED(Locks::mutator_lock_).
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE void Set(int32_t i, T* object) NO_THREAD_SAFETY_ANALYSIS;
+ ALWAYS_INLINE void Set(int32_t i, ObjPtr<T> object) NO_THREAD_SAFETY_ANALYSIS;
// Set element without bound and element type checks, to be used in limited
// circumstances, such as during boot image writing.
@@ -59,32 +64,42 @@ class MANAGED ObjectArray: public Array {
// REQUIRES_SHARED(Locks::mutator_lock_).
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE void SetWithoutChecks(int32_t i, T* object) NO_THREAD_SAFETY_ANALYSIS;
+ ALWAYS_INLINE void SetWithoutChecks(int32_t i, ObjPtr<T> object) NO_THREAD_SAFETY_ANALYSIS;
// TODO fix thread safety analysis broken by the use of template. This should be
// REQUIRES_SHARED(Locks::mutator_lock_).
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE void SetWithoutChecksAndWriteBarrier(int32_t i, T* object)
+ ALWAYS_INLINE void SetWithoutChecksAndWriteBarrier(int32_t i, ObjPtr<T> object)
NO_THREAD_SAFETY_ANALYSIS;
ALWAYS_INLINE T* GetWithoutChecks(int32_t i) REQUIRES_SHARED(Locks::mutator_lock_);
// Copy src into this array (dealing with overlaps as memmove does) without assignability checks.
- void AssignableMemmove(int32_t dst_pos, ObjectArray<T>* src, int32_t src_pos,
- int32_t count) REQUIRES_SHARED(Locks::mutator_lock_);
+ void AssignableMemmove(int32_t dst_pos,
+ ObjPtr<ObjectArray<T>> src,
+ int32_t src_pos,
+ int32_t count)
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Copy src into this array assuming no overlap and without assignability checks.
- void AssignableMemcpy(int32_t dst_pos, ObjectArray<T>* src, int32_t src_pos,
- int32_t count) REQUIRES_SHARED(Locks::mutator_lock_);
+ void AssignableMemcpy(int32_t dst_pos,
+ ObjPtr<ObjectArray<T>> src,
+ int32_t src_pos,
+ int32_t count)
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Copy src into this array with assignability checks.
template<bool kTransactionActive>
- void AssignableCheckingMemcpy(int32_t dst_pos, ObjectArray<T>* src, int32_t src_pos,
- int32_t count, bool throw_exception)
+ void AssignableCheckingMemcpy(int32_t dst_pos,
+ ObjPtr<ObjectArray<T>> src,
+ int32_t src_pos,
+ int32_t count,
+ bool throw_exception)
REQUIRES_SHARED(Locks::mutator_lock_);
ObjectArray<T>* CopyOf(Thread* self, int32_t new_length)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_);
static MemberOffset OffsetOfElement(int32_t i);
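
The two-layer Set() above, a plain overload dispatching to the kTransactionActive template as in the object_array-inl.h hunks, is how these setters keep transaction support out of the hot path: the runtime flag is checked once, then a compile-time instantiation with no internal branch performs the store. A standalone sketch of the dispatch shape (names here are illustrative, not ART code):

#include <cstdio>

static bool gActiveTransaction = false;  // stand-in for Runtime::IsActiveTransaction()

template <bool kTransactionActive>
void SetImpl(int i, int value) {
  if (kTransactionActive) {  // constant-folded away in the <false> instantiation
    std::printf("recording old value of slot %d for rollback\n", i);
  }
  // ... perform the actual store ...
  (void)value;
}

void Set(int i, int value) {
  if (gActiveTransaction) {
    SetImpl<true>(i, value);
  } else {
    SetImpl<false>(i, value);
  }
}

int main() {
  Set(0, 42);                 // non-transactional path: no rollback record
  gActiveTransaction = true;
  Set(1, 7);                  // transactional path: logs a rollback record
  return 0;
}
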
diff --git a/runtime/mirror/object_reference-inl.h b/runtime/mirror/object_reference-inl.h
index 60955d60df..e70b93607e 100644
--- a/runtime/mirror/object_reference-inl.h
+++ b/runtime/mirror/object_reference-inl.h
@@ -24,7 +24,11 @@
namespace art {
namespace mirror {
-// References between objects within the managed heap.
+template<bool kPoisonReferences, class MirrorType>
+void ObjectReference<kPoisonReferences, MirrorType>::Assign(ObjPtr<MirrorType> ptr) {
+ Assign(ptr.Ptr());
+}
+
template<class MirrorType>
HeapReference<MirrorType> HeapReference<MirrorType>::FromObjPtr(ObjPtr<MirrorType> ptr) {
return HeapReference<MirrorType>(ptr.Ptr());
diff --git a/runtime/mirror/object_reference.h b/runtime/mirror/object_reference.h
index 573cb308bd..71f34c66e2 100644
--- a/runtime/mirror/object_reference.h
+++ b/runtime/mirror/object_reference.h
@@ -42,6 +42,9 @@ class MANAGED ObjectReference {
reference_ = Compress(other);
}
+ void Assign(ObjPtr<MirrorType> ptr)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
void Clear() {
reference_ = 0;
DCHECK(IsNull());
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index 062afd31ee..60e2bf81e6 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -139,10 +139,10 @@ TEST_F(ObjectTest, AllocObjectArray) {
ASSERT_TRUE(oa->GetClass() != nullptr);
Handle<mirror::Class> klass(hs.NewHandle(oa->GetClass()));
ASSERT_EQ(2U, klass->NumDirectInterfaces());
- EXPECT_EQ(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Cloneable;"),
- mirror::Class::GetDirectInterface(soa.Self(), klass, 0));
- EXPECT_EQ(class_linker_->FindSystemClass(soa.Self(), "Ljava/io/Serializable;"),
- mirror::Class::GetDirectInterface(soa.Self(), klass, 1));
+ EXPECT_OBJ_PTR_EQ(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Cloneable;"),
+ mirror::Class::GetDirectInterface(soa.Self(), klass, 0));
+ EXPECT_OBJ_PTR_EQ(class_linker_->FindSystemClass(soa.Self(), "Ljava/io/Serializable;"),
+ mirror::Class::GetDirectInterface(soa.Self(), klass, 1));
}
TEST_F(ObjectTest, AllocArray) {
diff --git a/runtime/mirror/reference-inl.h b/runtime/mirror/reference-inl.h
index 039989bcb4..a449b41087 100644
--- a/runtime/mirror/reference-inl.h
+++ b/runtime/mirror/reference-inl.h
@@ -19,6 +19,8 @@
#include "reference.h"
+#include "obj_ptr-inl.h"
+
namespace art {
namespace mirror {
@@ -27,6 +29,24 @@ inline uint32_t Reference::ClassSize(PointerSize pointer_size) {
return Class::ComputeClassSize(false, vtable_entries, 2, 0, 0, 0, 0, pointer_size);
}
+template<bool kTransactionActive>
+inline void Reference::SetReferent(ObjPtr<Object> referent) {
+ SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), referent);
+}
+
+inline void Reference::SetPendingNext(ObjPtr<Reference> pending_next) {
+ if (Runtime::Current()->IsActiveTransaction()) {
+ SetFieldObject<true>(PendingNextOffset(), pending_next);
+ } else {
+ SetFieldObject<false>(PendingNextOffset(), pending_next);
+ }
+}
+
+template<bool kTransactionActive>
+inline void FinalizerReference::SetZombie(ObjPtr<Object> zombie) {
+ return SetFieldObjectVolatile<kTransactionActive>(ZombieOffset(), zombie);
+}
+
} // namespace mirror
} // namespace art
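
Moving these bodies out of reference.h (see the reference.h hunks below) is what forces the include switch in reference.cc: definitions taking ObjPtr<> arguments need obj_ptr-inl.h, which the -inl header pulls in so that plain reference.h stays lightweight. Callers choose the header by what they call; illustrative includes, assuming the in-directory paths used in this file:

#include "reference.h"      // sufficient for the class layout and out-of-line API
#include "reference-inl.h"  // additionally required to call the inlined
                            // SetReferent / SetPendingNext / SetZombie
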
diff --git a/runtime/mirror/reference.cc b/runtime/mirror/reference.cc
index 3c7f8c8e63..1d0b4c5b27 100644
--- a/runtime/mirror/reference.cc
+++ b/runtime/mirror/reference.cc
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "reference.h"
+#include "reference-inl.h"
#include "art_method.h"
#include "gc_root-inl.h"
@@ -24,7 +24,7 @@ namespace mirror {
GcRoot<Class> Reference::java_lang_ref_Reference_;
-void Reference::SetClass(Class* java_lang_ref_Reference) {
+void Reference::SetClass(ObjPtr<Class> java_lang_ref_Reference) {
CHECK(java_lang_ref_Reference_.IsNull());
CHECK(java_lang_ref_Reference != nullptr);
java_lang_ref_Reference_ = GcRoot<Class>(java_lang_ref_Reference);
diff --git a/runtime/mirror/reference.h b/runtime/mirror/reference.h
index 6a8b32b62d..f2fa589500 100644
--- a/runtime/mirror/reference.h
+++ b/runtime/mirror/reference.h
@@ -20,6 +20,7 @@
#include "base/enums.h"
#include "class.h"
#include "gc_root.h"
+#include "obj_ptr.h"
#include "object.h"
#include "object_callbacks.h"
#include "read_barrier_option.h"
@@ -69,9 +70,7 @@ class MANAGED Reference : public Object {
ReferentOffset());
}
template<bool kTransactionActive>
- void SetReferent(Object* referent) REQUIRES_SHARED(Locks::mutator_lock_) {
- SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), referent);
- }
+ void SetReferent(ObjPtr<Object> referent) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
void ClearReferent() REQUIRES_SHARED(Locks::mutator_lock_) {
SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), nullptr);
@@ -82,14 +81,7 @@ class MANAGED Reference : public Object {
return GetFieldObject<Reference, kDefaultVerifyFlags, kReadBarrierOption>(PendingNextOffset());
}
- void SetPendingNext(Reference* pending_next)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- if (Runtime::Current()->IsActiveTransaction()) {
- SetFieldObject<true>(PendingNextOffset(), pending_next);
- } else {
- SetFieldObject<false>(PendingNextOffset(), pending_next);
- }
- }
+ void SetPendingNext(ObjPtr<Reference> pending_next) REQUIRES_SHARED(Locks::mutator_lock_);
// Returns true if the reference's pendingNext is null, indicating it is
// okay to process this reference.
@@ -112,7 +104,7 @@ class MANAGED Reference : public Object {
DCHECK(!java_lang_ref_Reference_.IsNull());
return java_lang_ref_Reference_.Read<kReadBarrierOption>();
}
- static void SetClass(Class* klass);
+ static void SetClass(ObjPtr<Class> klass);
static void ResetClass();
static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
@@ -144,9 +136,8 @@ class MANAGED FinalizerReference : public Reference {
}
template<bool kTransactionActive>
- void SetZombie(Object* zombie) REQUIRES_SHARED(Locks::mutator_lock_) {
- return SetFieldObjectVolatile<kTransactionActive>(ZombieOffset(), zombie);
- }
+ void SetZombie(ObjPtr<Object> zombie) REQUIRES_SHARED(Locks::mutator_lock_);
+
Object* GetZombie() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldObjectVolatile<Object>(ZombieOffset());
}
diff --git a/runtime/mirror/stack_trace_element.cc b/runtime/mirror/stack_trace_element.cc
index 96f6a53396..e2050cc144 100644
--- a/runtime/mirror/stack_trace_element.cc
+++ b/runtime/mirror/stack_trace_element.cc
@@ -42,8 +42,8 @@ void StackTraceElement::ResetClass() {
StackTraceElement* StackTraceElement::Alloc(Thread* self, Handle<String> declaring_class,
Handle<String> method_name, Handle<String> file_name,
int32_t line_number) {
- StackTraceElement* trace =
- down_cast<StackTraceElement*>(GetStackTraceElement()->AllocObject(self));
+ ObjPtr<StackTraceElement> trace =
+ ObjPtr<StackTraceElement>::DownCast(GetStackTraceElement()->AllocObject(self));
if (LIKELY(trace != nullptr)) {
if (Runtime::Current()->IsActiveTransaction()) {
trace->Init<true>(declaring_class, method_name, file_name, line_number);
@@ -51,7 +51,7 @@ StackTraceElement* StackTraceElement::Alloc(Thread* self, Handle<String> declari
trace->Init<false>(declaring_class, method_name, file_name, line_number);
}
}
- return trace;
+ return trace.Ptr();
}
template<bool kTransactionActive>
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 2a5c04d54b..ceb37c488e 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -278,7 +278,7 @@ static mirror::Field* GetPublicFieldRecursive(
uint32_t num_direct_interfaces = h_clazz->NumDirectInterfaces();
for (uint32_t i = 0; i < num_direct_interfaces; i++) {
- mirror::Class *iface = mirror::Class::GetDirectInterface(self, h_clazz, i);
+ ObjPtr<mirror::Class> iface = mirror::Class::GetDirectInterface(self, h_clazz, i);
if (UNLIKELY(iface == nullptr)) {
self->AssertPendingException();
return nullptr;
@@ -354,8 +354,8 @@ static jobject Class_getDeclaredConstructorInternal(
ObjPtr<mirror::Constructor> result =
mirror::Class::GetDeclaredConstructorInternal<kRuntimePointerSize, false>(
soa.Self(),
- DecodeClass(soa, javaThis).Ptr(),
- soa.Decode<mirror::ObjectArray<mirror::Class>>(args).Ptr());
+ DecodeClass(soa, javaThis),
+ soa.Decode<mirror::ObjectArray<mirror::Class>>(args));
return soa.AddLocalReference<jobject>(result);
}
@@ -403,11 +403,12 @@ static jobject Class_getDeclaredMethodInternal(JNIEnv* env, jobject javaThis,
ScopedFastNativeObjectAccess soa(env);
DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
DCHECK(!Runtime::Current()->IsActiveTransaction());
- mirror::Method* result = mirror::Class::GetDeclaredMethodInternal<kRuntimePointerSize, false>(
- soa.Self(),
- DecodeClass(soa, javaThis).Ptr(),
- soa.Decode<mirror::String>(name).Ptr(),
- soa.Decode<mirror::ObjectArray<mirror::Class>>(args).Ptr());
+ ObjPtr<mirror::Method> result =
+ mirror::Class::GetDeclaredMethodInternal<kRuntimePointerSize, false>(
+ soa.Self(),
+ DecodeClass(soa, javaThis),
+ soa.Decode<mirror::String>(name),
+ soa.Decode<mirror::ObjectArray<mirror::Class>>(args));
return soa.AddLocalReference<jobject>(result);
}
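
With the internal entry points now taking ObjPtr<> parameters, the ObjPtr returned by soa.Decode<>() flows through unchanged, and the unwrap-and-rewrap via .Ptr() visible in the removed lines above disappears; the wrapper stays intact from JNI decode to JNI encode. Reduced from the hunk above:

soa.Decode<mirror::String>(name)        // was: soa.Decode<mirror::String>(name).Ptr()
soa.AddLocalReference<jobject>(result)  // accepts the ObjPtr directly on the way out
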
diff --git a/runtime/native/java_lang_System.cc b/runtime/native/java_lang_System.cc
index f3756a23c7..3f5fa73b45 100644
--- a/runtime/native/java_lang_System.cc
+++ b/runtime/native/java_lang_System.cc
@@ -35,7 +35,8 @@ namespace art {
* References are never torn regardless of the number of bits used to represent them.
*/
-static void ThrowArrayStoreException_NotAnArray(const char* identifier, mirror::Object* array)
+static void ThrowArrayStoreException_NotAnArray(const char* identifier,
+ ObjPtr<mirror::Object> array)
REQUIRES_SHARED(Locks::mutator_lock_) {
std::string actualType(PrettyTypeOf(array));
Thread* self = Thread::Current();
@@ -62,12 +63,12 @@ static void System_arraycopy(JNIEnv* env, jclass, jobject javaSrc, jint srcPos,
// Make sure source and destination are both arrays.
ObjPtr<mirror::Object> srcObject = soa.Decode<mirror::Object>(javaSrc);
if (UNLIKELY(!srcObject->IsArrayInstance())) {
- ThrowArrayStoreException_NotAnArray("source", srcObject.Ptr());
+ ThrowArrayStoreException_NotAnArray("source", srcObject);
return;
}
ObjPtr<mirror::Object> dstObject = soa.Decode<mirror::Object>(javaDst);
if (UNLIKELY(!dstObject->IsArrayInstance())) {
- ThrowArrayStoreException_NotAnArray("destination", dstObject.Ptr());
+ ThrowArrayStoreException_NotAnArray("destination", dstObject);
return;
}
mirror::Array* srcArray = srcObject->AsArray();
diff --git a/runtime/native/java_lang_ref_Reference.cc b/runtime/native/java_lang_ref_Reference.cc
index 95f6d517e5..bedca109aa 100644
--- a/runtime/native/java_lang_ref_Reference.cc
+++ b/runtime/native/java_lang_ref_Reference.cc
@@ -28,8 +28,8 @@ namespace art {
static jobject Reference_getReferent(JNIEnv* env, jobject javaThis) {
ScopedFastNativeObjectAccess soa(env);
ObjPtr<mirror::Reference> ref = soa.Decode<mirror::Reference>(javaThis);
- mirror::Object* const referent =
- Runtime::Current()->GetHeap()->GetReferenceProcessor()->GetReferent(soa.Self(), ref.Ptr());
+ ObjPtr<mirror::Object> const referent =
+ Runtime::Current()->GetHeap()->GetReferenceProcessor()->GetReferent(soa.Self(), ref);
return soa.AddLocalReference<jobject>(referent);
}
diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc
index 7de0147103..505f85d94c 100644
--- a/runtime/native/java_lang_reflect_Constructor.cc
+++ b/runtime/native/java_lang_reflect_Constructor.cc
@@ -104,7 +104,7 @@ static jobject Constructor_newInstance0(JNIEnv* env, jobject javaMethod, jobject
return InvokeMethod(soa, javaMethod, nullptr, javaArgs, 2);
}
- mirror::Object* receiver =
+ ObjPtr<mirror::Object> receiver =
movable ? c->AllocObject(soa.Self()) : c->AllocNonMovableObject(soa.Self());
if (receiver == nullptr) {
return nullptr;
diff --git a/runtime/native/java_lang_reflect_Executable.cc b/runtime/native/java_lang_reflect_Executable.cc
index e317c25d88..a0a6a12505 100644
--- a/runtime/native/java_lang_reflect_Executable.cc
+++ b/runtime/native/java_lang_reflect_Executable.cc
@@ -38,7 +38,7 @@ static jobjectArray Executable_getDeclaredAnnotationsNative(JNIEnv* env, jobject
ObjPtr<mirror::Class> annotation_array_class =
soa.Decode<mirror::Class>(WellKnownClasses::java_lang_annotation_Annotation__array);
ObjPtr<mirror::ObjectArray<mirror::Object>> empty_array =
- mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), annotation_array_class.Ptr(), 0);
+ mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), annotation_array_class, 0);
return soa.AddLocalReference<jobjectArray>(empty_array);
}
return soa.AddLocalReference<jobjectArray>(annotations::GetAnnotationsForMethod(method));
diff --git a/runtime/native/sun_misc_Unsafe.cc b/runtime/native/sun_misc_Unsafe.cc
index 0bdb5a42a1..670c4ac19a 100644
--- a/runtime/native/sun_misc_Unsafe.cc
+++ b/runtime/native/sun_misc_Unsafe.cc
@@ -71,8 +71,8 @@ static jboolean Unsafe_compareAndSwapObject(JNIEnv* env, jobject, jobject javaOb
field_addr);
}
bool success = obj->CasFieldStrongSequentiallyConsistentObject<false>(MemberOffset(offset),
- expectedValue.Ptr(),
- newValue.Ptr());
+ expectedValue,
+ newValue);
return success ? JNI_TRUE : JNI_FALSE;
}
@@ -168,7 +168,7 @@ static void Unsafe_putObject(JNIEnv* env, jobject, jobject javaObj, jlong offset
ObjPtr<mirror::Object> obj = soa.Decode<mirror::Object>(javaObj);
ObjPtr<mirror::Object> newValue = soa.Decode<mirror::Object>(javaNewValue);
// JNI must use non-transactional mode.
- obj->SetFieldObject<false>(MemberOffset(offset), newValue.Ptr());
+ obj->SetFieldObject<false>(MemberOffset(offset), newValue);
}
static void Unsafe_putObjectVolatile(JNIEnv* env, jobject, jobject javaObj, jlong offset,
@@ -177,7 +177,7 @@ static void Unsafe_putObjectVolatile(JNIEnv* env, jobject, jobject javaObj, jlon
ObjPtr<mirror::Object> obj = soa.Decode<mirror::Object>(javaObj);
ObjPtr<mirror::Object> newValue = soa.Decode<mirror::Object>(javaNewValue);
// JNI must use non-transactional mode.
- obj->SetFieldObjectVolatile<false>(MemberOffset(offset), newValue.Ptr());
+ obj->SetFieldObjectVolatile<false>(MemberOffset(offset), newValue);
}
static void Unsafe_putOrderedObject(JNIEnv* env, jobject, jobject javaObj, jlong offset,
@@ -187,7 +187,7 @@ static void Unsafe_putOrderedObject(JNIEnv* env, jobject, jobject javaObj, jlong
ObjPtr<mirror::Object> newValue = soa.Decode<mirror::Object>(javaNewValue);
QuasiAtomic::ThreadFenceRelease();
// JNI must use non-transactional mode.
- obj->SetFieldObject<false>(MemberOffset(offset), newValue.Ptr());
+ obj->SetFieldObject<false>(MemberOffset(offset), newValue);
}
static jint Unsafe_getArrayBaseOffsetForComponentType(JNIEnv* env, jclass, jobject component_class) {
diff --git a/runtime/openjdkjvmti/OpenjdkJvmTi.cc b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
index 05da585b3a..ac8d5e12d1 100644
--- a/runtime/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
@@ -476,7 +476,8 @@ class JvmtiFunctions {
}
static jvmtiError GetLoadedClasses(jvmtiEnv* env, jint* class_count_ptr, jclass** classes_ptr) {
- return ERR(NOT_IMPLEMENTED);
+ HeapUtil heap_util(&gObjectTagTable);
+ return heap_util.GetLoadedClasses(env, class_count_ptr, classes_ptr);
}
static jvmtiError GetClassLoaderClasses(jvmtiEnv* env,
diff --git a/runtime/openjdkjvmti/heap.cc b/runtime/openjdkjvmti/heap.cc
index 95d9a1d315..859941c81f 100644
--- a/runtime/openjdkjvmti/heap.cc
+++ b/runtime/openjdkjvmti/heap.cc
@@ -19,7 +19,10 @@
#include "art_jvmti.h"
#include "base/macros.h"
#include "base/mutex.h"
+#include "class_linker.h"
#include "gc/heap.h"
+#include "java_vm_ext.h"
+#include "jni_env_ext.h"
#include "mirror/class.h"
#include "object_callbacks.h"
#include "object_tagging.h"
@@ -163,4 +166,49 @@ jvmtiError HeapUtil::IterateThroughHeap(jvmtiEnv* env ATTRIBUTE_UNUSED,
return ERR(NONE);
}
+jvmtiError HeapUtil::GetLoadedClasses(jvmtiEnv* env,
+ jint* class_count_ptr,
+ jclass** classes_ptr) {
+ if (class_count_ptr == nullptr || classes_ptr == nullptr) {
+ return ERR(NULL_POINTER);
+ }
+
+ class ReportClassVisitor : public art::ClassVisitor {
+ public:
+ explicit ReportClassVisitor(art::Thread* self) : self_(self) {}
+
+ bool operator()(art::mirror::Class* klass) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ art::JNIEnvExt* jni_env = self_->GetJniEnv();
+ classes_.push_back(reinterpret_cast<jclass>(jni_env->vm->AddGlobalRef(self_, klass)));
+ return true;
+ }
+
+ art::Thread* self_;
+ std::vector<jclass> classes_;
+ };
+
+ art::Thread* self = art::Thread::Current();
+ ReportClassVisitor rcv(self);
+ {
+ art::ScopedObjectAccess soa(self);
+ art::Runtime::Current()->GetClassLinker()->VisitClasses(&rcv);
+ }
+
+ size_t size = rcv.classes_.size();
+ jclass* classes = nullptr;
+ jvmtiError alloc_ret = env->Allocate(static_cast<jlong>(size * sizeof(jclass)),
+ reinterpret_cast<unsigned char**>(&classes));
+ if (alloc_ret != ERR(NONE)) {
+ return alloc_ret;
+ }
+
+ for (size_t i = 0; i < size; ++i) {
+ classes[i] = rcv.classes_[i];
+ }
+ *classes_ptr = classes;
+ *class_count_ptr = static_cast<jint>(size);
+
+ return ERR(NONE);
+}
+
} // namespace openjdkjvmti
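
A minimal consumer of the new entry point, written against the portable JVMTI interface rather than ART internals; following the JVMTI ownership convention, the agent releases the returned array with Deallocate:

#include <jvmti.h>
#include <cstdio>

static void DumpLoadedClassCount(jvmtiEnv* jvmti) {
  jint count = 0;
  jclass* classes = nullptr;
  jvmtiError err = jvmti->GetLoadedClasses(&count, &classes);
  if (err != JVMTI_ERROR_NONE) {
    std::fprintf(stderr, "GetLoadedClasses failed: %d\n", static_cast<int>(err));
    return;
  }
  std::printf("%d classes currently loaded\n", static_cast<int>(count));
  jvmti->Deallocate(reinterpret_cast<unsigned char*>(classes));
}
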
diff --git a/runtime/openjdkjvmti/heap.h b/runtime/openjdkjvmti/heap.h
index fb9a2164ae..b6becb97bb 100644
--- a/runtime/openjdkjvmti/heap.h
+++ b/runtime/openjdkjvmti/heap.h
@@ -28,6 +28,8 @@ class HeapUtil {
explicit HeapUtil(ObjectTagTable* tags) : tags_(tags) {
}
+ jvmtiError GetLoadedClasses(jvmtiEnv* env, jint* class_count_ptr, jclass** classes_ptr);
+
jvmtiError IterateThroughHeap(jvmtiEnv* env,
jint heap_filter,
jclass klass,
diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc
index 84985c2997..32a55822b7 100644
--- a/runtime/proxy_test.cc
+++ b/runtime/proxy_test.cc
@@ -128,8 +128,8 @@ TEST_F(ProxyTest, ProxyClassHelper) {
ASSERT_TRUE(proxy_class->IsInitialized());
EXPECT_EQ(2U, proxy_class->NumDirectInterfaces()); // Interfaces$I and Interfaces$J.
- EXPECT_EQ(I.Get(), mirror::Class::GetDirectInterface(soa.Self(), proxy_class, 0));
- EXPECT_EQ(J.Get(), mirror::Class::GetDirectInterface(soa.Self(), proxy_class, 1));
+ EXPECT_OBJ_PTR_EQ(I.Get(), mirror::Class::GetDirectInterface(soa.Self(), proxy_class, 0));
+ EXPECT_OBJ_PTR_EQ(J.Get(), mirror::Class::GetDirectInterface(soa.Self(), proxy_class, 1));
std::string temp;
const char* proxy_class_descriptor = proxy_class->GetDescriptor(&temp);
EXPECT_STREQ("L$Proxy1234;", proxy_class_descriptor);
diff --git a/runtime/reflection-inl.h b/runtime/reflection-inl.h
index 52cdfb817a..c4d4fae17c 100644
--- a/runtime/reflection-inl.h
+++ b/runtime/reflection-inl.h
@@ -21,7 +21,7 @@
#include "base/stringprintf.h"
#include "common_throws.h"
-#include "jvalue.h"
+#include "jvalue-inl.h"
#include "mirror/object-inl.h"
#include "obj_ptr-inl.h"
#include "primitive.h"
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 098eb0377a..72db8272f2 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -772,7 +772,7 @@ static bool UnboxPrimitive(ObjPtr<mirror::Object> o,
}
return false;
}
- unboxed_value->SetL(o.Ptr());
+ unboxed_value->SetL(o);
return true;
}
if (UNLIKELY(dst_class->GetPrimitiveType() == Primitive::kPrimVoid)) {
diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc
index 189ed03fb0..22076bbc05 100644
--- a/runtime/reflection_test.cc
+++ b/runtime/reflection_test.cc
@@ -83,14 +83,15 @@ class ReflectionTest : public CommonCompilerTest {
}
void ReflectionTestMakeExecutable(ArtMethod** method,
- mirror::Object** receiver,
- bool is_static, const char* method_name,
+ ObjPtr<mirror::Object>* receiver,
+ bool is_static,
+ const char* method_name,
const char* method_signature)
REQUIRES_SHARED(Locks::mutator_lock_) {
const char* class_name = is_static ? "StaticLeafMethods" : "NonStaticLeafMethods";
jobject jclass_loader(LoadDex(class_name));
Thread* self = Thread::Current();
- StackHandleScope<2> hs(self);
+ StackHandleScope<3> hs(self);
Handle<mirror::ClassLoader> class_loader(
hs.NewHandle(
ScopedObjectAccessUnchecked(self).Decode<mirror::ClassLoader>(jclass_loader)));
@@ -100,8 +101,9 @@ class ReflectionTest : public CommonCompilerTest {
}
MakeExecutable(class_loader.Get(), class_name);
- mirror::Class* c = class_linker_->FindClass(self, DotToDescriptor(class_name).c_str(),
- class_loader);
+ ObjPtr<mirror::Class> c = class_linker_->FindClass(self,
+ DotToDescriptor(class_name).c_str(),
+ class_loader);
CHECK(c != nullptr);
*method = is_static ? c->FindDirectMethod(method_name, method_signature, kRuntimePointerSize)
@@ -112,14 +114,17 @@ class ReflectionTest : public CommonCompilerTest {
*receiver = nullptr;
} else {
// Ensure class is initialized before allocating object
- StackHandleScope<1> hs2(self);
- Handle<mirror::Class> h_class(hs2.NewHandle(c));
- bool initialized = class_linker_->EnsureInitialized(self, h_class, true, true);
- CHECK(initialized);
+ {
+ StackHandleScope<1> hs2(self);
+ HandleWrapperObjPtr<mirror::Class> h_class(hs2.NewHandleWrapper(&c));
+ bool initialized = class_linker_->EnsureInitialized(self, h_class, true, true);
+ CHECK(initialized);
+ }
*receiver = c->AllocObject(self);
}
// Start runtime.
+ HandleWrapperObjPtr<mirror::Object> h(hs.NewHandleWrapper(receiver));
bool started = runtime_->Start();
CHECK(started);
self->TransitionFromSuspendedToRunnable();
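The wrapper handles above exist because EnsureInitialized, AllocObject, and Runtime::Start can all suspend the thread, and a moving GC would leave bare ObjPtr values stale. NewHandleWrapper records the ObjPtr's address and writes the handle's current referent back when the wrapper is destroyed. In miniature, with a hypothetical name (MutableHandle is the real base class; the rest is a sketch):

    // Sketch of the write-back idiom; not the checked-in HandleWrapperObjPtr.
    template <typename T>
    class WrapperSketch : public MutableHandle<T> {
     public:
      WrapperSketch(ObjPtr<T>* obj, MutableHandle<T> handle)
          : MutableHandle<T>(handle), obj_(obj) {}
      // On scope exit, publish the (possibly moved) referent back to the ObjPtr.
      ~WrapperSketch() { *obj_ = MutableHandle<T>::Get(); }
     private:
      ObjPtr<T>* const obj_;
    };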
@@ -128,7 +133,7 @@ class ReflectionTest : public CommonCompilerTest {
void InvokeNopMethod(bool is_static) {
ScopedObjectAccess soa(env_);
ArtMethod* method;
- mirror::Object* receiver;
+ ObjPtr<mirror::Object> receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "nop", "()V");
ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), nullptr);
@@ -137,7 +142,7 @@ class ReflectionTest : public CommonCompilerTest {
void InvokeIdentityByteMethod(bool is_static) {
ScopedObjectAccess soa(env_);
ArtMethod* method;
- mirror::Object* receiver;
+ ObjPtr<mirror::Object> receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "identity", "(B)B");
ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
jvalue args[1];
@@ -163,7 +168,7 @@ class ReflectionTest : public CommonCompilerTest {
void InvokeIdentityIntMethod(bool is_static) {
ScopedObjectAccess soa(env_);
ArtMethod* method;
- mirror::Object* receiver;
+ ObjPtr<mirror::Object> receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "identity", "(I)I");
ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
jvalue args[1];
@@ -188,7 +193,7 @@ class ReflectionTest : public CommonCompilerTest {
void InvokeIdentityDoubleMethod(bool is_static) {
ScopedObjectAccess soa(env_);
ArtMethod* method;
- mirror::Object* receiver;
+ ObjPtr<mirror::Object> receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "identity", "(D)D");
ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
jvalue args[1];
@@ -213,7 +218,7 @@ class ReflectionTest : public CommonCompilerTest {
void InvokeSumIntIntMethod(bool is_static) {
ScopedObjectAccess soa(env_);
ArtMethod* method;
- mirror::Object* receiver;
+ ObjPtr<mirror::Object> receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(II)I");
ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
jvalue args[2];
@@ -242,7 +247,7 @@ class ReflectionTest : public CommonCompilerTest {
void InvokeSumIntIntIntMethod(bool is_static) {
ScopedObjectAccess soa(env_);
ArtMethod* method;
- mirror::Object* receiver;
+ ObjPtr<mirror::Object> receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(III)I");
ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
jvalue args[3];
@@ -281,7 +286,7 @@ class ReflectionTest : public CommonCompilerTest {
void InvokeSumIntIntIntIntMethod(bool is_static) {
ScopedObjectAccess soa(env_);
ArtMethod* method;
- mirror::Object* receiver;
+ ObjPtr<mirror::Object> receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(IIII)I");
ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
jvalue args[4];
@@ -325,7 +330,7 @@ class ReflectionTest : public CommonCompilerTest {
void InvokeSumIntIntIntIntIntMethod(bool is_static) {
ScopedObjectAccess soa(env_);
ArtMethod* method;
- mirror::Object* receiver;
+ ObjPtr<mirror::Object> receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(IIIII)I");
ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
jvalue args[5];
@@ -374,7 +379,7 @@ class ReflectionTest : public CommonCompilerTest {
void InvokeSumDoubleDoubleMethod(bool is_static) {
ScopedObjectAccess soa(env_);
ArtMethod* method;
- mirror::Object* receiver;
+ ObjPtr<mirror::Object> receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(DD)D");
ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
jvalue args[2];
@@ -408,7 +413,7 @@ class ReflectionTest : public CommonCompilerTest {
void InvokeSumDoubleDoubleDoubleMethod(bool is_static) {
ScopedObjectAccess soa(env_);
ArtMethod* method;
- mirror::Object* receiver;
+ ObjPtr<mirror::Object> receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(DDD)D");
ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
jvalue args[3];
@@ -435,7 +440,7 @@ class ReflectionTest : public CommonCompilerTest {
void InvokeSumDoubleDoubleDoubleDoubleMethod(bool is_static) {
ScopedObjectAccess soa(env_);
ArtMethod* method;
- mirror::Object* receiver;
+ ObjPtr<mirror::Object> receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(DDDD)D");
ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
jvalue args[4];
@@ -465,7 +470,7 @@ class ReflectionTest : public CommonCompilerTest {
void InvokeSumDoubleDoubleDoubleDoubleDoubleMethod(bool is_static) {
ScopedObjectAccess soa(env_);
ArtMethod* method;
- mirror::Object* receiver;
+ ObjPtr<mirror::Object> receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(DDDDD)D");
ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
jvalue args[5];
diff --git a/runtime/scoped_thread_state_change-inl.h b/runtime/scoped_thread_state_change-inl.h
index 1ebfd30ff7..bde23c8028 100644
--- a/runtime/scoped_thread_state_change-inl.h
+++ b/runtime/scoped_thread_state_change-inl.h
@@ -83,7 +83,7 @@ template<typename T, bool kPoison>
inline ObjPtr<T, kPoison> ScopedObjectAccessAlreadyRunnable::Decode(jobject obj) const {
Locks::mutator_lock_->AssertSharedHeld(Self());
DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states.
- return down_cast<T*>(Self()->DecodeJObject(obj).Ptr());
+ return ObjPtr<T, kPoison>::DownCast(Self()->DecodeJObject(obj));
}
inline ArtField* ScopedObjectAccessAlreadyRunnable::DecodeField(jfieldID fid) const {
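ObjPtr::DownCast replaces the unwrap/down_cast/rewrap dance in one step. A plausible shape, assuming a static member template guarded by a compile-time hierarchy check (sketch; the real definition lives in obj_ptr.h):

    // Checked downcast from a base ObjPtr to a derived one.
    template <typename T, bool kPoison>
    template <typename SourceT>
    inline ObjPtr<T, kPoison> ObjPtr<T, kPoison>::DownCast(ObjPtr<SourceT> ptr) {
      static_assert(std::is_base_of<SourceT, T>::value,
                    "DownCast may only go from a base type to a derived type");
      return ObjPtr<T, kPoison>(down_cast<T*>(ptr.Ptr()));
    }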
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 45d3e348d6..6acce273c9 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2272,7 +2272,7 @@ void Thread::ThrowNewWrappedException(const char* exception_class_descriptor,
}
DCHECK(!runtime->IsStarted() || exception_class->IsThrowableClass());
Handle<mirror::Throwable> exception(
- hs.NewHandle(down_cast<mirror::Throwable*>(exception_class->AllocObject(this))));
+ hs.NewHandle(ObjPtr<mirror::Throwable>::DownCast(exception_class->AllocObject(this))));
// If we couldn't allocate the exception, throw the pre-allocated out of memory exception.
if (exception.Get() == nullptr) {
diff --git a/runtime/thread.h b/runtime/thread.h
index 376a69c6c0..6f5913e6b3 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -570,6 +570,10 @@ class Thread {
OFFSETOF_MEMBER(tls_32bit_sized_values, is_gc_marking));
}
+ static constexpr size_t IsGcMarkingSize() {
+ return sizeof(tls32_.is_gc_marking);
+ }
+
// Deoptimize the Java stack.
void DeoptimizeWithDeoptimizationException(JValue* result) REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/test/907-get-loaded-classes/build b/test/907-get-loaded-classes/build
new file mode 100755
index 0000000000..898e2e54a2
--- /dev/null
+++ b/test/907-get-loaded-classes/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@" --experimental agents
diff --git a/test/907-get-loaded-classes/expected.txt b/test/907-get-loaded-classes/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/907-get-loaded-classes/expected.txt
diff --git a/test/907-get-loaded-classes/get_loaded_classes.cc b/test/907-get-loaded-classes/get_loaded_classes.cc
new file mode 100644
index 0000000000..e752bcbbeb
--- /dev/null
+++ b/test/907-get-loaded-classes/get_loaded_classes.cc
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "get_loaded_classes.h"
+
+#include <iostream>
+#include <pthread.h>
+#include <stdio.h>
+#include <vector>
+
+#include "base/macros.h"
+#include "jni.h"
+#include "openjdkjvmti/jvmti.h"
+#include "ScopedLocalRef.h"
+#include "ScopedUtfChars.h"
+
+#include "ti-agent/common_load.h"
+
+namespace art {
+namespace Test907GetLoadedClasses {
+
+static jstring GetClassName(JNIEnv* jni_env, jclass cls) {
+ ScopedLocalRef<jclass> class_class(jni_env, jni_env->GetObjectClass(cls));
+ jmethodID mid = jni_env->GetMethodID(class_class.get(), "getName", "()Ljava/lang/String;");
+ return reinterpret_cast<jstring>(jni_env->CallObjectMethod(cls, mid));
+}
+
+extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getLoadedClasses(
+ JNIEnv* env, jclass klass ATTRIBUTE_UNUSED) {
+ jint count = -1;
+ jclass* classes = nullptr;
+ jvmtiError result = jvmti_env->GetLoadedClasses(&count, &classes);
+ if (result != JVMTI_ERROR_NONE) {
+ char* err;
+ jvmti_env->GetErrorName(result, &err);
+ printf("Failure running GetLoadedClasses: %s\n", err);
+ return nullptr;
+ }
+
+ ScopedLocalRef<jclass> obj_class(env, env->FindClass("java/lang/String"));
+ if (obj_class.get() == nullptr) {
+ return nullptr;
+ }
+
+ jobjectArray ret = env->NewObjectArray(count, obj_class.get(), nullptr);
+ if (ret == nullptr) {
+ return ret;
+ }
+
+ for (size_t i = 0; i < static_cast<size_t>(count); ++i) {
+ jstring class_name = GetClassName(env, classes[i]);
+ env->SetObjectArrayElement(ret, static_cast<jint>(i), class_name);
+ env->DeleteLocalRef(class_name);
+ }
+
+ // Need to:
+ // 1) Free the local references.
+ // 2) Deallocate.
+ for (size_t i = 0; i < static_cast<size_t>(count); ++i) {
+ env->DeleteLocalRef(classes[i]);
+ }
+ jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(classes));
+
+ return ret;
+}
+
+} // namespace Test907GetLoadedClasses
+} // namespace art
diff --git a/test/907-get-loaded-classes/get_loaded_classes.h b/test/907-get-loaded-classes/get_loaded_classes.h
new file mode 100644
index 0000000000..4d27f898cc
--- /dev/null
+++ b/test/907-get-loaded-classes/get_loaded_classes.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_TEST_907_GET_LOADED_CLASSES_GET_LOADED_CLASSES_H_
+#define ART_TEST_907_GET_LOADED_CLASSES_GET_LOADED_CLASSES_H_
+
+#include <jni.h>
+
+namespace art {
+namespace Test907GetLoadedClasses {
+
+jint OnLoad(JavaVM* vm, char* options, void* reserved);
+
+} // namespace Test907GetLoadedClasses
+} // namespace art
+
+#endif // ART_TEST_907_GET_LOADED_CLASSES_GET_LOADED_CLASSES_H_
diff --git a/test/907-get-loaded-classes/info.txt b/test/907-get-loaded-classes/info.txt
new file mode 100644
index 0000000000..875a5f6ec1
--- /dev/null
+++ b/test/907-get-loaded-classes/info.txt
@@ -0,0 +1 @@
+Tests the GetLoadedClasses API of the jvmti plugin.
diff --git a/test/907-get-loaded-classes/run b/test/907-get-loaded-classes/run
new file mode 100755
index 0000000000..3e135a378d
--- /dev/null
+++ b/test/907-get-loaded-classes/run
@@ -0,0 +1,43 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+plugin=libopenjdkjvmtid.so
+agent=libtiagentd.so
+lib=tiagentd
+if [[ "$@" == *"-O"* ]]; then
+ agent=libtiagent.so
+ plugin=libopenjdkjvmti.so
+ lib=tiagent
+fi
+
+if [[ "$@" == *"--jvm"* ]]; then
+ arg="jvm"
+else
+ arg="art"
+fi
+
+if [[ "$@" != *"--debuggable"* ]]; then
+ other_args=" -Xcompiler-option --debuggable "
+else
+ other_args=""
+fi
+
+./default-run "$@" --experimental agents \
+ --experimental runtime-plugins \
+ --runtime-option -agentpath:${agent}=907-get-loaded-classes,${arg} \
+ --android-runtime-option -Xplugin:${plugin} \
+ ${other_args} \
+ --args ${lib}
diff --git a/test/907-get-loaded-classes/src/Main.java b/test/907-get-loaded-classes/src/Main.java
new file mode 100644
index 0000000000..468d037a52
--- /dev/null
+++ b/test/907-get-loaded-classes/src/Main.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[1]);
+
+ doTest();
+ }
+
+ public static void doTest() throws Exception {
+ // Ensure some classes are loaded.
+ A a = new A();
+ B b = new B();
+ A[] aArray = new A[5];
+
+ String[] classes = getLoadedClasses();
+ HashSet<String> classesSet = new HashSet<>(Arrays.asList(classes));
+
+ String[] shouldBeLoaded = new String[] {
+ "java.lang.Object", "java.lang.Class", "java.lang.String", "Main$A", "Main$B", "[LMain$A;"
+ };
+
+ boolean error = false;
+ for (String s : shouldBeLoaded) {
+ if (!classesSet.contains(s)) {
+ System.out.println("Did not find " + s);
+ error = true;
+ }
+ }
+
+ if (error) {
+ System.out.println(Arrays.toString(classes));
+ }
+ }
+
+ static class A {
+ }
+
+ static class B {
+ }
+
+ private static native String[] getLoadedClasses();
+}
diff --git a/test/955-methodhandles-smali/expected.txt b/test/955-methodhandles-smali/expected.txt
index 07d2422169..047a287eb8 100644
--- a/test/955-methodhandles-smali/expected.txt
+++ b/test/955-methodhandles-smali/expected.txt
@@ -1,2 +1,8 @@
[String1]+[String2]
[String1]
+[String1]+[String2]
+42
+40
+43
+44
+0-11
diff --git a/test/955-methodhandles-smali/smali/Main.smali b/test/955-methodhandles-smali/smali/Main.smali
index 2fc92f8a83..9681d56f00 100644
--- a/test/955-methodhandles-smali/smali/Main.smali
+++ b/test/955-methodhandles-smali/smali/Main.smali
@@ -66,8 +66,31 @@
return-object v0
.end method
-# Returns a method handle to static String java.lang.String.valueOf(String);
-.method public static getStringValueOfHandle()Ljava/lang/invoke/MethodHandle;
+# Returns a method handle to int java.lang.Long.compareTo(java.lang.Long other).
+.method public static getLongCompareToHandle()Ljava/lang/invoke/MethodHandle;
+.registers 4
+ new-instance v0, Ljava/lang/Long;
+ const-wide v1, 0
+ invoke-direct {v0, v1, v2}, Ljava/lang/Long;-><init>(J)V
+ invoke-virtual {v0}, Ljava/lang/Object;->getClass()Ljava/lang/Class;
+ move-result-object v0
+
+ # set v1 to Integer.TYPE aka. int.class
+ sget-object v1, Ljava/lang/Integer;->TYPE:Ljava/lang/Class;
+
+ # Call MethodType.methodType(rtype=int.class, ptype[0] = Long.class)
+ invoke-static {v1, v0}, Ljava/lang/invoke/MethodType;->methodType(Ljava/lang/Class;Ljava/lang/Class;)Ljava/lang/invoke/MethodType;
+ move-result-object v2
+
+ const-string v3, "compareTo"
+ # Call Main.getHandleForVirtual(Long.class, "compareTo", methodType);
+ invoke-static {v0, v3, v2}, LMain;->getHandleForVirtual(Ljava/lang/Class;Ljava/lang/String;Ljava/lang/invoke/MethodType;)Ljava/lang/invoke/MethodHandle;
+ move-result-object v0
+ return-object v0
+.end method
+
+# Returns a method handle to static String java.lang.String.valueOf(Object);
+.method public static getStringValueOfObjectHandle()Ljava/lang/invoke/MethodHandle;
.registers 4
# set v0 to java.lang.Object.class
new-instance v0, Ljava/lang/Object;
@@ -90,6 +113,26 @@
return-object v0
.end method
+# Returns a method handle to static String java.lang.String.valueOf(long);
+.method public static getStringValueOfLongHandle()Ljava/lang/invoke/MethodHandle;
+.registers 4
+ # set v0 to Long.TYPE aka. long.class
+ sget-object v0, Ljava/lang/Long;->TYPE:Ljava/lang/Class;
+
+ # set v1 to the name of the method ("valueOf") and v2 to java.lang.String.class;
+ const-string v1, "valueOf"
+ invoke-virtual {v1}, Ljava/lang/Object;->getClass()Ljava/lang/Class;
+ move-result-object v2
+
+ # Call MethodType.methodType(rtype=String.class, ptype[0]=long.class)
+ invoke-static {v2, v0}, Ljava/lang/invoke/MethodType;->methodType(Ljava/lang/Class;Ljava/lang/Class;)Ljava/lang/invoke/MethodType;
+ move-result-object v3
+
+ # Call Main.getHandleForStatic(String.class, "valueOf", methodType);
+ invoke-static {v2, v1, v3}, LMain;->getHandleForStatic(Ljava/lang/Class;Ljava/lang/String;Ljava/lang/invoke/MethodType;)Ljava/lang/invoke/MethodHandle;
+ move-result-object v0
+ return-object v0
+.end method
.method public static main([Ljava/lang/String;)V
.registers 5
@@ -105,7 +148,7 @@
invoke-virtual {v4, v3}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
# Test case 2: Exercise String.valueOf(Object);
- invoke-static {}, LMain;->getStringValueOfHandle()Ljava/lang/invoke/MethodHandle;
+ invoke-static {}, LMain;->getStringValueOfObjectHandle()Ljava/lang/invoke/MethodHandle;
move-result-object v0
const-string v1, "[String1]"
invoke-polymorphic {v0, v1}, Ljava/lang/invoke/MethodHandle;->invokeExact([Ljava/lang/Object;)Ljava/lang/Object;, (Ljava/lang/Object;)Ljava/lang/String;
@@ -113,5 +156,88 @@
sget-object v4, Ljava/lang/System;->out:Ljava/io/PrintStream;
invoke-virtual {v4, v3}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
+ # Test case 3: Exercise String.concat(String, String) with an inexact invoke.
+ # Note that the callsite type here is String type(Object, Object); so the runtime
+ # will generate dynamic type checks for the input arguments.
+ invoke-static {}, LMain;->getStringConcatHandle()Ljava/lang/invoke/MethodHandle;
+ move-result-object v0
+ const-string v1, "[String1]"
+ const-string v2, "+[String2]"
+ invoke-polymorphic {v0, v1, v2}, Ljava/lang/invoke/MethodHandle;->invoke([Ljava/lang/Object;)Ljava/lang/Object;, (Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/String;
+ move-result-object v3
+ sget-object v4, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ invoke-virtual {v4, v3}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
+
+ # Test case 4: Exercise String.valueOf(long);
+ #
+ # We exercise it with various types of unboxing / widening conversions
+ invoke-static {}, LMain;->getStringValueOfLongHandle()Ljava/lang/invoke/MethodHandle;
+ move-result-object v0
+
+ # First use a long, this is an invokeExact because the callsite type matches
+ # the function type precisely.
+ const-wide v1, 42
+ invoke-polymorphic {v0, v1, v2}, Ljava/lang/invoke/MethodHandle;->invokeExact([Ljava/lang/Object;)Ljava/lang/Object;, (J)Ljava/lang/String;
+ move-result-object v3
+ sget-object v4, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ invoke-virtual {v4, v3}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
+
+ # Then use an int, should perform a widening conversion.
+ const v1, 40
+ invoke-polymorphic {v0, v1}, Ljava/lang/invoke/MethodHandle;->invoke([Ljava/lang/Object;)Ljava/lang/Object;, (I)Ljava/lang/String;
+ move-result-object v3
+ sget-object v4, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ invoke-virtual {v4, v3}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
+
+ # Then use a java/lang/Long; - should perform an unboxing conversion.
+ new-instance v1, Ljava/lang/Long;
+ const-wide v2, 43
+ invoke-direct {v1, v2, v3}, Ljava/lang/Long;-><init>(J)V
+ invoke-polymorphic {v0, v1}, Ljava/lang/invoke/MethodHandle;->invoke([Ljava/lang/Object;)Ljava/lang/Object;, (Ljava/lang/Long;)Ljava/lang/String;
+ move-result-object v3
+ sget-object v4, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ invoke-virtual {v4, v3}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
+
+ # Then use a java/lang/Integer; - should perform an unboxing in addition to a widening conversion.
+ new-instance v1, Ljava/lang/Integer;
+ const v2, 44
+ invoke-direct {v1, v2}, Ljava/lang/Integer;-><init>(I)V
+ invoke-polymorphic {v0, v1}, Ljava/lang/invoke/MethodHandle;->invoke([Ljava/lang/Object;)Ljava/lang/Object;, (Ljava/lang/Integer;)Ljava/lang/String;
+ move-result-object v3
+ sget-object v4, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ invoke-virtual {v4, v3}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
+
+ # Test case 5: Exercise int Long.compareTo(Long)
+ invoke-static {}, LMain;->getLongCompareToHandle()Ljava/lang/invoke/MethodHandle;
+ move-result-object v0
+ new-instance v1, Ljava/lang/Long;
+ const-wide v2, 43
+ invoke-direct {v1, v2, v3}, Ljava/lang/Long;-><init>(J)V
+
+ # At this point, v0 is our MethodHandle and v1 is the instance we're going to call compareTo on.
+
+ # Call compareTo(Long) - the callsite type matches the handle type exactly, so this behaves like invokeExact.
+ invoke-polymorphic {v0, v1, v1}, Ljava/lang/invoke/MethodHandle;->invoke([Ljava/lang/Object;)Ljava/lang/Object;, (Ljava/lang/Long;Ljava/lang/Long;)I
+ move-result v3
+ sget-object v4, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ invoke-virtual {v4, v3}, Ljava/io/PrintStream;->print(I)V
+
+ # Call compareTo(long) - this is an implicit box.
+ const-wide v2, 44
+ invoke-polymorphic {v0, v1, v2, v3}, Ljava/lang/invoke/MethodHandle;->invoke([Ljava/lang/Object;)Ljava/lang/Object;, (Ljava/lang/Long;J)I
+ move-result v3
+ sget-object v4, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ invoke-virtual {v4, v3}, Ljava/io/PrintStream;->print(I)V
+
+ # Call compareTo(int) - this is an implicit widening to long followed by a box.
+ const v2, 40
+ invoke-polymorphic {v0, v1, v2}, Ljava/lang/invoke/MethodHandle;->invoke([Ljava/lang/Object;)Ljava/lang/Object;, (Ljava/lang/Long;I)I
+ move-result v3
+ sget-object v4, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ invoke-virtual {v4, v3}, Ljava/io/PrintStream;->print(I)V
+
+ # Add a newline at the end of the output.
+ invoke-virtual {v4}, Ljava/io/PrintStream;->println()V
+
return-void
.end method
diff --git a/test/Android.bp b/test/Android.bp
index 45673f55ff..8496ffdc91 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -249,6 +249,7 @@ art_cc_defaults {
"904-object-allocation/tracking.cc",
"905-object-free/tracking_free.cc",
"906-iterate-heap/iterate_heap.cc",
+ "907-get-loaded-classes/get_loaded_classes.cc",
],
shared_libs: [
"libbase",
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 64ff5ba2f2..7a5dab0203 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -263,7 +263,7 @@ endif
# 147-stripped-dex-fallback isn't supported on device because --strip-dex
# requires the zip command.
# 569-checker-pattern-replacement tests behaviour present only on host.
-# 90{2,3,4,5,6} are not supported in current form due to linker
+# 90{2,3,4,5,6,7} are not supported in current form due to linker
# restrictions. See b/31681198
TEST_ART_BROKEN_TARGET_TESTS := \
147-stripped-dex-fallback \
@@ -273,6 +273,7 @@ TEST_ART_BROKEN_TARGET_TESTS := \
904-object-allocation \
905-object-free \
906-iterate-heap \
+ 907-get-loaded-classes \
ifneq (,$(filter target,$(TARGET_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \