summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--build/Android.gtest.mk1
-rw-r--r--compiler/optimizing/builder.cc89
-rw-r--r--compiler/optimizing/builder.h8
-rw-r--r--compiler/optimizing/code_generator_arm.cc63
-rw-r--r--compiler/optimizing/code_generator_arm64.cc3
-rw-r--r--compiler/optimizing/code_generator_x86.cc73
-rw-r--r--compiler/optimizing/code_generator_x86_64.cc75
-rw-r--r--compiler/optimizing/nodes.h60
-rw-r--r--compiler/utils/x86_64/assembler_x86_64.cc2
-rw-r--r--dex2oat/dex2oat.cc1
-rw-r--r--runtime/base/logging.cc24
-rw-r--r--runtime/base/logging.h4
-rw-r--r--runtime/fault_handler.cc49
-rw-r--r--runtime/java_vm_ext.cc6
-rw-r--r--runtime/java_vm_ext_test.cc132
-rw-r--r--runtime/jni_internal_test.cc20
-rw-r--r--runtime/thread.cc46
-rw-r--r--runtime/thread.h4
-rw-r--r--test/424-checkcast/expected.txt0
-rw-r--r--test/424-checkcast/info.txt1
-rw-r--r--test/424-checkcast/src/Main.java73
-rw-r--r--test/425-invoke-super/expected.txt0
-rw-r--r--test/425-invoke-super/info.txt1
-rw-r--r--test/425-invoke-super/smali/invokesuper.smali40
-rw-r--r--test/425-invoke-super/smali/subclass.smali29
-rw-r--r--test/425-invoke-super/smali/superclass.smali29
-rw-r--r--test/425-invoke-super/src/Main.java53
-rw-r--r--test/Android.run-test.mk1
28 files changed, 769 insertions, 118 deletions
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 0259fb4f2c..8753868db4 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -113,6 +113,7 @@ RUNTIME_GTEST_COMMON_SRC_FILES := \
runtime/instruction_set_test.cc \
runtime/intern_table_test.cc \
runtime/interpreter/safe_math_test.cc \
+ runtime/java_vm_ext_test.cc \
runtime/leb128_test.cc \
runtime/mem_map_test.cc \
runtime/mirror/dex_cache_test.cc \
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 8418ab0a7e..76a2be927e 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -374,13 +374,12 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
const size_t number_of_arguments = strlen(descriptor) - (is_instance_call ? 0 : 1);
HInvoke* invoke = nullptr;
- if (invoke_type == kVirtual || invoke_type == kInterface) {
+ if (invoke_type == kVirtual || invoke_type == kInterface || invoke_type == kSuper) {
MethodReference target_method(dex_file_, method_idx);
uintptr_t direct_code;
uintptr_t direct_method;
int table_index;
InvokeType optimized_invoke_type = invoke_type;
- // TODO: Add devirtualization support.
compiler_driver_->ComputeInvokeInfo(dex_compilation_unit_, dex_offset, true, true,
&optimized_invoke_type, &target_method, &table_index,
&direct_code, &direct_method);
@@ -388,15 +387,21 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
return false;
}
- if (invoke_type == kVirtual) {
+ if (optimized_invoke_type == kVirtual) {
invoke = new (arena_) HInvokeVirtual(
arena_, number_of_arguments, return_type, dex_offset, table_index);
- } else {
- DCHECK_EQ(invoke_type, kInterface);
+ } else if (optimized_invoke_type == kInterface) {
invoke = new (arena_) HInvokeInterface(
arena_, number_of_arguments, return_type, dex_offset, method_idx, table_index);
+ } else if (optimized_invoke_type == kDirect) {
+ // For this compiler, sharpening only works if we compile PIC.
+ DCHECK(compiler_driver_->GetCompilerOptions().GetCompilePic());
+ // Treat invoke-direct like static calls for now.
+ invoke = new (arena_) HInvokeStatic(
+ arena_, number_of_arguments, return_type, dex_offset, target_method.dex_method_index);
}
} else {
+ DCHECK(invoke_type == kDirect || invoke_type == kStatic);
// Treat invoke-direct like static calls for now.
invoke = new (arena_) HInvokeStatic(
arena_, number_of_arguments, return_type, dex_offset, method_idx);
@@ -704,6 +709,38 @@ void HGraphBuilder::BuildFillWideArrayData(HInstruction* object,
}
}
+bool HGraphBuilder::BuildTypeCheck(const Instruction& instruction,
+ uint8_t destination,
+ uint8_t reference,
+ uint16_t type_index,
+ uint32_t dex_offset) {
+ bool type_known_final;
+ bool type_known_abstract;
+ bool is_referrers_class;
+ bool can_access = compiler_driver_->CanAccessTypeWithoutChecks(
+ dex_compilation_unit_->GetDexMethodIndex(), *dex_file_, type_index,
+ &type_known_final, &type_known_abstract, &is_referrers_class);
+ if (!can_access) {
+ return false;
+ }
+ HInstruction* object = LoadLocal(reference, Primitive::kPrimNot);
+ HLoadClass* cls = new (arena_) HLoadClass(type_index, is_referrers_class, dex_offset);
+ current_block_->AddInstruction(cls);
+ // The class needs a temporary before being used by the type check.
+ Temporaries temps(graph_, 1);
+ temps.Add(cls);
+ if (instruction.Opcode() == Instruction::INSTANCE_OF) {
+ current_block_->AddInstruction(
+ new (arena_) HInstanceOf(object, cls, type_known_final, dex_offset));
+ UpdateLocal(destination, current_block_->GetLastInstruction());
+ } else {
+ DCHECK_EQ(instruction.Opcode(), Instruction::CHECK_CAST);
+ current_block_->AddInstruction(
+ new (arena_) HCheckCast(object, cls, type_known_final, dex_offset));
+ }
+ return true;
+}
+
void HGraphBuilder::PotentiallyAddSuspendCheck(int32_t target_offset, uint32_t dex_offset) {
if (target_offset <= 0) {
// Unconditionnally add a suspend check to backward branches. We can remove
@@ -858,10 +895,11 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
break;
}
- case Instruction::INVOKE_STATIC:
case Instruction::INVOKE_DIRECT:
- case Instruction::INVOKE_VIRTUAL:
- case Instruction::INVOKE_INTERFACE: {
+ case Instruction::INVOKE_INTERFACE:
+ case Instruction::INVOKE_STATIC:
+ case Instruction::INVOKE_SUPER:
+ case Instruction::INVOKE_VIRTUAL: {
uint32_t method_idx = instruction.VRegB_35c();
uint32_t number_of_vreg_arguments = instruction.VRegA_35c();
uint32_t args[5];
@@ -873,10 +911,11 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
break;
}
- case Instruction::INVOKE_STATIC_RANGE:
case Instruction::INVOKE_DIRECT_RANGE:
- case Instruction::INVOKE_VIRTUAL_RANGE:
- case Instruction::INVOKE_INTERFACE_RANGE: {
+ case Instruction::INVOKE_INTERFACE_RANGE:
+ case Instruction::INVOKE_STATIC_RANGE:
+ case Instruction::INVOKE_SUPER_RANGE:
+ case Instruction::INVOKE_VIRTUAL_RANGE: {
uint32_t method_idx = instruction.VRegB_3rc();
uint32_t number_of_vreg_arguments = instruction.VRegA_3rc();
uint32_t register_index = instruction.VRegC();
@@ -1292,25 +1331,21 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
}
case Instruction::INSTANCE_OF: {
+ uint8_t destination = instruction.VRegA_22c();
+ uint8_t reference = instruction.VRegB_22c();
uint16_t type_index = instruction.VRegC_22c();
- bool type_known_final;
- bool type_known_abstract;
- bool is_referrers_class;
- bool can_access = compiler_driver_->CanAccessTypeWithoutChecks(
- dex_compilation_unit_->GetDexMethodIndex(), *dex_file_, type_index,
- &type_known_final, &type_known_abstract, &is_referrers_class);
- if (!can_access) {
+ if (!BuildTypeCheck(instruction, destination, reference, type_index, dex_offset)) {
+ return false;
+ }
+ break;
+ }
+
+ case Instruction::CHECK_CAST: {
+ uint8_t reference = instruction.VRegA_21c();
+ uint16_t type_index = instruction.VRegB_21c();
+ if (!BuildTypeCheck(instruction, -1, reference, type_index, dex_offset)) {
return false;
}
- HInstruction* object = LoadLocal(instruction.VRegB_22c(), Primitive::kPrimNot);
- HLoadClass* cls = new (arena_) HLoadClass(type_index, is_referrers_class, dex_offset);
- current_block_->AddInstruction(cls);
- // The class needs a temporary before being used by the type check.
- Temporaries temps(graph_, 1);
- temps.Add(cls);
- current_block_->AddInstruction(
- new (arena_) HTypeCheck(object, cls, type_known_final, dex_offset));
- UpdateLocal(instruction.VRegA_22c(), current_block_->GetLastInstruction());
break;
}
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 09c9a51260..9cf83055b2 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -173,6 +173,14 @@ class HGraphBuilder : public ValueObject {
uint32_t element_count,
uint32_t dex_offset);
+ // Builds a `HInstanceOf`, or a `HCheckCast` instruction.
+ // Returns whether we succeeded in building the instruction.
+ bool BuildTypeCheck(const Instruction& instruction,
+ uint8_t destination,
+ uint8_t reference,
+ uint16_t type_index,
+ uint32_t dex_offset);
+
ArenaAllocator* const arena_;
// A list of the size of the dex code holding block information for
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 467c2a6c29..b0a56d58a3 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -269,13 +269,19 @@ class LoadStringSlowPathARM : public SlowPathCodeARM {
class TypeCheckSlowPathARM : public SlowPathCodeARM {
public:
- explicit TypeCheckSlowPathARM(HTypeCheck* instruction, Location object_class)
+ TypeCheckSlowPathARM(HInstruction* instruction,
+ Location class_to_check,
+ Location object_class,
+ uint32_t dex_pc)
: instruction_(instruction),
- object_class_(object_class) {}
+ class_to_check_(class_to_check),
+ object_class_(object_class),
+ dex_pc_(dex_pc) {}
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
- DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+ DCHECK(instruction_->IsCheckCast()
+ || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
@@ -284,7 +290,7 @@ class TypeCheckSlowPathARM : public SlowPathCodeARM {
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
- MoveOperands move1(locations->InAt(1),
+ MoveOperands move1(class_to_check_,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
nullptr);
MoveOperands move2(object_class_,
@@ -295,17 +301,23 @@ class TypeCheckSlowPathARM : public SlowPathCodeARM {
parallel_move.AddMove(&move2);
arm_codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
- arm_codegen->InvokeRuntime(
- QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, instruction_->GetDexPc());
- arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
+ if (instruction_->IsInstanceOf()) {
+ arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc_);
+ arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
+ } else {
+ DCHECK(instruction_->IsCheckCast());
+ arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_);
+ }
codegen->RestoreLiveRegisters(locations);
__ b(GetExitLabel());
}
private:
- HTypeCheck* const instruction_;
+ HInstruction* const instruction_;
+ const Location class_to_check_;
const Location object_class_;
+ uint32_t dex_pc_;
DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM);
};
@@ -2658,7 +2670,7 @@ void InstructionCodeGeneratorARM::VisitThrow(HThrow* instruction) {
QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc());
}
-void LocationsBuilderARM::VisitTypeCheck(HTypeCheck* instruction) {
+void LocationsBuilderARM::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary::CallKind call_kind = instruction->IsClassFinal()
? LocationSummary::kNoCall
: LocationSummary::kCallOnSlowPath;
@@ -2668,7 +2680,7 @@ void LocationsBuilderARM::VisitTypeCheck(HTypeCheck* instruction) {
locations->SetOut(Location::RequiresRegister());
}
-void InstructionCodeGeneratorARM::VisitTypeCheck(HTypeCheck* instruction) {
+void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary* locations = instruction->GetLocations();
Register obj = locations->InAt(0).As<Register>();
Register cls = locations->InAt(1).As<Register>();
@@ -2693,7 +2705,7 @@ void InstructionCodeGeneratorARM::VisitTypeCheck(HTypeCheck* instruction) {
// If the classes are not equal, we go into a slow path.
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
- instruction, Location::RegisterLocation(out));
+ instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
codegen_->AddSlowPath(slow_path);
__ b(slow_path->GetEntryLabel(), NE);
__ LoadImmediate(out, 1);
@@ -2707,5 +2719,34 @@ void InstructionCodeGeneratorARM::VisitTypeCheck(HTypeCheck* instruction) {
__ Bind(&done);
}
+void LocationsBuilderARM::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ instruction, LocationSummary::kCallOnSlowPath);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = locations->InAt(0).As<Register>();
+ Register cls = locations->InAt(1).As<Register>();
+ Register temp = locations->GetTemp(0).As<Register>();
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+
+ SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
+ instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
+ codegen_->AddSlowPath(slow_path);
+
+ // TODO: avoid this check if we know obj is not null.
+ __ cmp(obj, ShifterOperand(0));
+ __ b(slow_path->GetExitLabel(), EQ);
+ // Compare the class of `obj` with `cls`.
+ __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
+ __ cmp(temp, ShifterOperand(cls));
+ __ b(slow_path->GetEntryLabel(), NE);
+ __ Bind(slow_path->GetExitLabel());
+}
+
} // namespace arm
} // namespace art
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 4dc836f412..ac65c1d03d 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -631,8 +631,10 @@ InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph,
codegen_(codegen) {}
#define FOR_EACH_UNIMPLEMENTED_INSTRUCTION(M) \
+ M(CheckCast) \
M(ClinitCheck) \
M(DivZeroCheck) \
+ M(InstanceOf) \
M(InvokeInterface) \
M(LoadClass) \
M(LoadException) \
@@ -641,7 +643,6 @@ InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph,
M(StaticFieldGet) \
M(StaticFieldSet) \
M(Throw) \
- M(TypeCheck) \
M(TypeConversion) \
#define UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name) name##UnimplementedInstructionBreakCode
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index d66180be32..aa609a629c 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -270,13 +270,19 @@ class LoadClassSlowPathX86 : public SlowPathCodeX86 {
class TypeCheckSlowPathX86 : public SlowPathCodeX86 {
public:
- TypeCheckSlowPathX86(HTypeCheck* instruction, Location object_class)
+ TypeCheckSlowPathX86(HInstruction* instruction,
+ Location class_to_check,
+ Location object_class,
+ uint32_t dex_pc)
: instruction_(instruction),
- object_class_(object_class) {}
+ class_to_check_(class_to_check),
+ object_class_(object_class),
+ dex_pc_(dex_pc) {}
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
- DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+ DCHECK(instruction_->IsCheckCast()
+ || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
@@ -285,7 +291,7 @@ class TypeCheckSlowPathX86 : public SlowPathCodeX86 {
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
- MoveOperands move1(locations->InAt(1),
+ MoveOperands move1(class_to_check_,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
nullptr);
MoveOperands move2(object_class_,
@@ -296,17 +302,27 @@ class TypeCheckSlowPathX86 : public SlowPathCodeX86 {
parallel_move.AddMove(&move2);
x86_codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInstanceofNonTrivial)));
- codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
- x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
+ if (instruction_->IsInstanceOf()) {
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInstanceofNonTrivial)));
+ } else {
+ DCHECK(instruction_->IsCheckCast());
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pCheckCast)));
+ }
+
+ codegen->RecordPcInfo(instruction_, dex_pc_);
+ if (instruction_->IsInstanceOf()) {
+ x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
+ }
codegen->RestoreLiveRegisters(locations);
__ jmp(GetExitLabel());
}
private:
- HTypeCheck* const instruction_;
+ HInstruction* const instruction_;
+ const Location class_to_check_;
const Location object_class_;
+ const uint32_t dex_pc_;
DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathX86);
};
@@ -2753,7 +2769,7 @@ void InstructionCodeGeneratorX86::VisitThrow(HThrow* instruction) {
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}
-void LocationsBuilderX86::VisitTypeCheck(HTypeCheck* instruction) {
+void LocationsBuilderX86::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary::CallKind call_kind = instruction->IsClassFinal()
? LocationSummary::kNoCall
: LocationSummary::kCallOnSlowPath;
@@ -2763,7 +2779,7 @@ void LocationsBuilderX86::VisitTypeCheck(HTypeCheck* instruction) {
locations->SetOut(Location::RequiresRegister());
}
-void InstructionCodeGeneratorX86::VisitTypeCheck(HTypeCheck* instruction) {
+void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary* locations = instruction->GetLocations();
Register obj = locations->InAt(0).As<Register>();
Location cls = locations->InAt(1);
@@ -2794,7 +2810,7 @@ void InstructionCodeGeneratorX86::VisitTypeCheck(HTypeCheck* instruction) {
// If the classes are not equal, we go into a slow path.
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(
- instruction, Location::RegisterLocation(out));
+ instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
codegen_->AddSlowPath(slow_path);
__ j(kNotEqual, slow_path->GetEntryLabel());
__ movl(out, Immediate(1));
@@ -2808,5 +2824,40 @@ void InstructionCodeGeneratorX86::VisitTypeCheck(HTypeCheck* instruction) {
__ Bind(&done);
}
+void LocationsBuilderX86::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ instruction, LocationSummary::kCallOnSlowPath);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::Any());
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = locations->InAt(0).As<Register>();
+ Location cls = locations->InAt(1);
+ Register temp = locations->GetTemp(0).As<Register>();
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(
+ instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
+ codegen_->AddSlowPath(slow_path);
+
+ // TODO: avoid this check if we know obj is not null.
+ __ testl(obj, obj);
+ __ j(kEqual, slow_path->GetExitLabel());
+ __ movl(temp, Address(obj, class_offset));
+
+ // Compare the class of `obj` with `cls`.
+ if (cls.IsRegister()) {
+ __ cmpl(temp, cls.As<Register>());
+ } else {
+ DCHECK(cls.IsStackSlot()) << cls;
+ __ cmpl(temp, Address(ESP, cls.GetStackIndex()));
+ }
+
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
+
} // namespace x86
} // namespace art
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index e09b6cab08..bd1e4f4f04 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -284,13 +284,19 @@ class LoadStringSlowPathX86_64 : public SlowPathCodeX86_64 {
class TypeCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
public:
- TypeCheckSlowPathX86_64(HTypeCheck* instruction, Location object_class)
+ TypeCheckSlowPathX86_64(HInstruction* instruction,
+ Location class_to_check,
+ Location object_class,
+ uint32_t dex_pc)
: instruction_(instruction),
- object_class_(object_class) {}
+ class_to_check_(class_to_check),
+ object_class_(object_class),
+ dex_pc_(dex_pc) {}
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
- DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+ DCHECK(instruction_->IsCheckCast()
+ || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
@@ -299,7 +305,7 @@ class TypeCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
- MoveOperands move1(locations->InAt(1),
+ MoveOperands move1(class_to_check_,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
nullptr);
MoveOperands move2(object_class_,
@@ -310,18 +316,29 @@ class TypeCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
parallel_move.AddMove(&move2);
x64_codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
- __ gs()->call(
- Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInstanceofNonTrivial), true));
- codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
- x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
+ if (instruction_->IsInstanceOf()) {
+ __ gs()->call(
+ Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInstanceofNonTrivial), true));
+ } else {
+ DCHECK(instruction_->IsCheckCast());
+ __ gs()->call(
+ Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pCheckCast), true));
+ }
+ codegen->RecordPcInfo(instruction_, dex_pc_);
+
+ if (instruction_->IsInstanceOf()) {
+ x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
+ }
codegen->RestoreLiveRegisters(locations);
__ jmp(GetExitLabel());
}
private:
- HTypeCheck* const instruction_;
+ HInstruction* const instruction_;
+ const Location class_to_check_;
const Location object_class_;
+ const uint32_t dex_pc_;
DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathX86_64);
};
@@ -2743,7 +2760,7 @@ void InstructionCodeGeneratorX86_64::VisitThrow(HThrow* instruction) {
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}
-void LocationsBuilderX86_64::VisitTypeCheck(HTypeCheck* instruction) {
+void LocationsBuilderX86_64::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary::CallKind call_kind = instruction->IsClassFinal()
? LocationSummary::kNoCall
: LocationSummary::kCallOnSlowPath;
@@ -2753,7 +2770,7 @@ void LocationsBuilderX86_64::VisitTypeCheck(HTypeCheck* instruction) {
locations->SetOut(Location::RequiresRegister());
}
-void InstructionCodeGeneratorX86_64::VisitTypeCheck(HTypeCheck* instruction) {
+void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary* locations = instruction->GetLocations();
CpuRegister obj = locations->InAt(0).As<CpuRegister>();
Location cls = locations->InAt(1);
@@ -2783,7 +2800,7 @@ void InstructionCodeGeneratorX86_64::VisitTypeCheck(HTypeCheck* instruction) {
// If the classes are not equal, we go into a slow path.
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(
- instruction, Location::RegisterLocation(out.AsRegister()));
+ instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
codegen_->AddSlowPath(slow_path);
__ j(kNotEqual, slow_path->GetEntryLabel());
__ movl(out, Immediate(1));
@@ -2797,5 +2814,39 @@ void InstructionCodeGeneratorX86_64::VisitTypeCheck(HTypeCheck* instruction) {
__ Bind(&done);
}
+void LocationsBuilderX86_64::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ instruction, LocationSummary::kCallOnSlowPath);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::Any());
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ CpuRegister obj = locations->InAt(0).As<CpuRegister>();
+ Location cls = locations->InAt(1);
+ CpuRegister temp = locations->GetTemp(0).As<CpuRegister>();
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(
+ instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
+ codegen_->AddSlowPath(slow_path);
+
+ // TODO: avoid this check if we know obj is not null.
+ __ testl(obj, obj);
+ __ j(kEqual, slow_path->GetExitLabel());
+ // Compare the class of `obj` with `cls`.
+ __ movl(temp, Address(obj, class_offset));
+ if (cls.IsRegister()) {
+ __ cmpl(temp, cls.As<CpuRegister>());
+ } else {
+ DCHECK(cls.IsStackSlot()) << cls;
+ __ cmpl(temp, Address(CpuRegister(RSP), cls.GetStackIndex()));
+ }
+ // Classes must be equal for the checkcast to succeed.
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
+
} // namespace x86_64
} // namespace art
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 36e286d578..0c75e41fc4 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -479,6 +479,7 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
M(ArrayLength, Instruction) \
M(ArraySet, Instruction) \
M(BoundsCheck, Instruction) \
+ M(CheckCast, Instruction) \
M(ClinitCheck, Instruction) \
M(Compare, BinaryOperation) \
M(Condition, BinaryOperation) \
@@ -494,6 +495,7 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
M(If, Instruction) \
M(InstanceFieldGet, Instruction) \
M(InstanceFieldSet, Instruction) \
+ M(InstanceOf, Instruction) \
M(IntConstant, Constant) \
M(InvokeInterface, Invoke) \
M(InvokeStatic, Invoke) \
@@ -525,7 +527,6 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
M(SuspendCheck, Instruction) \
M(Temporary, Instruction) \
M(Throw, Instruction) \
- M(TypeCheck, Instruction) \
M(TypeConversion, Instruction) \
#define FOR_EACH_INSTRUCTION(M) \
@@ -2355,12 +2356,12 @@ class HThrow : public HTemplateInstruction<1> {
DISALLOW_COPY_AND_ASSIGN(HThrow);
};
-class HTypeCheck : public HExpression<2> {
+class HInstanceOf : public HExpression<2> {
public:
- explicit HTypeCheck(HInstruction* object,
- HLoadClass* constant,
- bool class_is_final,
- uint32_t dex_pc)
+ HInstanceOf(HInstruction* object,
+ HLoadClass* constant,
+ bool class_is_final,
+ uint32_t dex_pc)
: HExpression(Primitive::kPrimBoolean, SideEffects::None()),
class_is_final_(class_is_final),
dex_pc_(dex_pc) {
@@ -2370,13 +2371,11 @@ class HTypeCheck : public HExpression<2> {
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
bool NeedsEnvironment() const OVERRIDE {
- // TODO: Can we debug when doing a runtime instanceof check?
return false;
}
@@ -2384,13 +2383,52 @@ class HTypeCheck : public HExpression<2> {
bool IsClassFinal() const { return class_is_final_; }
- DECLARE_INSTRUCTION(TypeCheck);
+ DECLARE_INSTRUCTION(InstanceOf);
+
+ private:
+ const bool class_is_final_;
+ const uint32_t dex_pc_;
+
+ DISALLOW_COPY_AND_ASSIGN(HInstanceOf);
+};
+
+class HCheckCast : public HTemplateInstruction<2> {
+ public:
+ HCheckCast(HInstruction* object,
+ HLoadClass* constant,
+ bool class_is_final,
+ uint32_t dex_pc)
+ : HTemplateInstruction(SideEffects::None()),
+ class_is_final_(class_is_final),
+ dex_pc_(dex_pc) {
+ SetRawInputAt(0, object);
+ SetRawInputAt(1, constant);
+ }
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ return true;
+ }
+
+ bool NeedsEnvironment() const OVERRIDE {
+ // Instruction may throw a CheckCastError.
+ return true;
+ }
+
+ bool CanThrow() const OVERRIDE { return true; }
+
+ uint32_t GetDexPc() const { return dex_pc_; }
+
+ bool IsClassFinal() const { return class_is_final_; }
+
+ DECLARE_INSTRUCTION(CheckCast);
private:
const bool class_is_final_;
const uint32_t dex_pc_;
- DISALLOW_COPY_AND_ASSIGN(HTypeCheck);
+ DISALLOW_COPY_AND_ASSIGN(HCheckCast);
};
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 5b706584bd..bc547024bf 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -353,7 +353,7 @@ void X86_64Assembler::movss(XmmRegister dst, XmmRegister src) {
void X86_64Assembler::movsxd(CpuRegister dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitRex64(dst);
+ EmitRex64(dst, src);
EmitUint8(0x63);
EmitRegisterOperand(dst.LowBits(), src.LowBits());
}
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index d87faebac0..4951b1f412 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -583,6 +583,7 @@ class Dex2Oat FINAL {
compiler_kind_ = Compiler::kQuick;
} else if (backend_str == "Optimizing") {
compiler_kind_ = Compiler::kOptimizing;
+ compile_pic = true;
} else if (backend_str == "Portable") {
compiler_kind_ = Compiler::kPortable;
} else {
diff --git a/runtime/base/logging.cc b/runtime/base/logging.cc
index d3a2655c94..b781d6008c 100644
--- a/runtime/base/logging.cc
+++ b/runtime/base/logging.cc
@@ -236,4 +236,28 @@ void LogMessage::LogLine(const char* file, unsigned int line, LogSeverity log_se
#endif
}
+void LogMessage::LogLineLowStack(const char* file, unsigned int line, LogSeverity log_severity,
+ const char* message) {
+#ifdef HAVE_ANDROID_OS
+ // TODO: be more conservative on stack usage here.
+ LogLine(file, line, log_severity, message);
+#else
+ static const char* log_characters = "VDIWEFF";
+ CHECK_EQ(strlen(log_characters), INTERNAL_FATAL + 1U);
+
+ const char* program_name = ProgramInvocationShortName();
+ write(STDERR_FILENO, program_name, strlen(program_name));
+ write(STDERR_FILENO, " ", 1);
+ write(STDERR_FILENO, &log_characters[log_severity], 1);
+ write(STDERR_FILENO, " ", 1);
+ // TODO: pid and tid.
+ write(STDERR_FILENO, file, strlen(file));
+ // TODO: line.
+ UNUSED(line);
+ write(STDERR_FILENO, "] ", 2);
+ write(STDERR_FILENO, message, strlen(message));
+ write(STDERR_FILENO, "\n", 1);
+#endif
+}
+
} // namespace art
diff --git a/runtime/base/logging.h b/runtime/base/logging.h
index baa83e35af..ae83e331fd 100644
--- a/runtime/base/logging.h
+++ b/runtime/base/logging.h
@@ -244,6 +244,10 @@ class LogMessage {
// The routine that performs the actual logging.
static void LogLine(const char* file, unsigned int line, LogSeverity severity, const char* msg);
+ // A variant of the above for use with little stack.
+ static void LogLineLowStack(const char* file, unsigned int line, LogSeverity severity,
+ const char* msg);
+
private:
const std::unique_ptr<LogMessageData> data_;
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index c4736847b9..ab3ec62d69 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -179,6 +179,10 @@ void FaultManager::HandleFault(int sig, siginfo_t* info, void* context) {
// Now set up the nested signal handler.
 + // TODO: add SIGSEGV back to the nested signals when we can handle running out of stack gracefully.
+ static const int handled_nested_signals[] = {SIGABRT};
+ constexpr size_t num_handled_nested_signals = arraysize(handled_nested_signals);
+
// Release the fault manager so that it will remove the signal chain for
// SIGSEGV and we call the real sigaction.
fault_manager.Release();
@@ -188,33 +192,40 @@ void FaultManager::HandleFault(int sig, siginfo_t* info, void* context) {
// Unblock the signals we allow so that they can be delivered in the signal handler.
sigset_t sigset;
sigemptyset(&sigset);
- sigaddset(&sigset, SIGSEGV);
- sigaddset(&sigset, SIGABRT);
+ for (int signal : handled_nested_signals) {
+ sigaddset(&sigset, signal);
+ }
pthread_sigmask(SIG_UNBLOCK, &sigset, nullptr);
// If we get a signal in this code we want to invoke our nested signal
// handler.
- struct sigaction action, oldsegvaction, oldabortaction;
+ struct sigaction action;
+ struct sigaction oldactions[num_handled_nested_signals];
action.sa_sigaction = art_nested_signal_handler;
// Explicitly mask out SIGSEGV and SIGABRT from the nested signal handler. This
// should be the default but we definitely don't want these happening in our
// nested signal handler.
sigemptyset(&action.sa_mask);
- sigaddset(&action.sa_mask, SIGSEGV);
- sigaddset(&action.sa_mask, SIGABRT);
+ for (int signal : handled_nested_signals) {
+ sigaddset(&action.sa_mask, signal);
+ }
action.sa_flags = SA_SIGINFO | SA_ONSTACK;
#if !defined(__APPLE__) && !defined(__mips__)
action.sa_restorer = nullptr;
#endif
- // Catch SIGSEGV and SIGABRT to invoke our nested handler
- int e1 = sigaction(SIGSEGV, &action, &oldsegvaction);
- int e2 = sigaction(SIGABRT, &action, &oldabortaction);
- if (e1 != 0 || e2 != 0) {
- LOG(ERROR) << "Unable to set up nested signal handler";
- } else {
+ // Catch handled signals to invoke our nested handler.
+ bool success = true;
+ for (size_t i = 0; i < num_handled_nested_signals; ++i) {
+ success = sigaction(handled_nested_signals[i], &action, &oldactions[i]) == 0;
+ if (!success) {
+ PLOG(ERROR) << "Unable to set up nested signal handler";
+ break;
+ }
+ }
+ if (success) {
// Save the current state and call the handlers. If anything causes a signal
// our nested signal handler will be invoked and this will longjmp to the saved
// state.
@@ -223,8 +234,12 @@ void FaultManager::HandleFault(int sig, siginfo_t* info, void* context) {
if (handler->Action(sig, info, context)) {
// Restore the signal handlers, reinit the fault manager and return. Signal was
// handled.
- sigaction(SIGSEGV, &oldsegvaction, nullptr);
- sigaction(SIGABRT, &oldabortaction, nullptr);
+ for (size_t i = 0; i < num_handled_nested_signals; ++i) {
+ success = sigaction(handled_nested_signals[i], &oldactions[i], nullptr) == 0;
+ if (!success) {
+ PLOG(ERROR) << "Unable to restore signal handler";
+ }
+ }
fault_manager.Init();
return;
}
@@ -234,8 +249,12 @@ void FaultManager::HandleFault(int sig, siginfo_t* info, void* context) {
}
// Restore the signal handlers.
- sigaction(SIGSEGV, &oldsegvaction, nullptr);
- sigaction(SIGABRT, &oldabortaction, nullptr);
+ for (size_t i = 0; i < num_handled_nested_signals; ++i) {
+ success = sigaction(handled_nested_signals[i], &oldactions[i], nullptr) == 0;
+ if (!success) {
+ PLOG(ERROR) << "Unable to restore signal handler";
+ }
+ }
}
// Now put the fault manager back in place.
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index 19e03d8f82..a5abce6ab1 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -795,13 +795,13 @@ extern "C" jint JNI_CreateJavaVM(JavaVM** p_vm, JNIEnv** p_env, void* vm_args) {
return JNI_OK;
}
-extern "C" jint JNI_GetCreatedJavaVMs(JavaVM** vms, jsize, jsize* vm_count) {
+extern "C" jint JNI_GetCreatedJavaVMs(JavaVM** vms_buf, jsize buf_len, jsize* vm_count) {
Runtime* runtime = Runtime::Current();
- if (runtime == nullptr) {
+ if (runtime == nullptr || buf_len == 0) {
*vm_count = 0;
} else {
*vm_count = 1;
- vms[0] = runtime->GetJavaVM();
+ vms_buf[0] = runtime->GetJavaVM();
}
return JNI_OK;
}
diff --git a/runtime/java_vm_ext_test.cc b/runtime/java_vm_ext_test.cc
new file mode 100644
index 0000000000..60c6a5c23a
--- /dev/null
+++ b/runtime/java_vm_ext_test.cc
@@ -0,0 +1,132 @@
+/*
 + * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni_internal.h"
+
+#include <pthread.h>
+
+#include "common_runtime_test.h"
+#include "java_vm_ext.h"
+#include "runtime.h"
+
+namespace art {
+
+class JavaVmExtTest : public CommonRuntimeTest {
+ protected:
+ virtual void SetUp() {
+ CommonRuntimeTest::SetUp();
+
+ vm_ = Runtime::Current()->GetJavaVM();
+ }
+
+
+ virtual void TearDown() OVERRIDE {
+ CommonRuntimeTest::TearDown();
+ }
+
+ JavaVMExt* vm_;
+};
+
+TEST_F(JavaVmExtTest, JNI_GetDefaultJavaVMInitArgs) {
+ jint err = JNI_GetDefaultJavaVMInitArgs(nullptr);
+ EXPECT_EQ(JNI_ERR, err);
+}
+
+TEST_F(JavaVmExtTest, JNI_GetCreatedJavaVMs) {
+ JavaVM* vms_buf[1];
+ jsize num_vms;
+ jint ok = JNI_GetCreatedJavaVMs(vms_buf, arraysize(vms_buf), &num_vms);
+ EXPECT_EQ(JNI_OK, ok);
+ EXPECT_EQ(1, num_vms);
+ EXPECT_EQ(vms_buf[0], vm_);
+}
+
+static bool gSmallStack = false;
+static bool gAsDaemon = false;
+
+static void* attach_current_thread_callback(void* arg ATTRIBUTE_UNUSED) {
+ JavaVM* vms_buf[1];
+ jsize num_vms;
+ JNIEnv* env;
+ jint ok = JNI_GetCreatedJavaVMs(vms_buf, arraysize(vms_buf), &num_vms);
+ EXPECT_EQ(JNI_OK, ok);
+ if (ok == JNI_OK) {
+ if (!gAsDaemon) {
+ ok = vms_buf[0]->AttachCurrentThread(&env, nullptr);
+ } else {
+ ok = vms_buf[0]->AttachCurrentThreadAsDaemon(&env, nullptr);
+ }
+ EXPECT_EQ(gSmallStack ? JNI_ERR : JNI_OK, ok);
+ if (ok == JNI_OK) {
+ ok = vms_buf[0]->DetachCurrentThread();
+ EXPECT_EQ(JNI_OK, ok);
+ }
+ }
+ return nullptr;
+}
+
+TEST_F(JavaVmExtTest, AttachCurrentThread) {
+ pthread_t pthread;
+ const char* reason = __PRETTY_FUNCTION__;
+ gSmallStack = false;
+ gAsDaemon = false;
+ CHECK_PTHREAD_CALL(pthread_create, (&pthread, nullptr, attach_current_thread_callback,
+ nullptr), reason);
+ void* ret_val;
+ CHECK_PTHREAD_CALL(pthread_join, (pthread, &ret_val), reason);
+ EXPECT_EQ(ret_val, nullptr);
+}
+
+TEST_F(JavaVmExtTest, AttachCurrentThreadAsDaemon) {
+ pthread_t pthread;
+ const char* reason = __PRETTY_FUNCTION__;
+ gSmallStack = false;
+ gAsDaemon = true;
+ CHECK_PTHREAD_CALL(pthread_create, (&pthread, nullptr, attach_current_thread_callback,
+ nullptr), reason);
+ void* ret_val;
+ CHECK_PTHREAD_CALL(pthread_join, (pthread, &ret_val), reason);
+ EXPECT_EQ(ret_val, nullptr);
+}
+
+TEST_F(JavaVmExtTest, AttachCurrentThread_SmallStack) {
+ pthread_t pthread;
+ pthread_attr_t attr;
+ const char* reason = __PRETTY_FUNCTION__;
+ gSmallStack = true;
+ gAsDaemon = false;
+ CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), reason);
+ CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, PTHREAD_STACK_MIN), reason);
+ CHECK_PTHREAD_CALL(pthread_create, (&pthread, &attr, attach_current_thread_callback,
+ nullptr), reason);
+ CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), reason);
+ void* ret_val;
+ CHECK_PTHREAD_CALL(pthread_join, (pthread, &ret_val), reason);
+ EXPECT_EQ(ret_val, nullptr);
+}
+
+TEST_F(JavaVmExtTest, DetachCurrentThread) {
+ JNIEnv* env;
+ jint ok = vm_->AttachCurrentThread(&env, nullptr);
+ ASSERT_EQ(JNI_OK, ok);
+ ok = vm_->DetachCurrentThread();
+ EXPECT_EQ(JNI_OK, ok);
+
+ jint err = vm_->DetachCurrentThread();
+ EXPECT_EQ(JNI_ERR, err);
+}
+
+} // namespace art
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index cab907c378..ccad137164 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -1170,7 +1170,15 @@ TEST_F(JniInternalTest, NewObjectArrayWithInitialValue) {
}
TEST_F(JniInternalTest, GetArrayLength) {
- // Already tested in NewObjectArray/NewPrimitiveArray.
+ // Already tested in NewObjectArray/NewPrimitiveArray except for NULL.
+ CheckJniAbortCatcher jni_abort_catcher;
+ bool old_check_jni = vm_->SetCheckJniEnabled(false);
+ EXPECT_EQ(0, env_->GetArrayLength(nullptr));
+ jni_abort_catcher.Check("java_array == null");
+ EXPECT_FALSE(vm_->SetCheckJniEnabled(true));
+ EXPECT_EQ(JNI_ERR, env_->GetArrayLength(nullptr));
+ jni_abort_catcher.Check("jarray was NULL");
+ EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni));
}
TEST_F(JniInternalTest, GetObjectClass) {
@@ -2011,14 +2019,4 @@ TEST_F(JniInternalTest, MonitorEnterExit) {
}
}
-TEST_F(JniInternalTest, DetachCurrentThread) {
- CleanUpJniEnv(); // cleanup now so TearDown won't have junk from wrong JNIEnv
- jint ok = vm_->DetachCurrentThread();
- EXPECT_EQ(JNI_OK, ok);
-
- jint err = vm_->DetachCurrentThread();
- EXPECT_EQ(JNI_ERR, err);
- vm_->AttachCurrentThread(&env_, nullptr); // need attached thread for CommonRuntimeTest::TearDown
-}
-
} // namespace art
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 2c44f27f3f..cb3da8b00b 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -158,7 +158,7 @@ void* Thread::CreateCallback(void* arg) {
// Check that if we got here we cannot be shutting down (as shutdown should never have started
// while threads are being born).
CHECK(!runtime->IsShuttingDownLocked());
- self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
+ CHECK(self->Init(runtime->GetThreadList(), runtime->GetJavaVM()));
Runtime::Current()->EndThreadBirth();
}
{
@@ -348,40 +348,46 @@ void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_siz
}
}
-void Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm) {
+bool Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm) {
// This function does all the initialization that must be run by the native thread it applies to.
// (When we create a new thread from managed code, we allocate the Thread* in Thread::Create so
// we can handshake with the corresponding native thread when it's ready.) Check this native
// thread hasn't been through here already...
CHECK(Thread::Current() == nullptr);
+
+ // Set pthread_self_ ahead of pthread_setspecific, that makes Thread::Current function, this
+ // avoids pthread_self_ ever being invalid when discovered from Thread::Current().
+ tlsPtr_.pthread_self = pthread_self();
+ CHECK(is_started_);
+
SetUpAlternateSignalStack();
+ if (!InitStackHwm()) {
+ return false;
+ }
InitCpu();
InitTlsEntryPoints();
RemoveSuspendTrigger();
InitCardTable();
InitTid();
- // Set pthread_self_ ahead of pthread_setspecific, that makes Thread::Current function, this
- // avoids pthread_self_ ever being invalid when discovered from Thread::Current().
- tlsPtr_.pthread_self = pthread_self();
- CHECK(is_started_);
+
CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, this), "attach self");
DCHECK_EQ(Thread::Current(), this);
tls32_.thin_lock_thread_id = thread_list->AllocThreadId(this);
- InitStackHwm();
tlsPtr_.jni_env = new JNIEnvExt(this, java_vm);
thread_list->Register(this);
+ return true;
}
Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_group,
bool create_peer) {
- Thread* self;
Runtime* runtime = Runtime::Current();
if (runtime == nullptr) {
LOG(ERROR) << "Thread attaching to non-existent runtime: " << thread_name;
return nullptr;
}
+ Thread* self;
{
MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
if (runtime->IsShuttingDownLocked()) {
@@ -390,8 +396,12 @@ Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_g
} else {
Runtime::Current()->StartThreadBirth();
self = new Thread(as_daemon);
- self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
+ bool init_success = self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
Runtime::Current()->EndThreadBirth();
+ if (!init_success) {
+ delete self;
+ return nullptr;
+ }
}
}
@@ -431,6 +441,11 @@ void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group)
thread_group = runtime->GetMainThreadGroup();
}
ScopedLocalRef<jobject> thread_name(env, env->NewStringUTF(name));
+ // Add missing null check in case of OOM b/18297817
+ if (thread_name.get() == nullptr) {
+ CHECK(IsExceptionPending());
+ return;
+ }
jint thread_priority = GetNativePriority();
jboolean thread_is_daemon = as_daemon;
@@ -494,7 +509,7 @@ void Thread::SetThreadName(const char* name) {
Dbg::DdmSendThreadNotification(this, CHUNK_TYPE("THNM"));
}
-void Thread::InitStackHwm() {
+bool Thread::InitStackHwm() {
void* read_stack_base;
size_t read_stack_size;
size_t read_guard_size;
@@ -516,8 +531,10 @@ void Thread::InitStackHwm() {
uint32_t min_stack = GetStackOverflowReservedBytes(kRuntimeISA) + kStackOverflowProtectedSize
+ 4 * KB;
if (read_stack_size <= min_stack) {
- LOG(FATAL) << "Attempt to attach a thread with a too-small stack (" << read_stack_size
- << " bytes)";
+ // Note, as we know the stack is small, avoid operations that could use a lot of stack.
+ LogMessage::LogLineLowStack(__PRETTY_FUNCTION__, __LINE__, ERROR,
+ "Attempt to attach a thread with a too-small stack");
+ return false;
}
// Set stack_end_ to the bottom of the stack saving space of stack overflows
@@ -542,6 +559,8 @@ void Thread::InitStackHwm() {
// Sanity check.
int stack_variable;
CHECK_GT(&stack_variable, reinterpret_cast<void*>(tlsPtr_.stack_end));
+
+ return true;
}
void Thread::ShortDump(std::ostream& os) const {
@@ -1042,7 +1061,8 @@ void Thread::Startup() {
}
// Allocate a TLS slot.
- CHECK_PTHREAD_CALL(pthread_key_create, (&Thread::pthread_key_self_, Thread::ThreadExitCallback), "self key");
+ CHECK_PTHREAD_CALL(pthread_key_create, (&Thread::pthread_key_self_, Thread::ThreadExitCallback),
+ "self key");
// Double-check the TLS slot allocation.
if (pthread_getspecific(pthread_key_self_) != nullptr) {
diff --git a/runtime/thread.h b/runtime/thread.h
index 89aee04e5d..7e567fb77c 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -893,14 +893,14 @@ class Thread {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void RemoveFromThreadGroup(ScopedObjectAccess& soa) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void Init(ThreadList*, JavaVMExt*) EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
+ bool Init(ThreadList*, JavaVMExt*) EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
void InitCardTable();
void InitCpu();
void CleanupCpu();
void InitTlsEntryPoints();
void InitTid();
void InitPthreadKeySelf();
- void InitStackHwm();
+ bool InitStackHwm();
void SetUpAlternateSignalStack();
void TearDownAlternateSignalStack();
diff --git a/test/424-checkcast/expected.txt b/test/424-checkcast/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/424-checkcast/expected.txt
diff --git a/test/424-checkcast/info.txt b/test/424-checkcast/info.txt
new file mode 100644
index 0000000000..b50b082651
--- /dev/null
+++ b/test/424-checkcast/info.txt
@@ -0,0 +1 @@
+Simple tests for the checkcast opcode.
diff --git a/test/424-checkcast/src/Main.java b/test/424-checkcast/src/Main.java
new file mode 100644
index 0000000000..791b166609
--- /dev/null
+++ b/test/424-checkcast/src/Main.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static Object a;
+
+ public static Object $opt$CheckCastMain() {
+ return (Main)a;
+ }
+
+ public static Object $opt$CheckCastFinalClass() {
+ return (FinalClass)a;
+ }
+
+ public static void main(String[] args) {
+ $opt$TestMain();
+ $opt$TestFinalClass();
+ }
+
+ public static void $opt$TestMain() {
+ a = new Main();
+ $opt$CheckCastMain();
+
+ a = null;
+ $opt$CheckCastMain();
+
+ a = new MainChild();
+ $opt$CheckCastMain();
+
+ a = new Object();
+ try {
+ $opt$CheckCastMain();
+ throw new Error("Should have gotten a ClassCastException");
+ } catch (ClassCastException ex) {}
+ }
+
+ public static void $opt$TestFinalClass() {
+ a = new FinalClass();
+ $opt$CheckCastFinalClass();
+
+ a = null;
+ $opt$CheckCastFinalClass();
+
+ a = new Main();
+ try {
+ $opt$CheckCastFinalClass();
+ throw new Error("Should have gotten a ClassCastException");
+ } catch (ClassCastException ex) {}
+
+ a = new Object();
+ try {
+ $opt$CheckCastFinalClass();
+ throw new Error("Should have gotten a ClassCastException");
+ } catch (ClassCastException ex) {}
+ }
+
+ static class MainChild extends Main {}
+
+ static final class FinalClass {}
+}
diff --git a/test/425-invoke-super/expected.txt b/test/425-invoke-super/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/425-invoke-super/expected.txt
diff --git a/test/425-invoke-super/info.txt b/test/425-invoke-super/info.txt
new file mode 100644
index 0000000000..ad99030a85
--- /dev/null
+++ b/test/425-invoke-super/info.txt
@@ -0,0 +1 @@
+Tests the invoke-super opcode.
diff --git a/test/425-invoke-super/smali/invokesuper.smali b/test/425-invoke-super/smali/invokesuper.smali
new file mode 100644
index 0000000000..ab1309161e
--- /dev/null
+++ b/test/425-invoke-super/smali/invokesuper.smali
@@ -0,0 +1,40 @@
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+.class public LInvokeSuper;
+.super LSuperClass;
+
+.method public constructor <init>()V
+.registers 1
+ invoke-direct {v0}, LSuperClass;-><init>()V
+ return-void
+.end method
+
+
+.method public run()I
+.registers 2
+ # Do an invoke super on a non-super class to force slow path.
+ invoke-super {v1}, LInvokeSuper;->returnInt()I
+ move-result v0
+ return v0
+.end method
+
+
+.method public returnInt()I
+.registers 2
+ const v0, 777
+ return v0
+.end method
diff --git a/test/425-invoke-super/smali/subclass.smali b/test/425-invoke-super/smali/subclass.smali
new file mode 100644
index 0000000000..54e3474078
--- /dev/null
+++ b/test/425-invoke-super/smali/subclass.smali
@@ -0,0 +1,29 @@
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LSubClass;
+.super LInvokeSuper;
+
+.method public constructor <init>()V
+.registers 1
+ invoke-direct {v0}, LInvokeSuper;-><init>()V
+ return-void
+.end method
+
+.method public returnInt()I
+.registers 2
+ const v0, 0
+ return v0
+.end method
diff --git a/test/425-invoke-super/smali/superclass.smali b/test/425-invoke-super/smali/superclass.smali
new file mode 100644
index 0000000000..b366aa7a91
--- /dev/null
+++ b/test/425-invoke-super/smali/superclass.smali
@@ -0,0 +1,29 @@
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LSuperClass;
+.super Ljava/lang/Object;
+
+.method public constructor <init>()V
+.registers 1
+ invoke-direct {v0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+.method public returnInt()I
+.registers 2
+ const v0, 42
+ return v0
+.end method
diff --git a/test/425-invoke-super/src/Main.java b/test/425-invoke-super/src/Main.java
new file mode 100644
index 0000000000..1fb62d0871
--- /dev/null
+++ b/test/425-invoke-super/src/Main.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+
+public class Main {
+ static class A {
+ public int foo() { return 1; }
+ }
+
+ static class B extends A {
+ public int $opt$bar() { return super.foo(); }
+ }
+
+ static class C extends B {
+ public int foo() { return 42; }
+ }
+
+ static class D extends C {
+ }
+
+ static void assertEquals(int expected, int value) {
+ if (expected != value) {
+ throw new Error("Expected " + expected + ", got " + value);
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ assertEquals(1, new B().$opt$bar());
+ assertEquals(1, new C().$opt$bar());
+ assertEquals(1, new D().$opt$bar());
+
+ Class<?> c = Class.forName("InvokeSuper");
+ Method m = c.getMethod("run");
+ assertEquals(42, ((Integer)m.invoke(c.newInstance(), new Object[0])).intValue());
+
+ c = Class.forName("SubClass");
+ assertEquals(42, ((Integer)m.invoke(c.newInstance(), new Object[0])).intValue());
+ }
+}
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index e7a04391b2..562ba597b9 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -444,6 +444,7 @@ TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS := \
422-instanceof \
422-type-conversion \
423-invoke-interface \
+ 424-checkcast \
700-LoadArgRegs \
701-easy-div-rem \
702-LargeBranchOffset \