112 files changed, 1852 insertions, 2757 deletions
diff --git a/Android.mk b/Android.mk index fcf70ff2eb..0d0003abb0 100644 --- a/Android.mk +++ b/Android.mk @@ -122,6 +122,16 @@ include $(art_path)/build/Android.gtest.mk include $(art_path)/test/Android.run-test.mk include $(art_path)/benchmark/Android.mk +TEST_ART_ADB_ROOT_AND_REMOUNT := \ + (adb root && \ + adb wait-for-device remount && \ + ((adb shell touch /system/testfile && \ + (adb shell rm /system/testfile || true)) || \ + (adb disable-verity && \ + adb reboot && \ + adb wait-for-device root && \ + adb wait-for-device remount))) + # Sync test files to the target, depends upon all things that must be pushed to the target. .PHONY: test-art-target-sync # Check if we need to sync. In case ART_TEST_ANDROID_ROOT is not empty, @@ -130,12 +140,11 @@ include $(art_path)/benchmark/Android.mk ifneq ($(ART_TEST_NO_SYNC),true) ifeq ($(ART_TEST_ANDROID_ROOT),) test-art-target-sync: $(TEST_ART_TARGET_SYNC_DEPS) - adb root - adb wait-for-device remount + $(TEST_ART_ADB_ROOT_AND_REMOUNT) adb sync else test-art-target-sync: $(TEST_ART_TARGET_SYNC_DEPS) - adb root + $(TEST_ART_ADB_ROOT_AND_REMOUNT) adb wait-for-device push $(ANDROID_PRODUCT_OUT)/system $(ART_TEST_ANDROID_ROOT) adb push $(ANDROID_PRODUCT_OUT)/data /data endif @@ -374,8 +383,7 @@ oat-target: $(ART_TARGET_DEPENDENCIES) $(DEFAULT_DEX_PREOPT_INSTALLED_IMAGE) $(O .PHONY: oat-target-sync oat-target-sync: oat-target - adb root - adb wait-for-device remount + $(TEST_ART_ADB_ROOT_AND_REMOUNT) adb sync ######################################################################## diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk index 717403fe35..dcde5abbca 100644 --- a/build/Android.gtest.mk +++ b/build/Android.gtest.mk @@ -29,7 +29,6 @@ GTEST_DEX_DIRECTORIES := \ GetMethodSignature \ Instrumentation \ Interfaces \ - LambdaInterfaces \ Lookup \ Main \ MultiDex \ @@ -78,7 +77,6 @@ ART_GTEST_oat_file_test_DEX_DEPS := Main MultiDex ART_GTEST_oat_test_DEX_DEPS := Main ART_GTEST_object_test_DEX_DEPS := ProtoCompare ProtoCompare2 StaticsFromCode XandY ART_GTEST_proxy_test_DEX_DEPS := Interfaces -ART_GTEST_lambda_proxy_test_DEX_DEPS := LambdaInterfaces ART_GTEST_reflection_test_DEX_DEPS := Main NonStaticLeafMethods StaticLeafMethods ART_GTEST_stub_test_DEX_DEPS := AllFields ART_GTEST_transaction_test_DEX_DEPS := Transaction @@ -99,7 +97,6 @@ ART_GTEST_oat_file_assistant_test_TARGET_DEPS := \ # TODO: document why this is needed. ART_GTEST_proxy_test_HOST_DEPS := $(HOST_CORE_IMAGE_default_no-pic_64) $(HOST_CORE_IMAGE_default_no-pic_32) -ART_GTEST_lambda_proxy_test_HOST_DEPS := $(HOST_CORE_IMAGE_default_no-pic_64) $(HOST_CORE_IMAGE_default_no-pic_32) # The dexdump test requires an image and the dexdump utility. 
# TODO: rename into dexdump when migration completes @@ -236,7 +233,6 @@ RUNTIME_GTEST_COMMON_SRC_FILES := \ COMPILER_GTEST_COMMON_SRC_FILES := \ runtime/jni_internal_test.cc \ - runtime/lambda_proxy_test.cc \ runtime/proxy_test.cc \ runtime/reflection_test.cc \ compiler/compiled_method_test.cc \ @@ -745,7 +741,6 @@ ART_GTEST_oat_file_assistant_test_HOST_DEPS := ART_GTEST_oat_file_assistant_test_TARGET_DEPS := ART_GTEST_object_test_DEX_DEPS := ART_GTEST_proxy_test_DEX_DEPS := -ART_GTEST_lambda_proxy_test_DEX_DEPS := ART_GTEST_reflection_test_DEX_DEPS := ART_GTEST_stub_test_DEX_DEPS := ART_GTEST_transaction_test_DEX_DEPS := diff --git a/compiler/optimizing/boolean_simplifier.cc b/compiler/optimizing/boolean_simplifier.cc index f985745e7a..f0cafc847f 100644 --- a/compiler/optimizing/boolean_simplifier.cc +++ b/compiler/optimizing/boolean_simplifier.cc @@ -61,40 +61,6 @@ static bool NegatesCondition(HInstruction* input_true, HInstruction* input_false && input_false->IsIntConstant() && input_false->AsIntConstant()->IsOne(); } -// Returns an instruction with the opposite boolean value from 'cond'. -static HInstruction* GetOppositeCondition(HInstruction* cond) { - HGraph* graph = cond->GetBlock()->GetGraph(); - ArenaAllocator* allocator = graph->GetArena(); - - if (cond->IsCondition()) { - HInstruction* lhs = cond->InputAt(0); - HInstruction* rhs = cond->InputAt(1); - switch (cond->AsCondition()->GetOppositeCondition()) { // get *opposite* - case kCondEQ: return new (allocator) HEqual(lhs, rhs); - case kCondNE: return new (allocator) HNotEqual(lhs, rhs); - case kCondLT: return new (allocator) HLessThan(lhs, rhs); - case kCondLE: return new (allocator) HLessThanOrEqual(lhs, rhs); - case kCondGT: return new (allocator) HGreaterThan(lhs, rhs); - case kCondGE: return new (allocator) HGreaterThanOrEqual(lhs, rhs); - case kCondB: return new (allocator) HBelow(lhs, rhs); - case kCondBE: return new (allocator) HBelowOrEqual(lhs, rhs); - case kCondA: return new (allocator) HAbove(lhs, rhs); - case kCondAE: return new (allocator) HAboveOrEqual(lhs, rhs); - } - } else if (cond->IsIntConstant()) { - HIntConstant* int_const = cond->AsIntConstant(); - if (int_const->IsZero()) { - return graph->GetIntConstant(1); - } else { - DCHECK(int_const->IsOne()); - return graph->GetIntConstant(0); - } - } - // General case when 'cond' is another instruction of type boolean, - // as verified by SSAChecker. 
- return new (allocator) HBooleanNot(cond); -} - void HBooleanSimplifier::TryRemovingBooleanSelection(HBasicBlock* block) { DCHECK(block->EndsWithIf()); @@ -126,10 +92,7 @@ void HBooleanSimplifier::TryRemovingBooleanSelection(HBasicBlock* block) { HInstruction* replacement; if (NegatesCondition(true_value, false_value)) { - replacement = GetOppositeCondition(if_condition); - if (replacement->GetBlock() == nullptr) { - block->InsertInstructionBefore(replacement, if_instruction); - } + replacement = graph_->InsertOppositeCondition(if_condition, if_instruction); } else if (PreservesCondition(true_value, false_value)) { replacement = if_condition; } else { diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc index 3257de1858..32968a597b 100644 --- a/compiler/optimizing/builder.cc +++ b/compiler/optimizing/builder.cc @@ -876,12 +876,78 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction, clinit_check); } +bool HGraphBuilder::BuildNewInstance(uint16_t type_index, uint32_t dex_pc) { + bool finalizable; + bool can_throw = NeedsAccessCheck(type_index, &finalizable); + + // Only the non-resolved entrypoint handles the finalizable class case. If we + // need access checks, then we haven't resolved the method and the class may + // again be finalizable. + QuickEntrypointEnum entrypoint = (finalizable || can_throw) + ? kQuickAllocObject + : kQuickAllocObjectInitialized; + + ScopedObjectAccess soa(Thread::Current()); + StackHandleScope<3> hs(soa.Self()); + Handle<mirror::DexCache> dex_cache(hs.NewHandle( + dex_compilation_unit_->GetClassLinker()->FindDexCache( + soa.Self(), *dex_compilation_unit_->GetDexFile()))); + Handle<mirror::Class> resolved_class(hs.NewHandle(dex_cache->GetResolvedType(type_index))); + const DexFile& outer_dex_file = *outer_compilation_unit_->GetDexFile(); + Handle<mirror::DexCache> outer_dex_cache(hs.NewHandle( + outer_compilation_unit_->GetClassLinker()->FindDexCache(soa.Self(), outer_dex_file))); + + if (outer_dex_cache.Get() != dex_cache.Get()) { + // We currently do not support inlining allocations across dex files. + return false; + } + + HLoadClass* load_class = new (arena_) HLoadClass( + graph_->GetCurrentMethod(), + type_index, + *dex_compilation_unit_->GetDexFile(), + IsOutermostCompilingClass(type_index), + dex_pc, + /*needs_access_check*/ can_throw); + + current_block_->AddInstruction(load_class); + HInstruction* cls = load_class; + if (!IsInitialized(resolved_class, type_index)) { + cls = new (arena_) HClinitCheck(load_class, dex_pc); + current_block_->AddInstruction(cls); + } + + current_block_->AddInstruction(new (arena_) HNewInstance( + cls, + graph_->GetCurrentMethod(), + dex_pc, + type_index, + *dex_compilation_unit_->GetDexFile(), + can_throw, + finalizable, + entrypoint)); + return true; +} + +bool HGraphBuilder::IsInitialized(Handle<mirror::Class> cls, uint16_t type_index) const { + if (cls.Get() == nullptr) { + return false; + } + if (GetOutermostCompilingClass() == cls.Get()) { + return true; + } + // TODO: find out why this check is needed. 
+ bool is_in_dex_cache = compiler_driver_->CanAssumeTypeIsPresentInDexCache( + *outer_compilation_unit_->GetDexFile(), type_index); + return cls->IsInitialized() && is_in_dex_cache; +} + HClinitCheck* HGraphBuilder::ProcessClinitCheckForInvoke( uint32_t dex_pc, uint32_t method_idx, HInvokeStaticOrDirect::ClinitCheckRequirement* clinit_check_requirement) { ScopedObjectAccess soa(Thread::Current()); - StackHandleScope<4> hs(soa.Self()); + StackHandleScope<5> hs(soa.Self()); Handle<mirror::DexCache> dex_cache(hs.NewHandle( dex_compilation_unit_->GetClassLinker()->FindDexCache( soa.Self(), *dex_compilation_unit_->GetDexFile()))); @@ -927,13 +993,8 @@ HClinitCheck* HGraphBuilder::ProcessClinitCheckForInvoke( // whether we should add an explicit class initialization // check for its declaring class before the static method call. - // TODO: find out why this check is needed. - bool is_in_dex_cache = compiler_driver_->CanAssumeTypeIsPresentInDexCache( - *outer_compilation_unit_->GetDexFile(), storage_index); - bool is_initialized = - resolved_method->GetDeclaringClass()->IsInitialized() && is_in_dex_cache; - - if (is_initialized) { + Handle<mirror::Class> cls(hs.NewHandle(resolved_method->GetDeclaringClass())); + if (IsInitialized(cls, storage_index)) { *clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kNone; } else { *clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kExplicit; @@ -1272,7 +1333,7 @@ bool HGraphBuilder::BuildStaticFieldAccess(const Instruction& instruction, uint16_t field_index = instruction.VRegB_21c(); ScopedObjectAccess soa(Thread::Current()); - StackHandleScope<4> hs(soa.Self()); + StackHandleScope<5> hs(soa.Self()); Handle<mirror::DexCache> dex_cache(hs.NewHandle( dex_compilation_unit_->GetClassLinker()->FindDexCache( soa.Self(), *dex_compilation_unit_->GetDexFile()))); @@ -1318,11 +1379,6 @@ bool HGraphBuilder::BuildStaticFieldAccess(const Instruction& instruction, } } - // TODO: find out why this check is needed. - bool is_in_dex_cache = compiler_driver_->CanAssumeTypeIsPresentInDexCache( - *outer_compilation_unit_->GetDexFile(), storage_index); - bool is_initialized = resolved_field->GetDeclaringClass()->IsInitialized() && is_in_dex_cache; - HLoadClass* constant = new (arena_) HLoadClass(graph_->GetCurrentMethod(), storage_index, *dex_compilation_unit_->GetDexFile(), @@ -1332,12 +1388,14 @@ bool HGraphBuilder::BuildStaticFieldAccess(const Instruction& instruction, current_block_->AddInstruction(constant); HInstruction* cls = constant; - if (!is_initialized && !is_outer_class) { + + Handle<mirror::Class> klass(hs.NewHandle(resolved_field->GetDeclaringClass())); + if (!IsInitialized(klass, storage_index)) { cls = new (arena_) HClinitCheck(constant, dex_pc); current_block_->AddInstruction(cls); } - uint16_t class_def_index = resolved_field->GetDeclaringClass()->GetDexClassDefIndex(); + uint16_t class_def_index = klass->GetDexClassDefIndex(); if (is_put) { // We need to keep the class alive before loading the value. Temporaries temps(graph_); @@ -2509,20 +2567,9 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32 current_block_->AddInstruction(fake_string); UpdateLocal(register_index, fake_string, dex_pc); } else { - bool finalizable; - bool can_throw = NeedsAccessCheck(type_index, &finalizable); - QuickEntrypointEnum entrypoint = can_throw - ? 
kQuickAllocObjectWithAccessCheck - : kQuickAllocObject; - - current_block_->AddInstruction(new (arena_) HNewInstance( - graph_->GetCurrentMethod(), - dex_pc, - type_index, - *dex_compilation_unit_->GetDexFile(), - can_throw, - finalizable, - entrypoint)); + if (!BuildNewInstance(type_index, dex_pc)) { + return false; + } UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction(), dex_pc); } break; diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h index f857ef0e12..615b0cd738 100644 --- a/compiler/optimizing/builder.h +++ b/compiler/optimizing/builder.h @@ -308,6 +308,14 @@ class HGraphBuilder : public ValueObject { uint32_t method_idx, HInvokeStaticOrDirect::ClinitCheckRequirement* clinit_check_requirement); + // Build a HNewInstance instruction. + bool BuildNewInstance(uint16_t type_index, uint32_t dex_pc); + + // Return whether the compiler can assume `cls` is initialized. `type_index` is the index + // of the class in the outer dex file. + bool IsInitialized(Handle<mirror::Class> cls, uint16_t type_index) const + SHARED_REQUIRES(Locks::mutator_lock_); + ArenaAllocator* const arena_; // A list of the size of the dex code holding block information for diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc index cb6bed08ec..461319eae7 100644 --- a/compiler/optimizing/code_generator_arm.cc +++ b/compiler/optimizing/code_generator_arm.cc @@ -3361,7 +3361,19 @@ void InstructionCodeGeneratorARM::HandleShift(HBinaryOperation* op) { __ mov(o_l, ShifterOperand(high)); __ LoadImmediate(o_h, 0); } - } else { // shift_value < 32 + } else if (shift_value == 1) { + if (op->IsShl()) { + __ Lsls(o_l, low, 1); + __ adc(o_h, high, ShifterOperand(high)); + } else if (op->IsShr()) { + __ Asrs(o_h, high, 1); + __ Rrx(o_l, low); + } else { + __ Lsrs(o_h, high, 1); + __ Rrx(o_l, low); + } + } else { + DCHECK(2 <= shift_value && shift_value < 32) << shift_value; if (op->IsShl()) { __ Lsl(o_h, high, shift_value); __ orr(o_h, o_h, ShifterOperand(low, LSR, 32 - shift_value)); @@ -3413,14 +3425,12 @@ void LocationsBuilderARM::VisitNewInstance(HNewInstance* instruction) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall); InvokeRuntimeCallingConvention calling_convention; - locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0))); - locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); + locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); + locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); locations->SetOut(Location::RegisterLocation(R0)); } void InstructionCodeGeneratorARM::VisitNewInstance(HNewInstance* instruction) { - InvokeRuntimeCallingConvention calling_convention; - __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex()); // Note: if heap poisoning is enabled, the entry point takes cares // of poisoning the reference. codegen_->InvokeRuntime(instruction->GetEntrypoint(), @@ -4320,7 +4330,7 @@ void LocationsBuilderARM::VisitArraySet(HArraySet* instruction) { if (needs_write_barrier) { // Temporary registers for the write barrier. locations->AddTemp(Location::RequiresRegister()); // Possibly used for ref. poisoning too. - locations->AddTemp(Location::RequiresRegister()); // Possibly used for read barrier too. 
+ locations->AddTemp(Location::RequiresRegister()); } } diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc index 2776b7d6c9..d82cb672a0 100644 --- a/compiler/optimizing/code_generator_arm64.cc +++ b/compiler/optimizing/code_generator_arm64.cc @@ -1628,6 +1628,47 @@ void InstructionCodeGeneratorARM64::VisitArm64IntermediateAddress( Operand(InputOperandAt(instruction, 1))); } +void LocationsBuilderARM64::VisitArm64MultiplyAccumulate(HArm64MultiplyAccumulate* instr) { + LocationSummary* locations = + new (GetGraph()->GetArena()) LocationSummary(instr, LocationSummary::kNoCall); + locations->SetInAt(HArm64MultiplyAccumulate::kInputAccumulatorIndex, + Location::RequiresRegister()); + locations->SetInAt(HArm64MultiplyAccumulate::kInputMulLeftIndex, Location::RequiresRegister()); + locations->SetInAt(HArm64MultiplyAccumulate::kInputMulRightIndex, Location::RequiresRegister()); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); +} + +void InstructionCodeGeneratorARM64::VisitArm64MultiplyAccumulate(HArm64MultiplyAccumulate* instr) { + Register res = OutputRegister(instr); + Register accumulator = InputRegisterAt(instr, HArm64MultiplyAccumulate::kInputAccumulatorIndex); + Register mul_left = InputRegisterAt(instr, HArm64MultiplyAccumulate::kInputMulLeftIndex); + Register mul_right = InputRegisterAt(instr, HArm64MultiplyAccumulate::kInputMulRightIndex); + + // Avoid emitting code that could trigger Cortex A53's erratum 835769. + // This fixup should be carried out for all multiply-accumulate instructions: + // madd, msub, smaddl, smsubl, umaddl and umsubl. + if (instr->GetType() == Primitive::kPrimLong && + codegen_->GetInstructionSetFeatures().NeedFixCortexA53_835769()) { + MacroAssembler* masm = down_cast<CodeGeneratorARM64*>(codegen_)->GetVIXLAssembler(); + vixl::Instruction* prev = masm->GetCursorAddress<vixl::Instruction*>() - vixl::kInstructionSize; + if (prev->IsLoadOrStore()) { + // Make sure we emit only exactly one nop. 
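// Background, summarized from the public erratum notice rather than from
// this patch: on affected Cortex-A53 cores, a 64-bit integer
// multiply-accumulate that immediately follows a memory load or store can
// produce a wrong result. Inserting a single nop between the two
// instructions breaks the sequence; this is the same workaround GCC and
// Clang apply under -mfix-cortex-a53-835769. The exact-size scope below
// asserts that the fixup emits that one nop and nothing else.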
+ vixl::CodeBufferCheckScope scope(masm, + vixl::kInstructionSize, + vixl::CodeBufferCheckScope::kCheck, + vixl::CodeBufferCheckScope::kExactSize); + __ nop(); + } + } + + if (instr->GetOpKind() == HInstruction::kAdd) { + __ Madd(res, mul_left, mul_right, accumulator); + } else { + DCHECK(instr->GetOpKind() == HInstruction::kSub); + __ Msub(res, mul_left, mul_right, accumulator); + } +} + void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); @@ -3372,17 +3413,13 @@ void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall); InvokeRuntimeCallingConvention calling_convention; - locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0))); - locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(1))); + locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0))); + locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1))); locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot)); CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>(); } void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction) { - LocationSummary* locations = instruction->GetLocations(); - Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt); - DCHECK(type_index.Is(w0)); - __ Mov(type_index, instruction->GetTypeIndex()); // Note: if heap poisoning is enabled, the entry point takes cares // of poisoning the reference. codegen_->InvokeRuntime(instruction->GetEntrypoint(), diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc index 801e203de5..f3178bd77c 100644 --- a/compiler/optimizing/code_generator_mips.cc +++ b/compiler/optimizing/code_generator_mips.cc @@ -3478,17 +3478,12 @@ void LocationsBuilderMIPS::VisitNewInstance(HNewInstance* instruction) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall); InvokeRuntimeCallingConvention calling_convention; - locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0))); - locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1))); + locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); + locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot)); } void InstructionCodeGeneratorMIPS::VisitNewInstance(HNewInstance* instruction) { - InvokeRuntimeCallingConvention calling_convention; - Register current_method_register = calling_convention.GetRegisterAt(1); - __ Lw(current_method_register, SP, kCurrentMethodStackOffset); - // Move an uint16_t value to a register. 
- __ LoadConst32(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex()); codegen_->InvokeRuntime( GetThreadOffset<kMipsWordSize>(instruction->GetEntrypoint()).Int32Value(), instruction, diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc index 8530fe7a36..802c435279 100644 --- a/compiler/optimizing/code_generator_mips64.cc +++ b/compiler/optimizing/code_generator_mips64.cc @@ -3270,15 +3270,12 @@ void LocationsBuilderMIPS64::VisitNewInstance(HNewInstance* instruction) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall); InvokeRuntimeCallingConvention calling_convention; - locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0))); - locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); + locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); + locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot)); } void InstructionCodeGeneratorMIPS64::VisitNewInstance(HNewInstance* instruction) { - LocationSummary* locations = instruction->GetLocations(); - // Move an uint16_t value to a register. - __ LoadConst32(locations->GetTemp(0).AsRegister<GpuRegister>(), instruction->GetTypeIndex()); codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc(), diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc index a87e8ede04..6a9177de26 100644 --- a/compiler/optimizing/code_generator_x86.cc +++ b/compiler/optimizing/code_generator_x86.cc @@ -3769,13 +3769,11 @@ void LocationsBuilderX86::VisitNewInstance(HNewInstance* instruction) { new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall); locations->SetOut(Location::RegisterLocation(EAX)); InvokeRuntimeCallingConvention calling_convention; - locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0))); - locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); + locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); + locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); } void InstructionCodeGeneratorX86::VisitNewInstance(HNewInstance* instruction) { - InvokeRuntimeCallingConvention calling_convention; - __ movl(calling_convention.GetRegisterAt(0), Immediate(instruction->GetTypeIndex())); // Note: if heap poisoning is enabled, the entry point takes cares // of poisoning the reference. codegen_->InvokeRuntime(instruction->GetEntrypoint(), @@ -4856,7 +4854,7 @@ void LocationsBuilderX86::VisitArraySet(HArraySet* instruction) { // Temporary registers for the write barrier. locations->AddTemp(Location::RequiresRegister()); // Possibly used for ref. poisoning too. // Ensure the card is in a byte register. - locations->AddTemp(Location::RegisterLocation(ECX)); // Possibly used for read barrier too. 
+ locations->AddTemp(Location::RegisterLocation(ECX)); } } diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc index dcc180804d..8cfd8cb985 100644 --- a/compiler/optimizing/code_generator_x86_64.cc +++ b/compiler/optimizing/code_generator_x86_64.cc @@ -3765,18 +3765,14 @@ void LocationsBuilderX86_64::VisitNewInstance(HNewInstance* instruction) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall); InvokeRuntimeCallingConvention calling_convention; - locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0))); - locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); + locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); + locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); locations->SetOut(Location::RegisterLocation(RAX)); } void InstructionCodeGeneratorX86_64::VisitNewInstance(HNewInstance* instruction) { - InvokeRuntimeCallingConvention calling_convention; - codegen_->Load64BitValue(CpuRegister(calling_convention.GetRegisterAt(0)), - instruction->GetTypeIndex()); // Note: if heap poisoning is enabled, the entry point takes cares // of poisoning the reference. - codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc(), @@ -4500,8 +4496,6 @@ void LocationsBuilderX86_64::VisitArraySet(HArraySet* instruction) { // This first temporary register is possibly used for heap // reference poisoning and/or read barrier emission too. locations->AddTemp(Location::RequiresRegister()); - // This second temporary register is possibly used for read - // barrier emission too. locations->AddTemp(Location::RequiresRegister()); } } diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc index d166d0061f..4438190ec3 100644 --- a/compiler/optimizing/graph_visualizer.cc +++ b/compiler/optimizing/graph_visualizer.cc @@ -422,6 +422,12 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor { StartAttributeStream("kind") << (try_boundary->IsEntry() ? "entry" : "exit"); } +#ifdef ART_ENABLE_CODEGEN_arm64 + void VisitArm64MultiplyAccumulate(HArm64MultiplyAccumulate* instruction) OVERRIDE { + StartAttributeStream("kind") << instruction->GetOpKind(); + } +#endif + bool IsPass(const char* name) { return strcmp(pass_name_, name) == 0; } diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc index c36de84064..4af111b784 100644 --- a/compiler/optimizing/gvn.cc +++ b/compiler/optimizing/gvn.cc @@ -377,9 +377,10 @@ void GlobalValueNumberer::VisitBasicBlock(HBasicBlock* block) { HInstruction* current = block->GetFirstInstruction(); while (current != nullptr) { - set->Kill(current->GetSideEffects()); // Save the next instruction in case `current` is removed from the graph. HInstruction* next = current->GetNext(); + // Do not kill the set with the side effects of the instruction just now: if + // the instruction is GVN'ed, we don't need to kill. 
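// Rationale: an instruction that gets GVN'ed is replaced by an existing
// equivalent and removed from the graph, so it never executes and the
// writes described by its side effects never happen. The entries already
// in the set therefore remain valid, and killing them eagerly would only
// discard reusable values.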
if (current->CanBeMoved()) { if (current->IsBinaryOperation() && current->AsBinaryOperation()->IsCommutative()) { // For commutative ops, (x op y) will be treated the same as (y op x) @@ -395,8 +396,11 @@ void GlobalValueNumberer::VisitBasicBlock(HBasicBlock* block) { current->ReplaceWith(existing); current->GetBlock()->RemoveInstruction(current); } else { + set->Kill(current->GetSideEffects()); set->Add(current); } + } else { + set->Kill(current->GetSideEffects()); } current = next; } diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc index 9ad2dd1c8e..2f3df7fc68 100644 --- a/compiler/optimizing/instruction_simplifier.cc +++ b/compiler/optimizing/instruction_simplifier.cc @@ -169,16 +169,6 @@ void InstructionSimplifierVisitor::VisitShift(HBinaryOperation* instruction) { // src instruction->ReplaceWith(input_other); instruction->GetBlock()->RemoveInstruction(instruction); - } else if (instruction->IsShl() && input_cst->IsOne()) { - // Replace Shl looking like - // SHL dst, src, 1 - // with - // ADD dst, src, src - HAdd *add = new(GetGraph()->GetArena()) HAdd(instruction->GetType(), - input_other, - input_other); - instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, add); - RecordSimplification(); } } } @@ -372,9 +362,8 @@ void InstructionSimplifierVisitor::VisitEqual(HEqual* equal) { block->RemoveInstruction(equal); RecordSimplification(); } else if (input_const->AsIntConstant()->IsZero()) { - // Replace (bool_value == false) with !bool_value - block->ReplaceAndRemoveInstructionWith( - equal, new (block->GetGraph()->GetArena()) HBooleanNot(input_value)); + equal->ReplaceWith(GetGraph()->InsertOppositeCondition(input_value, equal)); + block->RemoveInstruction(equal); RecordSimplification(); } else { // Replace (bool_value == integer_not_zero_nor_one_constant) with false @@ -399,9 +388,8 @@ void InstructionSimplifierVisitor::VisitNotEqual(HNotEqual* not_equal) { // We are comparing the boolean to a constant which is of type int and can // be any constant. if (input_const->AsIntConstant()->IsOne()) { - // Replace (bool_value != true) with !bool_value - block->ReplaceAndRemoveInstructionWith( - not_equal, new (block->GetGraph()->GetArena()) HBooleanNot(input_value)); + not_equal->ReplaceWith(GetGraph()->InsertOppositeCondition(input_value, not_equal)); + block->RemoveInstruction(not_equal); RecordSimplification(); } else if (input_const->AsIntConstant()->IsZero()) { // Replace (bool_value != false) with bool_value diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc index eb79f469eb..54dd2ccaf8 100644 --- a/compiler/optimizing/instruction_simplifier_arm64.cc +++ b/compiler/optimizing/instruction_simplifier_arm64.cc @@ -62,6 +62,67 @@ void InstructionSimplifierArm64Visitor::TryExtractArrayAccessAddress(HInstructio RecordSimplification(); } +bool InstructionSimplifierArm64Visitor::TrySimpleMultiplyAccumulatePatterns( + HMul* mul, HBinaryOperation* input_binop, HInstruction* input_other) { + DCHECK(Primitive::IsIntOrLongType(mul->GetType())); + DCHECK(input_binop->IsAdd() || input_binop->IsSub()); + DCHECK_NE(input_binop, input_other); + if (!input_binop->HasOnlyOneNonEnvironmentUse()) { + return false; + } + + // Try to interpret patterns like + // a * (b <+/-> 1) + // as + // (a * b) <+/-> a + HInstruction* input_a = input_other; + HInstruction* input_b = nullptr; // Set to a non-null value if we found a pattern to optimize. 
+ HInstruction::InstructionKind op_kind; + + if (input_binop->IsAdd()) { + if ((input_binop->GetConstantRight() != nullptr) && input_binop->GetConstantRight()->IsOne()) { + // Interpret + // a * (b + 1) + // as + // (a * b) + a + input_b = input_binop->GetLeastConstantLeft(); + op_kind = HInstruction::kAdd; + } + } else { + DCHECK(input_binop->IsSub()); + if (input_binop->GetRight()->IsConstant() && + input_binop->GetRight()->AsConstant()->IsMinusOne()) { + // Interpret + // a * (b - (-1)) + // as + // a + (a * b) + input_b = input_binop->GetLeft(); + op_kind = HInstruction::kAdd; + } else if (input_binop->GetLeft()->IsConstant() && + input_binop->GetLeft()->AsConstant()->IsOne()) { + // Interpret + // a * (1 - b) + // as + // a - (a * b) + input_b = input_binop->GetRight(); + op_kind = HInstruction::kSub; + } + } + + if (input_b == nullptr) { + // We did not find a pattern we can optimize. + return false; + } + + HArm64MultiplyAccumulate* mulacc = new(GetGraph()->GetArena()) HArm64MultiplyAccumulate( + mul->GetType(), op_kind, input_a, input_a, input_b, mul->GetDexPc()); + + mul->GetBlock()->ReplaceAndRemoveInstructionWith(mul, mulacc); + input_binop->GetBlock()->RemoveInstruction(input_binop); + + return true; +} + void InstructionSimplifierArm64Visitor::VisitArrayGet(HArrayGet* instruction) { TryExtractArrayAccessAddress(instruction, instruction->GetArray(), @@ -76,5 +137,78 @@ void InstructionSimplifierArm64Visitor::VisitArraySet(HArraySet* instruction) { Primitive::ComponentSize(instruction->GetComponentType())); } +void InstructionSimplifierArm64Visitor::VisitMul(HMul* instruction) { + Primitive::Type type = instruction->GetType(); + if (!Primitive::IsIntOrLongType(type)) { + return; + } + + HInstruction* use = instruction->HasNonEnvironmentUses() + ? instruction->GetUses().GetFirst()->GetUser() + : nullptr; + + if (instruction->HasOnlyOneNonEnvironmentUse() && (use->IsAdd() || use->IsSub())) { + // Replace code looking like + // MUL tmp, x, y + // SUB dst, acc, tmp + // with + // MULSUB dst, acc, x, y + // Note that we do not want to (unconditionally) perform the merge when the + // multiplication has multiple uses and it can be merged in all of them. + // Multiple uses could happen on the same control-flow path, and we would + // then increase the amount of work. In the future we could try to evaluate + // whether all uses are on different control-flow paths (using dominance and + // reverse-dominance information) and only perform the merge when they are. + HInstruction* accumulator = nullptr; + HBinaryOperation* binop = use->AsBinaryOperation(); + HInstruction* binop_left = binop->GetLeft(); + HInstruction* binop_right = binop->GetRight(); + // Be careful after GVN. This should not happen since the `HMul` has only + // one use. + DCHECK_NE(binop_left, binop_right); + if (binop_right == instruction) { + accumulator = binop_left; + } else if (use->IsAdd()) { + DCHECK_EQ(binop_left, instruction); + accumulator = binop_right; + } + + if (accumulator != nullptr) { + HArm64MultiplyAccumulate* mulacc = + new (GetGraph()->GetArena()) HArm64MultiplyAccumulate(type, + binop->GetKind(), + accumulator, + instruction->GetLeft(), + instruction->GetRight()); + + binop->GetBlock()->ReplaceAndRemoveInstructionWith(binop, mulacc); + DCHECK(!instruction->HasUses()); + instruction->GetBlock()->RemoveInstruction(instruction); + RecordSimplification(); + return; + } + } + + // Use multiply accumulate instruction for a few simple patterns.
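// Concretely, TrySimpleMultiplyAccumulatePatterns above accepts three shapes:
//   a * (b + 1)    becomes  (a * b) + a   (one madd)
//   a * (b - (-1)) becomes  (a * b) + a   (one madd)
//   a * (1 - b)    becomes  a - (a * b)   (one msub)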
+ // We prefer not applying the following transformations if the left and + // right inputs perform the same operation. + // We rely on GVN having squashed the inputs if appropriate. However the + // results are still correct even if that did not happen. + if (instruction->GetLeft() == instruction->GetRight()) { + return; + } + + HInstruction* left = instruction->GetLeft(); + HInstruction* right = instruction->GetRight(); + if ((right->IsAdd() || right->IsSub()) && + TrySimpleMultiplyAccumulatePatterns(instruction, right->AsBinaryOperation(), left)) { + return; + } + if ((left->IsAdd() || left->IsSub()) && + TrySimpleMultiplyAccumulatePatterns(instruction, left->AsBinaryOperation(), right)) { + return; + } +} + } // namespace arm64 } // namespace art diff --git a/compiler/optimizing/instruction_simplifier_arm64.h b/compiler/optimizing/instruction_simplifier_arm64.h index 4b697dba0e..eed2276588 100644 --- a/compiler/optimizing/instruction_simplifier_arm64.h +++ b/compiler/optimizing/instruction_simplifier_arm64.h @@ -40,8 +40,14 @@ class InstructionSimplifierArm64Visitor : public HGraphVisitor { HInstruction* index, int access_size); + bool TrySimpleMultiplyAccumulatePatterns(HMul* mul, + HBinaryOperation* input_binop, + HInstruction* input_other); + + // HInstruction visitors, sorted alphabetically. void VisitArrayGet(HArrayGet* instruction) OVERRIDE; void VisitArraySet(HArraySet* instruction) OVERRIDE; + void VisitMul(HMul* instruction) OVERRIDE; OptimizingCompilerStats* stats_; }; diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc index 0a39ff31bf..890598d687 100644 --- a/compiler/optimizing/nodes.cc +++ b/compiler/optimizing/nodes.cc @@ -2090,4 +2090,46 @@ void HInstruction::RemoveEnvironmentUsers() { env_uses_.Clear(); } +// Returns an instruction with the opposite boolean value from 'cond'. +HInstruction* HGraph::InsertOppositeCondition(HInstruction* cond, HInstruction* cursor) { + ArenaAllocator* allocator = GetArena(); + + if (cond->IsCondition() && + !Primitive::IsFloatingPointType(cond->InputAt(0)->GetType())) { + // Can't reverse floating point conditions. We have to use HBooleanNot in that case. 
+ HInstruction* lhs = cond->InputAt(0); + HInstruction* rhs = cond->InputAt(1); + HInstruction* replacement = nullptr; + switch (cond->AsCondition()->GetOppositeCondition()) { // get *opposite* + case kCondEQ: replacement = new (allocator) HEqual(lhs, rhs); break; + case kCondNE: replacement = new (allocator) HNotEqual(lhs, rhs); break; + case kCondLT: replacement = new (allocator) HLessThan(lhs, rhs); break; + case kCondLE: replacement = new (allocator) HLessThanOrEqual(lhs, rhs); break; + case kCondGT: replacement = new (allocator) HGreaterThan(lhs, rhs); break; + case kCondGE: replacement = new (allocator) HGreaterThanOrEqual(lhs, rhs); break; + case kCondB: replacement = new (allocator) HBelow(lhs, rhs); break; + case kCondBE: replacement = new (allocator) HBelowOrEqual(lhs, rhs); break; + case kCondA: replacement = new (allocator) HAbove(lhs, rhs); break; + case kCondAE: replacement = new (allocator) HAboveOrEqual(lhs, rhs); break; + default: + LOG(FATAL) << "Unexpected condition"; + UNREACHABLE(); + } + cursor->GetBlock()->InsertInstructionBefore(replacement, cursor); + return replacement; + } else if (cond->IsIntConstant()) { + HIntConstant* int_const = cond->AsIntConstant(); + if (int_const->IsZero()) { + return GetIntConstant(1); + } else { + DCHECK(int_const->IsOne()); + return GetIntConstant(0); + } + } else { + HInstruction* replacement = new (allocator) HBooleanNot(cond); + cursor->GetBlock()->InsertInstructionBefore(replacement, cursor); + return replacement; + } +} + } // namespace art diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h index 4f894b07c7..1bd626fe2b 100644 --- a/compiler/optimizing/nodes.h +++ b/compiler/optimizing/nodes.h @@ -371,6 +371,11 @@ class HGraph : public ArenaObject<kArenaAllocGraph> { bool HasTryCatch() const { return has_try_catch_; } void SetHasTryCatch(bool value) { has_try_catch_ = value; } + // Returns an instruction with the opposite boolean value from 'cond'. + // The instruction has been inserted into the graph, either as a constant, or + // before cursor. + HInstruction* InsertOppositeCondition(HInstruction* cond, HInstruction* cursor); + private: void FindBackEdges(ArenaBitVector* visited); void RemoveInstructionsAsUsersFromDeadBlocks(const ArenaBitVector& visited) const; @@ -1096,7 +1101,8 @@ class HLoopInformationOutwardIterator : public ValueObject { #define FOR_EACH_CONCRETE_INSTRUCTION_ARM64(M) #else #define FOR_EACH_CONCRETE_INSTRUCTION_ARM64(M) \ - M(Arm64IntermediateAddress, Instruction) + M(Arm64IntermediateAddress, Instruction) \ + M(Arm64MultiplyAccumulate, Instruction) #endif #define FOR_EACH_CONCRETE_INSTRUCTION_MIPS(M) @@ -1626,6 +1632,11 @@ class HEnvironment : public ArenaObject<kArenaAllocEnvironment> { return holder_; } + + bool IsFromInlinedInvoke() const { + return GetParent() != nullptr; + } + private: // Record instructions' use entries of this environment for constant-time removal. // It should only be called by HInstruction when a new environment use is added. 
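A standalone illustration (not part of the patch) of why the InsertOppositeCondition helper introduced above refuses to reverse floating-point conditions and falls back to HBooleanNot: with a NaN operand, a comparison and its "opposite" are both false, so swapping the condition would not negate it.

#include <cassert>
#include <cmath>

int main() {
  double a = std::nan("");
  double b = 0.0;
  // Every ordered comparison involving NaN is false, so (a >= b) is not
  // the logical negation of (a < b): both are false here.
  assert(!(a < b));
  assert(!(a >= b));
  return 0;
}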
@@ -3238,7 +3249,7 @@ class HInvoke : public HInstruction { void SetIntrinsic(Intrinsics intrinsic, IntrinsicNeedsEnvironmentOrCache needs_env_or_cache); bool IsFromInlinedInvoke() const { - return GetEnvironment()->GetParent() != nullptr; + return GetEnvironment()->IsFromInlinedInvoke(); } bool CanThrow() const OVERRIDE { return true; } @@ -3652,9 +3663,10 @@ class HInvokeInterface : public HInvoke { DISALLOW_COPY_AND_ASSIGN(HInvokeInterface); }; -class HNewInstance : public HExpression<1> { +class HNewInstance : public HExpression<2> { public: - HNewInstance(HCurrentMethod* current_method, + HNewInstance(HInstruction* cls, + HCurrentMethod* current_method, uint32_t dex_pc, uint16_t type_index, const DexFile& dex_file, @@ -3667,7 +3679,8 @@ class HNewInstance : public HExpression<1> { can_throw_(can_throw), finalizable_(finalizable), entrypoint_(entrypoint) { - SetRawInputAt(0, current_method); + SetRawInputAt(0, cls); + SetRawInputAt(1, current_method); } uint16_t GetTypeIndex() const { return type_index_; } @@ -3687,6 +3700,10 @@ class HNewInstance : public HExpression<1> { QuickEntrypointEnum GetEntrypoint() const { return entrypoint_; } + void SetEntrypoint(QuickEntrypointEnum entrypoint) { + entrypoint_ = entrypoint; + } + DECLARE_INSTRUCTION(NewInstance); private: @@ -3694,7 +3711,7 @@ class HNewInstance : public HExpression<1> { const DexFile& dex_file_; const bool can_throw_; const bool finalizable_; - const QuickEntrypointEnum entrypoint_; + QuickEntrypointEnum entrypoint_; DISALLOW_COPY_AND_ASSIGN(HNewInstance); }; @@ -4302,9 +4319,13 @@ class HPhi : public HInstruction { : HInstruction(SideEffects::None(), dex_pc), inputs_(number_of_inputs, arena->Adapter(kArenaAllocPhiInputs)), reg_number_(reg_number), - type_(type), - is_live_(false), + type_(ToPhiType(type)), + // Phis are constructed live and marked dead if conflicting or unused. + // Individual steps of SsaBuilder should assume that if a phi has been + // marked dead, it can be ignored and will be removed by SsaPhiElimination. + is_live_(true), can_be_null_(true) { + DCHECK_NE(type_, Primitive::kPrimVoid); } // Returns a type equivalent to the given `type`, but that a `HPhi` can hold. 
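The HPhi constructor change above makes every phi start out live, with dead ones marked explicitly. As a self-contained sketch (hypothetical Phi type and MarkDeadPhis function, not ART's classes), the marking pass this supports looks roughly like the following: phis with a non-phi use seed a worklist, every other phi is tentatively marked dead, and the worklist revives any phi that feeds a live one, mirroring the SsaDeadPhiElimination changes later in this patch.

#include <cassert>
#include <vector>

struct Phi {
  bool live = true;                 // phis are constructed live
  bool has_non_phi_use = false;
  std::vector<Phi*> inputs;         // phi inputs only, for simplicity
};

void MarkDeadPhis(const std::vector<Phi*>& phis) {
  std::vector<Phi*> worklist;
  for (Phi* phi : phis) {
    if (phi->has_non_phi_use) {
      worklist.push_back(phi);      // definitely live
    } else {
      phi->live = false;            // tentatively dead
    }
  }
  while (!worklist.empty()) {
    Phi* phi = worklist.back();
    worklist.pop_back();
    for (Phi* input : phi->inputs) {
      if (!input->live) {
        input->live = true;         // revive: it feeds a live phi
        worklist.push_back(input);
      }
    }
  }
}

int main() {
  Phi a, b, c;
  b.has_non_phi_use = true;         // b is used by a real instruction
  b.inputs.push_back(&a);           // a feeds b, so a must stay live
  std::vector<Phi*> phis = {&a, &b, &c};
  MarkDeadPhis(phis);
  assert(a.live && b.live && !c.live);
  return 0;
}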
@@ -4927,6 +4948,7 @@ class HClinitCheck : public HExpression<1> { return true; } + bool CanThrow() const OVERRIDE { return true; } HLoadClass* GetLoadClass() const { return InputAt(0)->AsLoadClass(); } diff --git a/compiler/optimizing/nodes_arm64.h b/compiler/optimizing/nodes_arm64.h index 885d3a29ee..d07f019c60 100644 --- a/compiler/optimizing/nodes_arm64.h +++ b/compiler/optimizing/nodes_arm64.h @@ -42,6 +42,40 @@ class HArm64IntermediateAddress : public HExpression<2> { DISALLOW_COPY_AND_ASSIGN(HArm64IntermediateAddress); }; +class HArm64MultiplyAccumulate : public HExpression<3> { + public: + HArm64MultiplyAccumulate(Primitive::Type type, + InstructionKind op, + HInstruction* accumulator, + HInstruction* mul_left, + HInstruction* mul_right, + uint32_t dex_pc = kNoDexPc) + : HExpression(type, SideEffects::None(), dex_pc), op_kind_(op) { + SetRawInputAt(kInputAccumulatorIndex, accumulator); + SetRawInputAt(kInputMulLeftIndex, mul_left); + SetRawInputAt(kInputMulRightIndex, mul_right); + } + + static constexpr int kInputAccumulatorIndex = 0; + static constexpr int kInputMulLeftIndex = 1; + static constexpr int kInputMulRightIndex = 2; + + bool CanBeMoved() const OVERRIDE { return true; } + bool InstructionDataEquals(HInstruction* other) const OVERRIDE { + return op_kind_ == other->AsArm64MultiplyAccumulate()->op_kind_; + } + + InstructionKind GetOpKind() const { return op_kind_; } + + DECLARE_INSTRUCTION(Arm64MultiplyAccumulate); + + private: + // Indicates if this is a MADD or MSUB. + InstructionKind op_kind_; + + DISALLOW_COPY_AND_ASSIGN(HArm64MultiplyAccumulate); +}; + } // namespace art #endif // ART_COMPILER_OPTIMIZING_NODES_ARM64_H_ diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc index 2204921c53..dec08d8978 100644 --- a/compiler/optimizing/optimizing_compiler.cc +++ b/compiler/optimizing/optimizing_compiler.cc @@ -110,24 +110,23 @@ class PassScope; class PassObserver : public ValueObject { public: PassObserver(HGraph* graph, - const char* method_name, CodeGenerator* codegen, std::ostream* visualizer_output, CompilerDriver* compiler_driver) : graph_(graph), - method_name_(method_name), + cached_method_name_(), timing_logger_enabled_(compiler_driver->GetDumpPasses()), - timing_logger_(method_name, true, true), + timing_logger_(timing_logger_enabled_ ? GetMethodName() : "", true, true), disasm_info_(graph->GetArena()), visualizer_enabled_(!compiler_driver->GetDumpCfgFileName().empty()), visualizer_(visualizer_output, graph, *codegen), graph_in_bad_state_(false) { if (timing_logger_enabled_ || visualizer_enabled_) { - if (!IsVerboseMethod(compiler_driver, method_name)) { + if (!IsVerboseMethod(compiler_driver, GetMethodName())) { timing_logger_enabled_ = visualizer_enabled_ = false; } if (visualizer_enabled_) { - visualizer_.PrintHeader(method_name_); + visualizer_.PrintHeader(GetMethodName()); codegen->SetDisassemblyInformation(&disasm_info_); } } @@ -135,7 +134,7 @@ class PassObserver : public ValueObject { ~PassObserver() { if (timing_logger_enabled_) { - LOG(INFO) << "TIMINGS " << method_name_; + LOG(INFO) << "TIMINGS " << GetMethodName(); LOG(INFO) << Dumpable<TimingLogger>(timing_logger_); } } @@ -148,6 +147,14 @@ class PassObserver : public ValueObject { void SetGraphInBadState() { graph_in_bad_state_ = true; } + const char* GetMethodName() { + // PrettyMethod() is expensive, so we delay calling it until we actually have to. 
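// Lazily caching it also means a method that is neither timed nor dumped
// never pays for PrettyMethod() at all, and storing the result in a
// std::string member keeps the returned const char* valid for the
// observer's lifetime (the timing logger keeps the pointer it is given).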
+ if (cached_method_name_.empty()) { + cached_method_name_ = PrettyMethod(graph_->GetMethodIdx(), graph_->GetDexFile()); + } + return cached_method_name_.c_str(); + } + private: void StartPass(const char* pass_name) { // Dump graph first, then start timer. @@ -206,7 +213,8 @@ class PassObserver : public ValueObject { } HGraph* const graph_; - const char* method_name_; + + std::string cached_method_name_; bool timing_logger_enabled_; TimingLogger timing_logger_; @@ -664,7 +672,6 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena, jobject class_loader, const DexFile& dex_file, Handle<mirror::DexCache> dex_cache) const { - std::string method_name = PrettyMethod(method_idx, dex_file); MaybeRecordStat(MethodCompilationStat::kAttemptCompilation); CompilerDriver* compiler_driver = GetCompilerDriver(); InstructionSet instruction_set = compiler_driver->GetInstructionSet(); @@ -728,7 +735,6 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena, compiler_driver->GetCompilerOptions().GetGenerateDebugInfo()); PassObserver pass_observer(graph, - method_name.c_str(), codegen.get(), visualizer_output_.get(), compiler_driver); @@ -756,7 +762,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena, interpreter_metadata, dex_cache); - VLOG(compiler) << "Building " << method_name; + VLOG(compiler) << "Building " << pass_observer.GetMethodName(); { PassScope scope(HGraphBuilder::kBuilderPassName, &pass_observer); @@ -766,13 +772,14 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena, } } - VLOG(compiler) << "Optimizing " << method_name; + VLOG(compiler) << "Optimizing " << pass_observer.GetMethodName(); if (run_optimizations_) { { PassScope scope(SsaBuilder::kSsaBuilderPassName, &pass_observer); if (!graph->TryBuildingSsa()) { // We could not transform the graph to SSA, bailout. - LOG(INFO) << "Skipping compilation of " << method_name << ": it contains a non natural loop"; + LOG(INFO) << "Skipping compilation of " << pass_observer.GetMethodName() + << ": it contains a non natural loop"; MaybeRecordStat(MethodCompilationStat::kNotCompiledCannotBuildSSA); pass_observer.SetGraphInBadState(); return nullptr; diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc index f3d075caaa..d1770b75ab 100644 --- a/compiler/optimizing/prepare_for_register_allocation.cc +++ b/compiler/optimizing/prepare_for_register_allocation.cc @@ -48,22 +48,34 @@ void PrepareForRegisterAllocation::VisitBoundType(HBoundType* bound_type) { } void PrepareForRegisterAllocation::VisitClinitCheck(HClinitCheck* check) { - // Try to find a static invoke from which this check originated. - HInvokeStaticOrDirect* invoke = nullptr; + // Try to find a static invoke or a new-instance from which this check originated. 
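// Merging is sound because both kinds of user can take over the check: a
// static invoke carries it as an implicit clinit check, and an allocation
// is downgraded from the "initialized" entrypoint to
// kQuickAllocObjectResolved, whose slow path initializes the class itself
// if that has not happened yet.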
+ HInstruction* implicit_clinit = nullptr; for (HUseIterator<HInstruction*> it(check->GetUses()); !it.Done(); it.Advance()) { HInstruction* user = it.Current()->GetUser(); - if (user->IsInvokeStaticOrDirect() && CanMoveClinitCheck(check, user)) { - invoke = user->AsInvokeStaticOrDirect(); - DCHECK(invoke->IsStaticWithExplicitClinitCheck()); - invoke->RemoveExplicitClinitCheck(HInvokeStaticOrDirect::ClinitCheckRequirement::kImplicit); + if ((user->IsInvokeStaticOrDirect() || user->IsNewInstance()) && + CanMoveClinitCheck(check, user)) { + implicit_clinit = user; + if (user->IsInvokeStaticOrDirect()) { + DCHECK(user->AsInvokeStaticOrDirect()->IsStaticWithExplicitClinitCheck()); + user->AsInvokeStaticOrDirect()->RemoveExplicitClinitCheck( + HInvokeStaticOrDirect::ClinitCheckRequirement::kImplicit); + } else { + DCHECK(user->IsNewInstance()); + // We delegate the initialization duty to the allocation. + if (user->AsNewInstance()->GetEntrypoint() == kQuickAllocObjectInitialized) { + user->AsNewInstance()->SetEntrypoint(kQuickAllocObjectResolved); + } + } break; } } - // If we found a static invoke for merging, remove the check from all other static invokes. - if (invoke != nullptr) { + // If we found a static invoke or new-instance for merging, remove the check + // from dominated static invokes. + if (implicit_clinit != nullptr) { for (HUseIterator<HInstruction*> it(check->GetUses()); !it.Done(); ) { HInstruction* user = it.Current()->GetUser(); - DCHECK(invoke->StrictlyDominates(user)); // All other uses must be dominated. + // All other uses must be dominated. + DCHECK(implicit_clinit->StrictlyDominates(user) || (implicit_clinit == user)); it.Advance(); // Advance before we remove the node, reference to the next node is preserved. if (user->IsInvokeStaticOrDirect()) { user->AsInvokeStaticOrDirect()->RemoveExplicitClinitCheck( @@ -77,8 +89,8 @@ void PrepareForRegisterAllocation::VisitClinitCheck(HClinitCheck* check) { check->ReplaceWith(load_class); - if (invoke != nullptr) { - // Remove the check from the graph. It has been merged into the invoke. + if (implicit_clinit != nullptr) { + // Remove the check from the graph. It has been merged into the invoke or new-instance. check->GetBlock()->RemoveInstruction(check); // Check if we can merge the load class as well. if (can_merge_with_load_class && !load_class->HasUses()) { @@ -92,6 +104,29 @@ void PrepareForRegisterAllocation::VisitClinitCheck(HClinitCheck* check) { } } +void PrepareForRegisterAllocation::VisitNewInstance(HNewInstance* instruction) { + HLoadClass* load_class = instruction->InputAt(0)->AsLoadClass(); + bool has_only_one_use = load_class->HasOnlyOneNonEnvironmentUse(); + // Change the entrypoint to kQuickAllocObject if either: + // - the class is finalizable (only kQuickAllocObject handles finalizable classes), + // - the class needs access checks (we do not know if it's finalizable), + // - or the load class has only one use. + if (instruction->IsFinalizable() || has_only_one_use || load_class->NeedsAccessCheck()) { + instruction->SetEntrypoint(kQuickAllocObject); + instruction->ReplaceInput(GetGraph()->GetIntConstant(load_class->GetTypeIndex()), 0); + // The allocation entry point that deals with access checks does not work with inlined + // methods, so we need to check whether this allocation comes from an inlined method. + if (has_only_one_use && !instruction->GetEnvironment()->IsFromInlinedInvoke()) { + // We can remove the load class from the graph. 
If it needed access checks, we delegate + // the access check to the allocation. + if (load_class->NeedsAccessCheck()) { + instruction->SetEntrypoint(kQuickAllocObjectWithAccessCheck); + } + load_class->GetBlock()->RemoveInstruction(load_class); + } + } +} + void PrepareForRegisterAllocation::VisitCondition(HCondition* condition) { bool needs_materialization = false; if (!condition->GetUses().HasOnlyOneUse() || !condition->GetEnvUses().IsEmpty()) { diff --git a/compiler/optimizing/prepare_for_register_allocation.h b/compiler/optimizing/prepare_for_register_allocation.h index a70fb309df..9b2434250d 100644 --- a/compiler/optimizing/prepare_for_register_allocation.h +++ b/compiler/optimizing/prepare_for_register_allocation.h @@ -40,6 +40,7 @@ class PrepareForRegisterAllocation : public HGraphDelegateVisitor { void VisitClinitCheck(HClinitCheck* check) OVERRIDE; void VisitCondition(HCondition* condition) OVERRIDE; void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE; + void VisitNewInstance(HNewInstance* instruction) OVERRIDE; bool CanMoveClinitCheck(HInstruction* input, HInstruction* user); diff --git a/compiler/optimizing/primitive_type_propagation.cc b/compiler/optimizing/primitive_type_propagation.cc index c98f43e461..bde54ee977 100644 --- a/compiler/optimizing/primitive_type_propagation.cc +++ b/compiler/optimizing/primitive_type_propagation.cc @@ -63,7 +63,6 @@ bool PrimitiveTypePropagation::UpdateType(HPhi* phi) { : SsaBuilder::GetFloatOrDoubleEquivalent(phi, input, new_type); phi->ReplaceInput(equivalent, i); if (equivalent->IsPhi()) { - equivalent->AsPhi()->SetLive(); AddToWorklist(equivalent->AsPhi()); } else if (equivalent == input) { // The input has changed its type. It can be an input of other phis, diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc index 5190eb3b26..9e6cfbe653 100644 --- a/compiler/optimizing/ssa_builder.cc +++ b/compiler/optimizing/ssa_builder.cc @@ -22,6 +22,13 @@ namespace art { +// Returns whether this is a loop header phi which was eagerly created but later +// found inconsistent due to the vreg being undefined in one of its predecessors. +// Such phi is marked dead and should be ignored until its removal in SsaPhiElimination. +static bool IsUndefinedLoopHeaderPhi(HPhi* phi) { + return phi->IsLoopHeaderPhi() && phi->InputCount() != phi->GetBlock()->GetPredecessors().size(); +} + /** * A debuggable application may require to reviving phis, to ensure their * associated DEX register is available to a debugger. This class implements @@ -165,17 +172,15 @@ bool DeadPhiHandling::UpdateType(HPhi* phi) { void DeadPhiHandling::VisitBasicBlock(HBasicBlock* block) { for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) { HPhi* phi = it.Current()->AsPhi(); + if (IsUndefinedLoopHeaderPhi(phi)) { + DCHECK(phi->IsDead()); + continue; + } if (phi->IsDead() && phi->HasEnvironmentUses()) { phi->SetLive(); if (block->IsLoopHeader()) { - // Give a type to the loop phi to guarantee convergence of the algorithm. - // Note that the dead phi may already have a type if it is an equivalent - // generated for a typed LoadLocal. In that case we do not change the - // type because it could lead to an unsupported PrimNot/Float/Double -> - // PrimInt/Long transition and create same type equivalents. - if (phi->GetType() == Primitive::kPrimVoid) { - phi->SetType(phi->InputAt(0)->GetType()); - } + // Loop phis must have a type to guarantee convergence of the algorithm. 
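// This now holds by construction: the HPhi constructor (see nodes.h above)
// runs ToPhiType on the requested type and DCHECKs against kPrimVoid, and
// the SsaBuilder below now types new phis after an incoming value instead
// of creating them as kPrimVoid.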
+ DCHECK_NE(phi->GetType(), Primitive::kPrimVoid); AddToWorklist(phi); } else { // Because we are doing a reverse post order visit, all inputs of @@ -220,6 +225,27 @@ void DeadPhiHandling::Run() { ProcessWorklist(); } +void SsaBuilder::SetLoopHeaderPhiInputs() { + for (size_t i = loop_headers_.size(); i > 0; --i) { + HBasicBlock* block = loop_headers_[i - 1]; + for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) { + HPhi* phi = it.Current()->AsPhi(); + size_t vreg = phi->GetRegNumber(); + for (HBasicBlock* predecessor : block->GetPredecessors()) { + HInstruction* value = ValueOfLocal(predecessor, vreg); + if (value == nullptr) { + // Vreg is undefined at this predecessor. Mark it dead and leave with + // fewer inputs than predecessors. SsaChecker will fail if not removed. + phi->SetDead(); + break; + } else { + phi->AddInput(value); + } + } + } + } +} + void SsaBuilder::FixNullConstantType() { // The order doesn't matter here. for (HReversePostOrderIterator itb(*GetGraph()); !itb.Done(); itb.Advance()) { @@ -283,15 +309,7 @@ void SsaBuilder::BuildSsa() { } // 2) Set inputs of loop phis. - for (HBasicBlock* block : loop_headers_) { - for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) { - HPhi* phi = it.Current()->AsPhi(); - for (HBasicBlock* predecessor : block->GetPredecessors()) { - HInstruction* input = ValueOfLocal(predecessor, phi->GetRegNumber()); - phi->AddInput(input); - } - } - } + SetLoopHeaderPhiInputs(); // 3) Mark dead phis. This will mark phis that are only used by environments: // at the DEX level, the type of these phis does not need to be consistent, but @@ -403,8 +421,13 @@ ArenaVector<HInstruction*>* SsaBuilder::GetLocalsFor(HBasicBlock* block) { for (size_t i = 0; i < vregs; ++i) { // No point in creating the catch phi if it is already undefined at // the first throwing instruction. 
- if ((*current_locals_)[i] != nullptr) { - HPhi* phi = new (arena) HPhi(arena, i, 0, Primitive::kPrimVoid); + HInstruction* current_local_value = (*current_locals_)[i]; + if (current_local_value != nullptr) { + HPhi* phi = new (arena) HPhi( + arena, + i, + 0, + current_local_value->GetType()); block->AddPhi(phi); (*locals)[i] = phi; } @@ -451,7 +474,10 @@ void SsaBuilder::VisitBasicBlock(HBasicBlock* block) { HInstruction* incoming = ValueOfLocal(block->GetLoopInformation()->GetPreHeader(), local); if (incoming != nullptr) { HPhi* phi = new (GetGraph()->GetArena()) HPhi( - GetGraph()->GetArena(), local, 0, Primitive::kPrimVoid); + GetGraph()->GetArena(), + local, + 0, + incoming->GetType()); block->AddPhi(phi); (*current_locals_)[local] = phi; } @@ -484,8 +510,12 @@ void SsaBuilder::VisitBasicBlock(HBasicBlock* block) { } if (is_different) { + HInstruction* first_input = ValueOfLocal(block->GetPredecessors()[0], local); HPhi* phi = new (GetGraph()->GetArena()) HPhi( - GetGraph()->GetArena(), local, block->GetPredecessors().size(), Primitive::kPrimVoid); + GetGraph()->GetArena(), + local, + block->GetPredecessors().size(), + first_input->GetType()); for (size_t i = 0; i < block->GetPredecessors().size(); i++) { HInstruction* pred_value = ValueOfLocal(block->GetPredecessors()[i], local); phi->SetRawInputAt(i, pred_value); @@ -583,8 +613,16 @@ HPhi* SsaBuilder::GetFloatDoubleOrReferenceEquivalentOfPhi(HPhi* phi, Primitive: phi->GetBlock()->InsertPhiAfter(new_phi, phi); return new_phi; } else { - DCHECK_EQ(next->GetType(), type); - return next->AsPhi(); + HPhi* next_phi = next->AsPhi(); + DCHECK_EQ(next_phi->GetType(), type); + if (next_phi->IsDead()) { + // TODO(dbrazdil): Remove this SetLive (we should not need to revive phis) + // once we stop running MarkDeadPhis before PrimitiveTypePropagation. This + // cannot revive undefined loop header phis because they cannot have uses. + DCHECK(!IsUndefinedLoopHeaderPhi(next_phi)); + next_phi->SetLive(); + } + return next_phi; } } @@ -638,7 +676,36 @@ void SsaBuilder::VisitLoadLocal(HLoadLocal* load) { } void SsaBuilder::VisitStoreLocal(HStoreLocal* store) { - (*current_locals_)[store->GetLocal()->GetRegNumber()] = store->InputAt(1); + uint32_t reg_number = store->GetLocal()->GetRegNumber(); + HInstruction* stored_value = store->InputAt(1); + Primitive::Type stored_type = stored_value->GetType(); + DCHECK_NE(stored_type, Primitive::kPrimVoid); + + // Storing into vreg `reg_number` may implicitly invalidate the surrounding + // registers. Consider the following cases: + // (1) Storing a wide value must overwrite previous values in both `reg_number` + // and `reg_number+1`. We store `nullptr` in `reg_number+1`. + // (2) If vreg `reg_number-1` holds a wide value, writing into `reg_number` + // must invalidate it. We store `nullptr` in `reg_number-1`. + // Consequently, storing a wide value into the high vreg of another wide value + // will invalidate both `reg_number-1` and `reg_number+1`. + + if (reg_number != 0) { + HInstruction* local_low = (*current_locals_)[reg_number - 1]; + if (local_low != nullptr && Primitive::Is64BitType(local_low->GetType())) { + // The vreg we are storing into was previously the high vreg of a pair. + // We need to invalidate its low vreg. + DCHECK((*current_locals_)[reg_number] == nullptr); + (*current_locals_)[reg_number - 1] = nullptr; + } + } + + (*current_locals_)[reg_number] = stored_value; + if (Primitive::Is64BitType(stored_type)) { + // We are storing a pair. Invalidate the instruction in the high vreg. 
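// Worked example: if the pair (v0, v1) holds a long and a new long is now
// stored into (v1, v2), case (2) nulls v0 (v1 was the high half of the old
// pair, so the old low half no longer describes a valid wide value) and
// case (1) nulls v2 (overwritten by the new high half); v1 itself receives
// the new value.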
+ (*current_locals_)[reg_number + 1] = nullptr; + } + store->GetBlock()->RemoveInstruction(store); } diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h index 79f1a28ac8..dcce5e4c2c 100644 --- a/compiler/optimizing/ssa_builder.h +++ b/compiler/optimizing/ssa_builder.h @@ -81,6 +81,7 @@ class SsaBuilder : public HGraphVisitor { static constexpr const char* kSsaBuilderPassName = "ssa_builder"; private: + void SetLoopHeaderPhiInputs(); void FixNullConstantType(); void EquivalentPhisCleanup(); diff --git a/compiler/optimizing/ssa_phi_elimination.cc b/compiler/optimizing/ssa_phi_elimination.cc index 72f9ddd506..a3219dcc38 100644 --- a/compiler/optimizing/ssa_phi_elimination.cc +++ b/compiler/optimizing/ssa_phi_elimination.cc @@ -16,6 +16,8 @@ #include "ssa_phi_elimination.h" +#include "base/arena_containers.h" + namespace art { void SsaDeadPhiElimination::Run() { @@ -24,22 +26,36 @@ void SsaDeadPhiElimination::Run() { } void SsaDeadPhiElimination::MarkDeadPhis() { + // Phis are constructed live and should not be revived if previously marked + // dead. This algorithm temporarily breaks that invariant but we DCHECK that + // only phis which were initially live are revived. + ArenaSet<HPhi*> initially_live(graph_->GetArena()->Adapter()); + // Add to the worklist phis referenced by non-phi instructions. for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) { HBasicBlock* block = it.Current(); for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) { HPhi* phi = inst_it.Current()->AsPhi(); - // Set dead ahead of running through uses. The phi may have no use. - phi->SetDead(); + if (phi->IsDead()) { + continue; + } + + bool has_non_phi_use = false; for (HUseIterator<HInstruction*> use_it(phi->GetUses()); !use_it.Done(); use_it.Advance()) { - HUseListNode<HInstruction*>* current = use_it.Current(); - HInstruction* user = current->GetUser(); - if (!user->IsPhi()) { - worklist_.push_back(phi); - phi->SetLive(); + if (!use_it.Current()->GetUser()->IsPhi()) { + has_non_phi_use = true; break; } } + + if (has_non_phi_use) { + worklist_.push_back(phi); + } else { + phi->SetDead(); + if (kIsDebugBuild) { + initially_live.insert(phi); + } + } } } @@ -48,10 +64,13 @@ void SsaDeadPhiElimination::MarkDeadPhis() { HPhi* phi = worklist_.back(); worklist_.pop_back(); for (HInputIterator it(phi); !it.Done(); it.Advance()) { - HInstruction* input = it.Current(); - if (input->IsPhi() && input->AsPhi()->IsDead()) { - worklist_.push_back(input->AsPhi()); - input->AsPhi()->SetLive(); + HPhi* input = it.Current()->AsPhi(); + if (input != nullptr && input->IsDead()) { + // Input is a dead phi. Revive it and add to the worklist. We make sure + // that the phi was not dead initially (see definition of `initially_live`). + DCHECK(ContainsElement(initially_live, input)); + input->SetLive(); + worklist_.push_back(input); } } } @@ -118,7 +137,6 @@ void SsaRedundantPhiElimination::Run() { } if (phi->InputCount() == 0) { - DCHECK(phi->IsCatchPhi()); DCHECK(phi->IsDead()); continue; } diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc index 68e39568bb..dead8fd9a8 100644 --- a/compiler/utils/arm/assembler_arm.cc +++ b/compiler/utils/arm/assembler_arm.cc @@ -342,9 +342,9 @@ bool Address::CanHoldLoadOffsetThumb(LoadOperandType type, int offset) { return IsAbsoluteUint<12>(offset); case kLoadSWord: case kLoadDWord: - return IsAbsoluteUint<10>(offset); // VFP addressing mode. 
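// Rationale for the stricter checks below: the VFP (VLDR/VSTR) and LDRD/STRD
// addressing modes encode the offset as an 8-bit immediate scaled by 4, so
// only word-aligned offsets up to +/-1020 are representable. The plain
// IsAbsoluteUint<10>() test also accepted unaligned offsets such as 6, which
// cannot be encoded in a single instruction.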
+ return IsAbsoluteUint<10>(offset) && (offset & 3) == 0; // VFP addressing mode. case kLoadWordPair: - return IsAbsoluteUint<10>(offset); + return IsAbsoluteUint<10>(offset) && (offset & 3) == 0; default: LOG(FATAL) << "UNREACHABLE"; UNREACHABLE(); @@ -360,9 +360,9 @@ bool Address::CanHoldStoreOffsetThumb(StoreOperandType type, int offset) { return IsAbsoluteUint<12>(offset); case kStoreSWord: case kStoreDWord: - return IsAbsoluteUint<10>(offset); // VFP addressing mode. + return IsAbsoluteUint<10>(offset) && (offset & 3) == 0; // VFP addressing mode. case kStoreWordPair: - return IsAbsoluteUint<10>(offset); + return IsAbsoluteUint<10>(offset) && (offset & 3) == 0; default: LOG(FATAL) << "UNREACHABLE"; UNREACHABLE(); diff --git a/compiler/utils/arm/assembler_arm32.h b/compiler/utils/arm/assembler_arm32.h index 5233dcbbb0..ce3a87275d 100644 --- a/compiler/utils/arm/assembler_arm32.h +++ b/compiler/utils/arm/assembler_arm32.h @@ -389,8 +389,6 @@ class Arm32Assembler FINAL : public ArmAssembler { void EmitBranch(Condition cond, Label* label, bool link); static int32_t EncodeBranchOffset(int offset, int32_t inst); static int DecodeBranchOffset(int32_t inst); - int32_t EncodeTstOffset(int offset, int32_t inst); - int DecodeTstOffset(int32_t inst); bool ShifterOperandCanHoldArm32(uint32_t immediate, ShifterOperand* shifter_op); }; diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc index 297cc54e29..7ad5b440e0 100644 --- a/compiler/utils/arm/assembler_thumb2.cc +++ b/compiler/utils/arm/assembler_thumb2.cc @@ -1349,7 +1349,8 @@ void Thumb2Assembler::Emit32BitDataProcessing(Condition cond ATTRIBUTE_UNUSED, int32_t encoding = 0; if (so.IsImmediate()) { // Check special cases. - if ((opcode == SUB || opcode == ADD) && (so.GetImmediate() < (1u << 12))) { + if ((opcode == SUB || opcode == ADD) && (so.GetImmediate() < (1u << 12)) && + /* Prefer T3 encoding to T4. */ !ShifterOperandCanAlwaysHold(so.GetImmediate())) { if (set_cc != kCcSet) { if (opcode == SUB) { thumb_opcode = 5U; @@ -3220,7 +3221,7 @@ void Thumb2Assembler::Ror(Register rd, Register rm, uint32_t shift_imm, void Thumb2Assembler::Rrx(Register rd, Register rm, Condition cond, SetCc set_cc) { CheckCondition(cond); - EmitShift(rd, rm, RRX, rm, cond, set_cc); + EmitShift(rd, rm, RRX, 0, cond, set_cc); } @@ -3469,6 +3470,73 @@ void Thumb2Assembler::LoadImmediate(Register rd, int32_t value, Condition cond) } } +int32_t Thumb2Assembler::GetAllowedLoadOffsetBits(LoadOperandType type) { + switch (type) { + case kLoadSignedByte: + case kLoadSignedHalfword: + case kLoadUnsignedHalfword: + case kLoadUnsignedByte: + case kLoadWord: + // We can encode imm12 offset. + return 0xfffu; + case kLoadSWord: + case kLoadDWord: + case kLoadWordPair: + // We can encode imm8:'00' offset. + return 0xff << 2; + default: + LOG(FATAL) << "UNREACHABLE"; + UNREACHABLE(); + } +} + +int32_t Thumb2Assembler::GetAllowedStoreOffsetBits(StoreOperandType type) { + switch (type) { + case kStoreHalfword: + case kStoreByte: + case kStoreWord: + // We can encode imm12 offset. + return 0xfff; + case kStoreSWord: + case kStoreDWord: + case kStoreWordPair: + // We can encode imm8:'00' offset. 
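// 0xff << 2 == 0x3fc, i.e. an 8-bit magnitude with the low two bits forced
// to zero: exactly the offsets 0, 4, 8, ..., 1020 that the scaled imm8
// addressing mode can represent.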
+ return 0xff << 2; + default: + LOG(FATAL) << "UNREACHABLE"; + UNREACHABLE(); + } +} + +bool Thumb2Assembler::CanSplitLoadStoreOffset(int32_t allowed_offset_bits, + int32_t offset, + /*out*/ int32_t* add_to_base, + /*out*/ int32_t* offset_for_load_store) { + int32_t other_bits = offset & ~allowed_offset_bits; + if (ShifterOperandCanAlwaysHold(other_bits) || ShifterOperandCanAlwaysHold(-other_bits)) { + *add_to_base = offset & ~allowed_offset_bits; + *offset_for_load_store = offset & allowed_offset_bits; + return true; + } + return false; +} + +int32_t Thumb2Assembler::AdjustLoadStoreOffset(int32_t allowed_offset_bits, + Register temp, + Register base, + int32_t offset, + Condition cond) { + DCHECK_NE(offset & ~allowed_offset_bits, 0); + int32_t add_to_base, offset_for_load; + if (CanSplitLoadStoreOffset(allowed_offset_bits, offset, &add_to_base, &offset_for_load)) { + AddConstant(temp, base, add_to_base, cond, kCcKeep); + return offset_for_load; + } else { + LoadImmediate(temp, offset, cond); + add(temp, temp, ShifterOperand(base), cond, kCcKeep); + return 0; + } +} // Implementation note: this method must emit at most one instruction when // Address::CanHoldLoadOffsetThumb. @@ -3479,12 +3547,26 @@ void Thumb2Assembler::LoadFromOffset(LoadOperandType type, Condition cond) { if (!Address::CanHoldLoadOffsetThumb(type, offset)) { CHECK_NE(base, IP); - LoadImmediate(IP, offset, cond); - add(IP, IP, ShifterOperand(base), cond); - base = IP; - offset = 0; + // Inlined AdjustLoadStoreOffset() allows us to pull a few more tricks. + int32_t allowed_offset_bits = GetAllowedLoadOffsetBits(type); + DCHECK_NE(offset & ~allowed_offset_bits, 0); + int32_t add_to_base, offset_for_load; + if (CanSplitLoadStoreOffset(allowed_offset_bits, offset, &add_to_base, &offset_for_load)) { + // Use reg for the adjusted base. If it's low reg, we may end up using 16-bit load. + AddConstant(reg, base, add_to_base, cond, kCcKeep); + base = reg; + offset = offset_for_load; + } else { + Register temp = (reg == base) ? IP : reg; + LoadImmediate(temp, offset, cond); + // TODO: Implement indexed load (not available for LDRD) and use it here to avoid the ADD. + // Use reg for the adjusted base. If it's low reg, we may end up using 16-bit load. + add(reg, reg, ShifterOperand((reg == base) ? IP : base), cond, kCcKeep); + base = reg; + offset = 0; + } } - CHECK(Address::CanHoldLoadOffsetThumb(type, offset)); + DCHECK(Address::CanHoldLoadOffsetThumb(type, offset)); switch (type) { case kLoadSignedByte: ldrsb(reg, Address(base, offset), cond); @@ -3510,7 +3592,6 @@ void Thumb2Assembler::LoadFromOffset(LoadOperandType type, } } - // Implementation note: this method must emit at most one instruction when // Address::CanHoldLoadOffsetThumb, as expected by JIT::GuardedLoadFromOffset. 
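// A worked example of the CanSplitLoadStoreOffset() decomposition above,
// using a value that also appears in the updated tests below: a word-pair
// access at offset 0x400a4 has allowed_offset_bits == 0x3fc, so
//   add_to_base           == 0x400a4 & ~0x3fc == 0x40000  (valid ADD immediate)
//   offset_for_load_store == 0x400a4 &  0x3fc == 0xa4
// and the result assembles to "add.w ip, base, #0x40000" followed by
// "ldrd/strd ..., [ip, #0xa4]" instead of the old movw/movt/add sequence.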
void Thumb2Assembler::LoadSFromOffset(SRegister reg, @@ -3519,12 +3600,10 @@ void Thumb2Assembler::LoadSFromOffset(SRegister reg, Condition cond) { if (!Address::CanHoldLoadOffsetThumb(kLoadSWord, offset)) { CHECK_NE(base, IP); - LoadImmediate(IP, offset, cond); - add(IP, IP, ShifterOperand(base), cond); + offset = AdjustLoadStoreOffset(GetAllowedLoadOffsetBits(kLoadSWord), IP, base, offset, cond); base = IP; - offset = 0; } - CHECK(Address::CanHoldLoadOffsetThumb(kLoadSWord, offset)); + DCHECK(Address::CanHoldLoadOffsetThumb(kLoadSWord, offset)); vldrs(reg, Address(base, offset), cond); } @@ -3537,12 +3616,10 @@ void Thumb2Assembler::LoadDFromOffset(DRegister reg, Condition cond) { if (!Address::CanHoldLoadOffsetThumb(kLoadDWord, offset)) { CHECK_NE(base, IP); - LoadImmediate(IP, offset, cond); - add(IP, IP, ShifterOperand(base), cond); + offset = AdjustLoadStoreOffset(GetAllowedLoadOffsetBits(kLoadDWord), IP, base, offset, cond); base = IP; - offset = 0; } - CHECK(Address::CanHoldLoadOffsetThumb(kLoadDWord, offset)); + DCHECK(Address::CanHoldLoadOffsetThumb(kLoadDWord, offset)); vldrd(reg, Address(base, offset), cond); } @@ -3573,12 +3650,12 @@ void Thumb2Assembler::StoreToOffset(StoreOperandType type, offset += kRegisterSize; } } - LoadImmediate(tmp_reg, offset, cond); - add(tmp_reg, tmp_reg, ShifterOperand(base), AL); + // TODO: Implement indexed store (not available for STRD), inline AdjustLoadStoreOffset() + // and in the "unsplittable" path get rid of the "add" by using the store indexed instead. + offset = AdjustLoadStoreOffset(GetAllowedStoreOffsetBits(type), tmp_reg, base, offset, cond); base = tmp_reg; - offset = 0; } - CHECK(Address::CanHoldStoreOffsetThumb(type, offset)); + DCHECK(Address::CanHoldStoreOffsetThumb(type, offset)); switch (type) { case kStoreByte: strb(reg, Address(base, offset), cond); @@ -3611,12 +3688,10 @@ void Thumb2Assembler::StoreSToOffset(SRegister reg, Condition cond) { if (!Address::CanHoldStoreOffsetThumb(kStoreSWord, offset)) { CHECK_NE(base, IP); - LoadImmediate(IP, offset, cond); - add(IP, IP, ShifterOperand(base), cond); + offset = AdjustLoadStoreOffset(GetAllowedStoreOffsetBits(kStoreSWord), IP, base, offset, cond); base = IP; - offset = 0; } - CHECK(Address::CanHoldStoreOffsetThumb(kStoreSWord, offset)); + DCHECK(Address::CanHoldStoreOffsetThumb(kStoreSWord, offset)); vstrs(reg, Address(base, offset), cond); } @@ -3629,12 +3704,10 @@ void Thumb2Assembler::StoreDToOffset(DRegister reg, Condition cond) { if (!Address::CanHoldStoreOffsetThumb(kStoreDWord, offset)) { CHECK_NE(base, IP); - LoadImmediate(IP, offset, cond); - add(IP, IP, ShifterOperand(base), cond); + offset = AdjustLoadStoreOffset(GetAllowedStoreOffsetBits(kStoreDWord), IP, base, offset, cond); base = IP; - offset = 0; } - CHECK(Address::CanHoldStoreOffsetThumb(kStoreDWord, offset)); + DCHECK(Address::CanHoldStoreOffsetThumb(kStoreDWord, offset)); vstrd(reg, Address(base, offset), cond); } diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h index e18361300a..9aeece8e57 100644 --- a/compiler/utils/arm/assembler_thumb2.h +++ b/compiler/utils/arm/assembler_thumb2.h @@ -729,13 +729,23 @@ class Thumb2Assembler FINAL : public ArmAssembler { void EmitBranch(Condition cond, Label* label, bool link, bool x); static int32_t EncodeBranchOffset(int32_t offset, int32_t inst); static int DecodeBranchOffset(int32_t inst); - int32_t EncodeTstOffset(int offset, int32_t inst); - int DecodeTstOffset(int32_t inst); void EmitShift(Register rd, Register rm, Shift 
shift, uint8_t amount, Condition cond = AL, SetCc set_cc = kCcDontCare); void EmitShift(Register rd, Register rn, Shift shift, Register rm, Condition cond = AL, SetCc set_cc = kCcDontCare); + static int32_t GetAllowedLoadOffsetBits(LoadOperandType type); + static int32_t GetAllowedStoreOffsetBits(StoreOperandType type); + bool CanSplitLoadStoreOffset(int32_t allowed_offset_bits, + int32_t offset, + /*out*/ int32_t* add_to_base, + /*out*/ int32_t* offset_for_load_store); + int32_t AdjustLoadStoreOffset(int32_t allowed_offset_bits, + Register temp, + Register base, + int32_t offset, + Condition cond); + // Whether the assembler can relocate branches. If false, unresolved branches will be // emitted on 32bits. bool can_relocate_branches_; diff --git a/compiler/utils/arm/assembler_thumb2_test.cc b/compiler/utils/arm/assembler_thumb2_test.cc index cb4b20b5ba..7b32b0fd26 100644 --- a/compiler/utils/arm/assembler_thumb2_test.cc +++ b/compiler/utils/arm/assembler_thumb2_test.cc @@ -243,7 +243,7 @@ TEST_F(AssemblerThumb2Test, sub) { const char* expected = "subs r1, r0, #42\n" - "subw r1, r0, #42\n" + "sub.w r1, r0, #42\n" "subs r1, r0, r2, asr #31\n" "sub r1, r0, r2, asr #31\n"; DriverStr(expected, "sub"); @@ -257,7 +257,7 @@ TEST_F(AssemblerThumb2Test, add) { const char* expected = "adds r1, r0, #42\n" - "addw r1, r0, #42\n" + "add.w r1, r0, #42\n" "adds r1, r0, r2, asr #31\n" "add r1, r0, r2, asr #31\n"; DriverStr(expected, "add"); @@ -305,21 +305,18 @@ TEST_F(AssemblerThumb2Test, StoreWordToNonThumbOffset) { __ StoreToOffset(type, arm::IP, arm::R5, offset); const char* expected = - "mov ip, #4096\n" // LoadImmediate(ip, 4096) - "add ip, ip, sp\n" + "add.w ip, sp, #4096\n" // AddConstant(ip, sp, 4096) "str r0, [ip, #0]\n" - "str r5, [sp, #-4]!\n" // Push(r5) - "movw r5, #4100\n" // LoadImmediate(r5, 4096 + kRegisterSize) - "add r5, r5, sp\n" - "str ip, [r5, #0]\n" - "ldr r5, [sp], #4\n" // Pop(r5) - - "str r6, [sp, #-4]!\n" // Push(r6) - "mov r6, #4096\n" // LoadImmediate(r6, 4096) - "add r6, r6, r5\n" - "str ip, [r6, #0]\n" - "ldr r6, [sp], #4\n"; // Pop(r6) + "str r5, [sp, #-4]!\n" // Push(r5) + "add.w r5, sp, #4096\n" // AddConstant(r5, 4100 & ~0xfff) + "str ip, [r5, #4]\n" // StoreToOffset(type, ip, r5, 4100 & 0xfff) + "ldr r5, [sp], #4\n" // Pop(r5) + + "str r6, [sp, #-4]!\n" // Push(r6) + "add.w r6, r5, #4096\n" // AddConstant(r6, r5, 4096 & ~0xfff) + "str ip, [r6, #0]\n" // StoreToOffset(type, ip, r6, 4096 & 0xfff) + "ldr r6, [sp], #4\n"; // Pop(r6) DriverStr(expected, "StoreWordToNonThumbOffset"); } @@ -360,20 +357,17 @@ TEST_F(AssemblerThumb2Test, StoreWordPairToNonThumbOffset) { __ StoreToOffset(type, arm::R11, arm::R5, offset); const char* expected = - "mov ip, #1024\n" // LoadImmediate(ip, 1024) - "add ip, ip, sp\n" + "add.w ip, sp, #1024\n" // AddConstant(ip, sp, 1024) "strd r0, r1, [ip, #0]\n" "str r5, [sp, #-4]!\n" // Push(r5) - "movw r5, #1028\n" // LoadImmediate(r5, 1024 + kRegisterSize) - "add r5, r5, sp\n" - "strd r11, ip, [r5, #0]\n" + "add.w r5, sp, #1024\n" // AddConstant(r5, sp, (1024 + kRegisterSize) & ~0x3fc) + "strd r11, ip, [r5, #4]\n" // StoreToOffset(type, r11, sp, (1024 + kRegisterSize) & 0x3fc) "ldr r5, [sp], #4\n" // Pop(r5) "str r6, [sp, #-4]!\n" // Push(r6) - "mov r6, #1024\n" // LoadImmediate(r6, 1024) - "add r6, r6, r5\n" - "strd r11, ip, [r6, #0]\n" + "add.w r6, r5, #1024\n" // AddConstant(r6, r5, 1024 & ~0x3fc) + "strd r11, ip, [r6, #0]\n" // StoreToOffset(type, r11, r6, 1024 & 0x3fc) "ldr r6, [sp], #4\n"; // Pop(r6) DriverStr(expected, 
"StoreWordPairToNonThumbOffset"); } diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc index 2ae88413e7..1de51a2dc8 100644 --- a/compiler/utils/assembler_thumb_test.cc +++ b/compiler/utils/assembler_thumb_test.cc @@ -466,6 +466,38 @@ TEST(Thumb2AssemblerTest, DataProcessingShiftedRegister) { EmitAndCheck(&assembler, "DataProcessingShiftedRegister"); } +TEST(Thumb2AssemblerTest, ShiftImmediate) { + // Note: This test produces the same results as DataProcessingShiftedRegister + // but it does so using shift functions instead of mov(). + arm::Thumb2Assembler assembler; + + // 16-bit variants. + __ Lsl(R3, R4, 4); + __ Lsr(R3, R4, 5); + __ Asr(R3, R4, 6); + + // 32-bit ROR because ROR immediate doesn't have the same 16-bit version as other shifts. + __ Ror(R3, R4, 7); + + // 32-bit RRX because RRX has no 16-bit version. + __ Rrx(R3, R4); + + // 32 bit variants (not setting condition codes). + __ Lsl(R3, R4, 4, AL, kCcKeep); + __ Lsr(R3, R4, 5, AL, kCcKeep); + __ Asr(R3, R4, 6, AL, kCcKeep); + __ Ror(R3, R4, 7, AL, kCcKeep); + __ Rrx(R3, R4, AL, kCcKeep); + + // 32 bit variants (high registers). + __ Lsls(R8, R4, 4); + __ Lsrs(R8, R4, 5); + __ Asrs(R8, R4, 6); + __ Rors(R8, R4, 7); + __ Rrxs(R8, R4); + + EmitAndCheck(&assembler, "ShiftImmediate"); +} TEST(Thumb2AssemblerTest, BasicLoad) { arm::Thumb2Assembler assembler; @@ -823,29 +855,80 @@ TEST(Thumb2AssemblerTest, SpecialAddSub) { __ add(R2, SP, ShifterOperand(0xf00)); // 32 bit due to imm size. __ add(SP, SP, ShifterOperand(0xf00)); // 32 bit due to imm size. + __ add(SP, SP, ShifterOperand(0xffc)); // 32 bit due to imm size; encoding T4. - __ sub(SP, SP, ShifterOperand(0x50)); // 16 bit - __ sub(R0, SP, ShifterOperand(0x50)); // 32 bit - __ sub(R8, SP, ShifterOperand(0x50)); // 32 bit. + __ sub(SP, SP, ShifterOperand(0x50)); // 16 bit + __ sub(R0, SP, ShifterOperand(0x50)); // 32 bit + __ sub(R8, SP, ShifterOperand(0x50)); // 32 bit. - __ sub(SP, SP, ShifterOperand(0xf00)); // 32 bit due to imm size + __ sub(SP, SP, ShifterOperand(0xf00)); // 32 bit due to imm size + __ sub(SP, SP, ShifterOperand(0xffc)); // 32 bit due to imm size; encoding T4. EmitAndCheck(&assembler, "SpecialAddSub"); } +TEST(Thumb2AssemblerTest, LoadFromOffset) { + arm::Thumb2Assembler assembler; + + __ LoadFromOffset(kLoadWord, R2, R4, 12); + __ LoadFromOffset(kLoadWord, R2, R4, 0xfff); + __ LoadFromOffset(kLoadWord, R2, R4, 0x1000); + __ LoadFromOffset(kLoadWord, R2, R4, 0x1000a4); + __ LoadFromOffset(kLoadWord, R2, R4, 0x101000); + __ LoadFromOffset(kLoadWord, R4, R4, 0x101000); + __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 12); + __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0xfff); + __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0x1000); + __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0x1000a4); + __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0x101000); + __ LoadFromOffset(kLoadUnsignedHalfword, R4, R4, 0x101000); + __ LoadFromOffset(kLoadWordPair, R2, R4, 12); + __ LoadFromOffset(kLoadWordPair, R2, R4, 0x3fc); + __ LoadFromOffset(kLoadWordPair, R2, R4, 0x400); + __ LoadFromOffset(kLoadWordPair, R2, R4, 0x400a4); + __ LoadFromOffset(kLoadWordPair, R2, R4, 0x40400); + __ LoadFromOffset(kLoadWordPair, R4, R4, 0x40400); + + __ LoadFromOffset(kLoadWord, R0, R12, 12); // 32-bit because of R12. 
+ __ LoadFromOffset(kLoadWord, R2, R4, 0xa4 - 0x100000); + + __ LoadFromOffset(kLoadSignedByte, R2, R4, 12); + __ LoadFromOffset(kLoadUnsignedByte, R2, R4, 12); + __ LoadFromOffset(kLoadSignedHalfword, R2, R4, 12); + + EmitAndCheck(&assembler, "LoadFromOffset"); +} + TEST(Thumb2AssemblerTest, StoreToOffset) { arm::Thumb2Assembler assembler; - __ StoreToOffset(kStoreWord, R2, R4, 12); // Simple - __ StoreToOffset(kStoreWord, R2, R4, 0x2000); // Offset too big. - __ StoreToOffset(kStoreWord, R0, R12, 12); - __ StoreToOffset(kStoreHalfword, R0, R12, 12); - __ StoreToOffset(kStoreByte, R2, R12, 12); + __ StoreToOffset(kStoreWord, R2, R4, 12); + __ StoreToOffset(kStoreWord, R2, R4, 0xfff); + __ StoreToOffset(kStoreWord, R2, R4, 0x1000); + __ StoreToOffset(kStoreWord, R2, R4, 0x1000a4); + __ StoreToOffset(kStoreWord, R2, R4, 0x101000); + __ StoreToOffset(kStoreWord, R4, R4, 0x101000); + __ StoreToOffset(kStoreHalfword, R2, R4, 12); + __ StoreToOffset(kStoreHalfword, R2, R4, 0xfff); + __ StoreToOffset(kStoreHalfword, R2, R4, 0x1000); + __ StoreToOffset(kStoreHalfword, R2, R4, 0x1000a4); + __ StoreToOffset(kStoreHalfword, R2, R4, 0x101000); + __ StoreToOffset(kStoreHalfword, R4, R4, 0x101000); + __ StoreToOffset(kStoreWordPair, R2, R4, 12); + __ StoreToOffset(kStoreWordPair, R2, R4, 0x3fc); + __ StoreToOffset(kStoreWordPair, R2, R4, 0x400); + __ StoreToOffset(kStoreWordPair, R2, R4, 0x400a4); + __ StoreToOffset(kStoreWordPair, R2, R4, 0x40400); + __ StoreToOffset(kStoreWordPair, R4, R4, 0x40400); + + __ StoreToOffset(kStoreWord, R0, R12, 12); // 32-bit because of R12. + __ StoreToOffset(kStoreWord, R2, R4, 0xa4 - 0x100000); + + __ StoreToOffset(kStoreByte, R2, R4, 12); EmitAndCheck(&assembler, "StoreToOffset"); } - TEST(Thumb2AssemblerTest, IfThen) { arm::Thumb2Assembler assembler; diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc index b79c2e46f0..9246c827a7 100644 --- a/compiler/utils/assembler_thumb_test_expected.cc.inc +++ b/compiler/utils/assembler_thumb_test_expected.cc.inc @@ -132,8 +132,8 @@ const char* DataProcessingRegisterResults[] = { const char* DataProcessingImmediateResults[] = { " 0: 2055 movs r0, #85 ; 0x55\n", " 2: f06f 0055 mvn.w r0, #85 ; 0x55\n", - " 6: f201 0055 addw r0, r1, #85 ; 0x55\n", - " a: f2a1 0055 subw r0, r1, #85 ; 0x55\n", + " 6: f101 0055 add.w r0, r1, #85 ; 0x55\n", + " a: f1a1 0055 sub.w r0, r1, #85 ; 0x55\n", " e: f001 0055 and.w r0, r1, #85 ; 0x55\n", " 12: f041 0055 orr.w r0, r1, #85 ; 0x55\n", " 16: f061 0055 orn r0, r1, #85 ; 0x55\n", @@ -201,6 +201,24 @@ const char* DataProcessingShiftedRegisterResults[] = { " 32: ea5f 0834 movs.w r8, r4, rrx\n", nullptr }; +const char* ShiftImmediateResults[] = { + " 0: 0123 lsls r3, r4, #4\n", + " 2: 0963 lsrs r3, r4, #5\n", + " 4: 11a3 asrs r3, r4, #6\n", + " 6: ea4f 13f4 mov.w r3, r4, ror #7\n", + " a: ea4f 0334 mov.w r3, r4, rrx\n", + " e: ea4f 1304 mov.w r3, r4, lsl #4\n", + " 12: ea4f 1354 mov.w r3, r4, lsr #5\n", + " 16: ea4f 13a4 mov.w r3, r4, asr #6\n", + " 1a: ea4f 13f4 mov.w r3, r4, ror #7\n", + " 1e: ea4f 0334 mov.w r3, r4, rrx\n", + " 22: ea5f 1804 movs.w r8, r4, lsl #4\n", + " 26: ea5f 1854 movs.w r8, r4, lsr #5\n", + " 2a: ea5f 18a4 movs.w r8, r4, asr #6\n", + " 2e: ea5f 18f4 movs.w r8, r4, ror #7\n", + " 32: ea5f 0834 movs.w r8, r4, rrx\n", + nullptr +}; const char* BasicLoadResults[] = { " 0: 69a3 ldr r3, [r4, #24]\n", " 2: 7e23 ldrb r3, [r4, #24]\n", @@ -434,23 +452,115 @@ const char* MovWMovTResults[] = { const char* 
SpecialAddSubResults[] = { " 0: aa14 add r2, sp, #80 ; 0x50\n", " 2: b014 add sp, #80 ; 0x50\n", - " 4: f20d 0850 addw r8, sp, #80 ; 0x50\n", - " 8: f60d 7200 addw r2, sp, #3840 ; 0xf00\n", - " c: f60d 7d00 addw sp, sp, #3840 ; 0xf00\n", - " 10: b094 sub sp, #80 ; 0x50\n", - " 12: f2ad 0050 subw r0, sp, #80 ; 0x50\n", - " 16: f2ad 0850 subw r8, sp, #80 ; 0x50\n", - " 1a: f6ad 7d00 subw sp, sp, #3840 ; 0xf00\n", + " 4: f10d 0850 add.w r8, sp, #80 ; 0x50\n", + " 8: f50d 6270 add.w r2, sp, #3840 ; 0xf00\n", + " c: f50d 6d70 add.w sp, sp, #3840 ; 0xf00\n", + " 10: f60d 7dfc addw sp, sp, #4092 ; 0xffc\n", + " 14: b094 sub sp, #80 ; 0x50\n", + " 16: f1ad 0050 sub.w r0, sp, #80 ; 0x50\n", + " 1a: f1ad 0850 sub.w r8, sp, #80 ; 0x50\n", + " 1e: f5ad 6d70 sub.w sp, sp, #3840 ; 0xf00\n", + " 22: f6ad 7dfc subw sp, sp, #4092 ; 0xffc\n", + nullptr +}; +const char* LoadFromOffsetResults[] = { + " 0: 68e2 ldr r2, [r4, #12]\n", + " 2: f8d4 2fff ldr.w r2, [r4, #4095] ; 0xfff\n", + " 6: f504 5280 add.w r2, r4, #4096 ; 0x1000\n", + " a: 6812 ldr r2, [r2, #0]\n", + " c: f504 1280 add.w r2, r4, #1048576 ; 0x100000\n", + " 10: f8d2 20a4 ldr.w r2, [r2, #164] ; 0xa4\n", + " 14: f241 0200 movw r2, #4096 ; 0x1000\n", + " 18: f2c0 0210 movt r2, #16\n", + " 1c: 4422 add r2, r4\n", + " 1e: 6812 ldr r2, [r2, #0]\n", + " 20: f241 0c00 movw ip, #4096 ; 0x1000\n", + " 24: f2c0 0c10 movt ip, #16\n", + " 28: 4464 add r4, ip\n", + " 2a: 6824 ldr r4, [r4, #0]\n", + " 2c: 89a2 ldrh r2, [r4, #12]\n", + " 2e: f8b4 2fff ldrh.w r2, [r4, #4095] ; 0xfff\n", + " 32: f504 5280 add.w r2, r4, #4096 ; 0x1000\n", + " 36: 8812 ldrh r2, [r2, #0]\n", + " 38: f504 1280 add.w r2, r4, #1048576 ; 0x100000\n", + " 3c: f8b2 20a4 ldrh.w r2, [r2, #164] ; 0xa4\n", + " 40: f241 0200 movw r2, #4096 ; 0x1000\n", + " 44: f2c0 0210 movt r2, #16\n", + " 48: 4422 add r2, r4\n", + " 4a: 8812 ldrh r2, [r2, #0]\n", + " 4c: f241 0c00 movw ip, #4096 ; 0x1000\n", + " 50: f2c0 0c10 movt ip, #16\n", + " 54: 4464 add r4, ip\n", + " 56: 8824 ldrh r4, [r4, #0]\n", + " 58: e9d4 2303 ldrd r2, r3, [r4, #12]\n", + " 5c: e9d4 23ff ldrd r2, r3, [r4, #1020] ; 0x3fc\n", + " 60: f504 6280 add.w r2, r4, #1024 ; 0x400\n", + " 64: e9d2 2300 ldrd r2, r3, [r2]\n", + " 68: f504 2280 add.w r2, r4, #262144 ; 0x40000\n", + " 6c: e9d2 2329 ldrd r2, r3, [r2, #164]; 0xa4\n", + " 70: f240 4200 movw r2, #1024 ; 0x400\n", + " 74: f2c0 0204 movt r2, #4\n", + " 78: 4422 add r2, r4\n", + " 7a: e9d2 2300 ldrd r2, r3, [r2]\n", + " 7e: f240 4c00 movw ip, #1024 ; 0x400\n", + " 82: f2c0 0c04 movt ip, #4\n", + " 86: 4464 add r4, ip\n", + " 88: e9d4 4500 ldrd r4, r5, [r4]\n", + " 8c: f8dc 000c ldr.w r0, [ip, #12]\n", + " 90: f5a4 1280 sub.w r2, r4, #1048576 ; 0x100000\n", + " 94: f8d2 20a4 ldr.w r2, [r2, #164] ; 0xa4\n", + " 98: f994 200c ldrsb.w r2, [r4, #12]\n", + " 9c: 7b22 ldrb r2, [r4, #12]\n", + " 9e: f9b4 200c ldrsh.w r2, [r4, #12]\n", nullptr }; const char* StoreToOffsetResults[] = { " 0: 60e2 str r2, [r4, #12]\n", - " 2: f44f 5c00 mov.w ip, #8192 ; 0x2000\n", - " 6: 44a4 add ip, r4\n", - " 8: f8cc 2000 str.w r2, [ip]\n", - " c: f8cc 000c str.w r0, [ip, #12]\n", - " 10: f8ac 000c strh.w r0, [ip, #12]\n", - " 14: f88c 200c strb.w r2, [ip, #12]\n", + " 2: f8c4 2fff str.w r2, [r4, #4095] ; 0xfff\n", + " 6: f504 5c80 add.w ip, r4, #4096 ; 0x1000\n", + " a: f8cc 2000 str.w r2, [ip]\n", + " e: f504 1c80 add.w ip, r4, #1048576 ; 0x100000\n", + " 12: f8cc 20a4 str.w r2, [ip, #164] ; 0xa4\n", + " 16: f241 0c00 movw ip, #4096 ; 0x1000\n", + " 1a: f2c0 0c10 movt ip, #16\n", + " 1e: 44a4 add ip, r4\n", + 
" 20: f8cc 2000 str.w r2, [ip]\n", + " 24: f241 0c00 movw ip, #4096 ; 0x1000\n", + " 28: f2c0 0c10 movt ip, #16\n", + " 2c: 44a4 add ip, r4\n", + " 2e: f8cc 4000 str.w r4, [ip]\n", + " 32: 81a2 strh r2, [r4, #12]\n", + " 34: f8a4 2fff strh.w r2, [r4, #4095] ; 0xfff\n", + " 38: f504 5c80 add.w ip, r4, #4096 ; 0x1000\n", + " 3c: f8ac 2000 strh.w r2, [ip]\n", + " 40: f504 1c80 add.w ip, r4, #1048576 ; 0x100000\n", + " 44: f8ac 20a4 strh.w r2, [ip, #164] ; 0xa4\n", + " 48: f241 0c00 movw ip, #4096 ; 0x1000\n", + " 4c: f2c0 0c10 movt ip, #16\n", + " 50: 44a4 add ip, r4\n", + " 52: f8ac 2000 strh.w r2, [ip]\n", + " 56: f241 0c00 movw ip, #4096 ; 0x1000\n", + " 5a: f2c0 0c10 movt ip, #16\n", + " 5e: 44a4 add ip, r4\n", + " 60: f8ac 4000 strh.w r4, [ip]\n", + " 64: e9c4 2303 strd r2, r3, [r4, #12]\n", + " 68: e9c4 23ff strd r2, r3, [r4, #1020] ; 0x3fc\n", + " 6c: f504 6c80 add.w ip, r4, #1024 ; 0x400\n", + " 70: e9cc 2300 strd r2, r3, [ip]\n", + " 74: f504 2c80 add.w ip, r4, #262144 ; 0x40000\n", + " 78: e9cc 2329 strd r2, r3, [ip, #164]; 0xa4\n", + " 7c: f240 4c00 movw ip, #1024 ; 0x400\n", + " 80: f2c0 0c04 movt ip, #4\n", + " 84: 44a4 add ip, r4\n", + " 86: e9cc 2300 strd r2, r3, [ip]\n", + " 8a: f240 4c00 movw ip, #1024 ; 0x400\n", + " 8e: f2c0 0c04 movt ip, #4\n", + " 92: 44a4 add ip, r4\n", + " 94: e9cc 4500 strd r4, r5, [ip]\n", + " 98: f8cc 000c str.w r0, [ip, #12]\n", + " 9c: f5a4 1c80 sub.w ip, r4, #1048576 ; 0x100000\n", + " a0: f8cc 20a4 str.w r2, [ip, #164] ; 0xa4\n", + " a4: 7322 strb r2, [r4, #12]\n", nullptr }; const char* IfThenResults[] = { @@ -4952,6 +5062,7 @@ void setup_results() { test_results["DataProcessingModifiedImmediate"] = DataProcessingModifiedImmediateResults; test_results["DataProcessingModifiedImmediates"] = DataProcessingModifiedImmediatesResults; test_results["DataProcessingShiftedRegister"] = DataProcessingShiftedRegisterResults; + test_results["ShiftImmediate"] = ShiftImmediateResults; test_results["BasicLoad"] = BasicLoadResults; test_results["BasicStore"] = BasicStoreResults; test_results["ComplexLoad"] = ComplexLoadResults; @@ -4966,6 +5077,7 @@ void setup_results() { test_results["StoreMultiple"] = StoreMultipleResults; test_results["MovWMovT"] = MovWMovTResults; test_results["SpecialAddSub"] = SpecialAddSubResults; + test_results["LoadFromOffset"] = LoadFromOffsetResults; test_results["StoreToOffset"] = StoreToOffsetResults; test_results["IfThen"] = IfThenResults; test_results["CbzCbnz"] = CbzCbnzResults; diff --git a/runtime/Android.mk b/runtime/Android.mk index 4f4792a709..0b0f0942a3 100644 --- a/runtime/Android.mk +++ b/runtime/Android.mk @@ -106,7 +106,6 @@ LIBART_COMMON_SRC_FILES := \ jit/profiling_info.cc \ lambda/art_lambda_method.cc \ lambda/box_table.cc \ - lambda/box_class_table.cc \ lambda/closure.cc \ lambda/closure_builder.cc \ lambda/leaking_allocator.cc \ diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc index 771c8b7e2f..d6ba304bd1 100644 --- a/runtime/arch/arch_test.cc +++ b/runtime/arch/arch_test.cc @@ -46,15 +46,9 @@ class ArchTest : public CommonRuntimeTest { } }; -} // namespace art - // Common tests are declared next to the constants. #define ADD_TEST_EQ(x, y) EXPECT_EQ(x, y); #include "asm_support.h" -// Important: Do not include this inside of another namespace, since asm_support.h -// defines its own namespace which must not be nested. 
- -namespace art { TEST_F(ArchTest, CheckCommonOffsetsAndSizes) { CheckAsmSupportOffsetsAndSizes(); diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S index 588268d878..631b784787 100644 --- a/runtime/arch/arm/quick_entrypoints_arm.S +++ b/runtime/arch/arm/quick_entrypoints_arm.S @@ -1045,26 +1045,6 @@ ENTRY art_quick_proxy_invoke_handler DELIVER_PENDING_EXCEPTION END art_quick_proxy_invoke_handler -// Forward call from boxed innate lambda to the underlying lambda closure's target method. - .extern artQuickLambdaProxyInvokeHandler -ENTRY art_quick_lambda_proxy_invoke_handler -// TODO: have a faster handler that doesn't need to set up a frame - SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_R0 - mov r2, r9 @ pass Thread::Current - mov r3, sp @ pass SP - blx artQuickLambdaProxyInvokeHandler @ (Method* proxy method, receiver, Thread*, SP) - ldr r2, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_ - // Tear down the callee-save frame. Skip arg registers. - add sp, #(FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE - FRAME_SIZE_REFS_ONLY_CALLEE_SAVE) - .cfi_adjust_cfa_offset -(FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE - FRAME_SIZE_REFS_ONLY_CALLEE_SAVE) - RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME - cbnz r2, 1f @ success if no exception is pending - vmov d0, r0, r1 @ store into fpr, for when it's a fpr return... - bx lr @ return on success -1: - DELIVER_PENDING_EXCEPTION -END art_quick_lambda_proxy_invoke_handler - /* * Called to resolve an imt conflict. r12 is a hidden argument that holds the target method's * dex method index. diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S index 177873d73f..9ccabad1cc 100644 --- a/runtime/arch/arm64/quick_entrypoints_arm64.S +++ b/runtime/arch/arm64/quick_entrypoints_arm64.S @@ -1582,28 +1582,6 @@ ENTRY art_quick_proxy_invoke_handler DELIVER_PENDING_EXCEPTION END art_quick_proxy_invoke_handler - /* - * Called by managed code that is attempting to call a method on a lambda proxy class. On entry - * x0 holds the lambda proxy method and x1 holds the receiver; The frame size of the invoked - * lambda proxy method agrees with a ref and args callee save frame. - */ - .extern artQuickLambdaProxyInvokeHandler -ENTRY art_quick_lambda_proxy_invoke_handler -// TODO: have a faster way to invoke lambda proxies without setting up the whole frame. - SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_X0 - mov x2, xSELF // pass Thread::Current - mov x3, sp // pass SP - bl artQuickLambdaProxyInvokeHandler // (Method* proxy method, receiver, Thread*, SP) - ldr x2, [xSELF, THREAD_EXCEPTION_OFFSET] - cbnz x2, .Lexception_in_lambda_proxy // success if no exception is pending - RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME // Restore frame - fmov d0, x0 // Store result in d0 in case it was float or double - ret // return on success -.Lexception_in_lambda_proxy: - RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME - DELIVER_PENDING_EXCEPTION -END art_quick_lambda_proxy_invoke_handler - /* * Called to resolve an imt conflict. xIP1 is a hidden argument that holds the target method's * dex method index. 
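All of the deleted lambda stubs funnel into the same runtime contract as the surviving proxy stubs. As a rough sketch (the declaration below matches the handler the remaining proxy stubs call; the step list is a summary, not the literal code):

  extern "C" uint64_t artQuickProxyInvokeHandler(
      ArtMethod* proxy_method, mirror::Object* receiver, Thread* self, ArtMethod** sp)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Each architecture's stub (1) sets up the "refs and args" callee-save
  // frame so the runtime can walk and GC-visit the caller's arguments,
  // (2) calls the handler with (method, receiver, Thread::Current(), SP),
  // (3) reloads self->exception_ and either delivers the pending exception
  // or returns, copying the 64-bit result into both the core and FP return
  // registers because the stub does not know the callee's return type.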
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S index af79f5ef4d..0691f2a620 100644 --- a/runtime/arch/mips/quick_entrypoints_mips.S +++ b/runtime/arch/mips/quick_entrypoints_mips.S @@ -1377,10 +1377,6 @@ ENTRY art_quick_proxy_invoke_handler DELIVER_PENDING_EXCEPTION END art_quick_proxy_invoke_handler -// Forward call from boxed innate lambda to the underlying lambda closure's target method. - .extern artQuickLambdaProxyInvokeHandler -UNIMPLEMENTED art_quick_lambda_proxy_invoke_handler - /* * Called to resolve an imt conflict. t0 is a hidden argument that holds the target method's * dex method index. diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S index 5e70a95a2d..66c8aadf33 100644 --- a/runtime/arch/mips64/quick_entrypoints_mips64.S +++ b/runtime/arch/mips64/quick_entrypoints_mips64.S @@ -1431,10 +1431,6 @@ ENTRY art_quick_proxy_invoke_handler DELIVER_PENDING_EXCEPTION END art_quick_proxy_invoke_handler -// Forward call from boxed innate lambda to the underlying lambda closure's target method. - .extern artQuickLambdaProxyInvokeHandler -UNIMPLEMENTED art_quick_lambda_proxy_invoke_handler - /* * Called to resolve an imt conflict. t0 is a hidden argument that holds the target method's * dex method index. diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S index 4fb6119ac1..463c9cf10e 100644 --- a/runtime/arch/x86/quick_entrypoints_x86.S +++ b/runtime/arch/x86/quick_entrypoints_x86.S @@ -1391,149 +1391,6 @@ DEFINE_FUNCTION art_quick_proxy_invoke_handler RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception END_FUNCTION art_quick_proxy_invoke_handler -#if LAMBDA_INVOKE_USES_LONG -#undef LAMBDA_PROXY_SETUP_FRAME -// We need to always do a 'pop' to readjust the stack, so we have to use the slower call instruction. -#define LAMBDA_PROXY_SETUP_FRAME 1 -#define LAMBDA_INVOKE_REALIGN_STACK_FRAME 1 -#else -#define LAMBDA_INVOKE_REALIGN_STACK_FRAME 0 -#endif - -#define LAMBDA_INVOKE_CALLS_INTO_RUNTIME LAMBDA_INVOKE_REALIGN_STACK_FRAME - -// Forward call from boxed innate lambda to the underlying lambda closure's target method. -DEFINE_FUNCTION art_quick_lambda_proxy_invoke_handler - // This function is always called when the lambda is innate. - // Therefore we can assume the box is to an innate lambda. - // TODO: perhaps there should be a DCHECK to make sure it's innate? - -#if LAMBDA_PROXY_SETUP_FRAME - // Set up a quick frame when debugging so we can see that it's going through a stub. - // An invoke-virtual + a stub invocation is enough of a hint that we *could* be - // going through a lambda proxy. - SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_EAX -#endif - -#if !LAMBDA_INVOKE_CALLS_INTO_RUNTIME - // Rewrite the following 2 arguments, stored on stack frame: - // - // |--------| - // |receiver| <- esp-4 - // |--------| - // | method | <- esp - // |--------| - - // Set up the new correct method receiver (swap object with closure). - // -- The original object is no longer available after this. - // - // (Before) - // ecx == mirror::Object* boxed_lambda; // lambda proxy object. - movl MIRROR_OBJECT_BOXED_INNATE_LAMBDA_CLOSURE_POINTER_OFFSET(%ecx), %ecx - // (After) - // lambda::Closure* closure = boxed_lambda->closure_; - // boxed_lambda = closure; // Overwrite lambda proxy object - // ecx == closure - - // Look up the new correct method target. - // -- The original method target is no longer available after this. 
- // - // (Before) - // eax == ArtMethod* old_receiver_method; - movl LAMBDA_CLOSURE_METHOD_OFFSET(%ecx), %eax - // (After) - // ArtLambdaMethod* lambda_method_target = closure->lambda_info_; - // eax = lambda_method_target - // - // Set up the correct method target from the lambda info. - movl ART_LAMBDA_METHOD_ART_METHOD_OFFSET(%eax), %eax // Load new receiver method - // (After) - // ArtMethod* target_method = lambda_method_target->target_ - // eax = target_method -#endif - -#if LAMBDA_INVOKE_CALLS_INTO_RUNTIME - PUSH esp // pass SP - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() - CFI_ADJUST_CFA_OFFSET(4) - PUSH ecx // pass receiver - PUSH eax // pass proxy method - call SYMBOL(artQuickLambdaProxyInvokeHandler) // (proxy method, receiver, Thread*, SP) - movd %eax, %xmm0 // place return value also into floating point return value - movd %edx, %xmm1 - punpckldq %xmm1, %xmm0 - addl LITERAL(16 + FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE - FRAME_SIZE_REFS_ONLY_CALLEE_SAVE), %esp - CFI_ADJUST_CFA_OFFSET(-(16 + FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE - FRAME_SIZE_REFS_ONLY_CALLEE_SAVE)) - RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME - RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception -#endif - -#if LAMBDA_INVOKE_USES_LONG && !LAMBDA_INVOKE_REALIGN_STACK_FRAME - // As a temporary workaround, lambda functions look like - // (J[Arg2][Arg3][Arg4]...) - // This means that we can't just pass in the lambda as a 32-bit pointer - // We pad the arguments with an extra 32-bit "0" where Arg2 used to be instead. - - // Required arguments for a lambda method: - // - // Arg0 = eax = method - // Arg1 = ecx = closure (hi) - // Arg2 = edx = closure (lo) - // Arg3 = ebx = <?> (first user-defined argument) - - // Transformation diagram: - // - // Arg0 Arg1 Arg2 Arg3 ... ArgN - // | | \ \ \ - // | | \ \ \ - // Arg0 Arg1 0x00 Arg2 Arg3 ... ArgN - // /\ - // (inserted) - PUSH ebx // Move out Arg3 into Arg4, and also for all K>3 ArgK into ArgK+1 - mov %edx, %ebx // Move out Arg2 into Arg3 - xor %edx, %edx // Clear closure 32-bit low register - - // XX: Does this work at all ? This probably breaks the visitors (*and* its unaligned). - - // FIXME: call into the runtime and do a proxy-like-invoke - // using a ShadowFrame quick visitor, and then use ArtMethod::Invoke - // to call into the actual method (which will take care of fixing up alignment). - // Trying to realign in the assembly itself won't actually work - // since then the visitor will unwind incorrectly (unless we also fixed up the ManagedStack). -#endif - - // TODO: avoid extra indirect load by subclass ArtLambdaMethod from ArtMethod. - - // Forward the call to the overwritten receiver method. - // -- Arguments [2,N] are left completely untouched since the signature is otherwise identical. -#if LAMBDA_PROXY_SETUP_FRAME - #if LAMBDA_INVOKE_CALLS_INTO_RUNTIME - // Have to call into runtime in order to re-align the stack frame to 16 bytes. - int3 - #else - // Just call into the method directly. Don't worry about realigning. - call *ART_METHOD_QUICK_CODE_OFFSET_32(%eax) // (new method, new receiver, old args...) - - // The stack frame was manually adjusted, so make sure we have a pop here to fix it back. - #if LAMBDA_INVOKE_USES_LONG && !LAMBDA_INVOKE_REALIGN_STACK_FRAME - - POP ecx // OK: ecx is scratch register after the call. - // XX: use 'add esp, 4' instead if we need to keep the register? This way we get cleaner CFI. 
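// Net effect of the deleted x86 fast path, condensed into C++-style
// pseudocode (member names taken from the removed comments, not compilable):
//   lambda::Closure* closure = boxed_lambda->closure_;   // swap the receiver
//   ArtMethod* target = closure->lambda_info_->target_;  // swap the method
//   goto target->quick_entry_point;                      // tail call; args [2,N] untouched
// The x86_64 version below performs the same receiver/method swap, differing
// only in the 64-bit loads and the frame handling.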
- #endif - #endif - RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME - -#else - // Do not use 'call' here since the stack visitors wouldn't know how to visit this frame. - jmp *ART_METHOD_QUICK_CODE_OFFSET_32(%eax) // tailcall (new method, new receiver, old args...) -#endif - -#if LAMBDA_PROXY_SETUP_FRAME - ret -#endif - -END_FUNCTION art_quick_lambda_proxy_invoke_handler - /* * Called to resolve an imt conflict. xmm7 is a hidden argument that holds the target method's * dex method index. diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S index 0a54aa34ca..17d277e1fd 100644 --- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S +++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S @@ -1297,6 +1297,7 @@ DEFINE_FUNCTION art_quick_set64_static RETURN_IF_EAX_ZERO // return or deliver exception END_FUNCTION art_quick_set64_static + DEFINE_FUNCTION art_quick_proxy_invoke_handler SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_RDI @@ -1308,60 +1309,6 @@ DEFINE_FUNCTION art_quick_proxy_invoke_handler RETURN_OR_DELIVER_PENDING_EXCEPTION END_FUNCTION art_quick_proxy_invoke_handler -// Forward call from boxed innate lambda to the underlying lambda closure's target method. -DEFINE_FUNCTION art_quick_lambda_proxy_invoke_handler - // This function is always called when the lambda is innate. - // Therefore we can assume the box is to an innate lambda. - // TODO: perhaps there should be a DCHECK to make sure it's innate? - -#if LAMBDA_PROXY_SETUP_FRAME - // Set up a quick frame when debugging so we can see that it's going through a stub. - // Our stack traces will contain the quick lambda proxy hander. - // Note that we *must* go through the handler (when spilling) otherwise we won't know how - // to move the spilled GC references from the caller to this stub. - SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_RDI - - movq %gs:THREAD_SELF_OFFSET, %rdx // Pass Thread::Current(). - movq %rsp, %rcx // Pass SP. - call SYMBOL(artQuickLambdaProxyInvokeHandler) // (proxy method, receiver, Thread*, SP) - RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME - movq %rax, %xmm0 // Copy return value in case of float returns. - RETURN_OR_DELIVER_PENDING_EXCEPTION -#else - // Set up the new correct method receiver (swap object with closure). - // -- The original object is no longer available after this. - // - // (Before) - // rsi == mirror::Object* boxed_lambda; // lambda proxy object. - movq MIRROR_OBJECT_BOXED_INNATE_LAMBDA_CLOSURE_POINTER_OFFSET(%rsi), %rsi - // (After) - // lambda::Closure* closure = boxed_lambda->closure_; // Overwrite receiver object. - // rsi == closure - - // Look up the new correct method target. - // -- The original method target is no longer available after this. - movq LAMBDA_CLOSURE_METHOD_OFFSET(%rsi), %rdi // Overwrite old receiver method. - // (After) - // ArtLambdaMethod* lambda_method_target = closure->lambda_info_; - // rdi == lambda_method_target - - // TODO: avoid extra indirect load by subclass ArtLambdaMethod from ArtMethod. - - // Set up the correct method target from the lambda info. - movq ART_LAMBDA_METHOD_ART_METHOD_OFFSET(%rdi), %rdi // Write new receiver method. - // (After) - // ArtMethod* method_target = lambda_method_target->target_; - // rdi == method_target - - // Forward the call to the overwritten receiver method. - // -- Arguments [2,N] are left completely untouched since the signature is otherwise identical. - // Do not use 'call' here since the stack would be misaligned (8b instead of 16b). 
- // Also the stack visitors wouldn't know how to visit this frame if we used a call. - jmp *ART_METHOD_QUICK_CODE_OFFSET_64(%rdi) // tailcall (new method, new receiver, old args...) -#endif - -END_FUNCTION art_quick_lambda_proxy_invoke_handler - /* * Called to resolve an imt conflict. * rax is a hidden argument that holds the target method's dex method index. diff --git a/runtime/art_field-inl.h b/runtime/art_field-inl.h index ab42d0ec95..4166e22daa 100644 --- a/runtime/art_field-inl.h +++ b/runtime/art_field-inl.h @@ -255,7 +255,7 @@ inline void ArtField::SetObject(mirror::Object* object, mirror::Object* l) { inline const char* ArtField::GetName() SHARED_REQUIRES(Locks::mutator_lock_) { uint32_t field_index = GetDexFieldIndex(); - if (UNLIKELY(GetDeclaringClass()->IsAnyProxyClass())) { + if (UNLIKELY(GetDeclaringClass()->IsProxyClass())) { DCHECK(IsStatic()); DCHECK_LT(field_index, 2U); return field_index == 0 ? "interfaces" : "throws"; @@ -266,7 +266,7 @@ inline const char* ArtField::GetName() SHARED_REQUIRES(Locks::mutator_lock_) { inline const char* ArtField::GetTypeDescriptor() SHARED_REQUIRES(Locks::mutator_lock_) { uint32_t field_index = GetDexFieldIndex(); - if (UNLIKELY(GetDeclaringClass()->IsAnyProxyClass())) { + if (UNLIKELY(GetDeclaringClass()->IsProxyClass())) { DCHECK(IsStatic()); DCHECK_LT(field_index, 2U); // 0 == Class[] interfaces; 1 == Class[][] throws; @@ -290,8 +290,8 @@ template <bool kResolve> inline mirror::Class* ArtField::GetType() { const uint32_t field_index = GetDexFieldIndex(); auto* declaring_class = GetDeclaringClass(); - if (UNLIKELY(declaring_class->IsAnyProxyClass())) { - return AnyProxyFindSystemClass(GetTypeDescriptor()); + if (UNLIKELY(declaring_class->IsProxyClass())) { + return ProxyFindSystemClass(GetTypeDescriptor()); } auto* dex_cache = declaring_class->GetDexCache(); const DexFile* const dex_file = dex_cache->GetDexFile(); diff --git a/runtime/art_field.cc b/runtime/art_field.cc index 3ac563a789..3737e0ddee 100644 --- a/runtime/art_field.cc +++ b/runtime/art_field.cc @@ -69,8 +69,8 @@ ArtField* ArtField::FindStaticFieldWithOffset(mirror::Class* klass, uint32_t fie return nullptr; } -mirror::Class* ArtField::AnyProxyFindSystemClass(const char* descriptor) { - DCHECK(GetDeclaringClass()->IsAnyProxyClass()); +mirror::Class* ArtField::ProxyFindSystemClass(const char* descriptor) { + DCHECK(GetDeclaringClass()->IsProxyClass()); return Runtime::Current()->GetClassLinker()->FindSystemClass(Thread::Current(), descriptor); } diff --git a/runtime/art_field.h b/runtime/art_field.h index 4ebe6fbab4..a943a34174 100644 --- a/runtime/art_field.h +++ b/runtime/art_field.h @@ -191,9 +191,7 @@ class ArtField FINAL { } private: - mirror::Class* AnyProxyFindSystemClass(const char* descriptor) - SHARED_REQUIRES(Locks::mutator_lock_); - mirror::Class* LambdaProxyFindSystemClass(const char* descriptor) + mirror::Class* ProxyFindSystemClass(const char* descriptor) SHARED_REQUIRES(Locks::mutator_lock_); mirror::Class* ResolveGetType(uint32_t type_idx) SHARED_REQUIRES(Locks::mutator_lock_); mirror::String* ResolveGetStringName(Thread* self, const DexFile& dex_file, uint32_t string_idx, diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h index b6e811f7b2..cf548ada33 100644 --- a/runtime/art_method-inl.h +++ b/runtime/art_method-inl.h @@ -292,7 +292,7 @@ inline const char* ArtMethod::GetDeclaringClassDescriptor() { } inline const char* ArtMethod::GetShorty(uint32_t* out_length) { - DCHECK(!IsProxyMethod() || IsLambdaProxyMethod()); // OK: lambda proxies use 
parent dex cache. + DCHECK(!IsProxyMethod()); const DexFile* dex_file = GetDexFile(); return dex_file->GetMethodShorty(dex_file->GetMethodId(GetDexMethodIndex()), out_length); } @@ -354,31 +354,10 @@ inline const DexFile::ProtoId& ArtMethod::GetPrototype() { } inline const DexFile::TypeList* ArtMethod::GetParameterTypeList() { - // XX: Do proxy methods have a dex file? not sure. + DCHECK(!IsProxyMethod()); const DexFile* dex_file = GetDexFile(); - const DexFile::MethodId* method_id = nullptr; - - if (kIsDebugBuild) { - if (UNLIKELY(IsProxyMethod())) { - // Proxy method case. - CHECK(IsLambdaProxyMethod()) << "Cannot GetParameterTypeList for java.lang.reflect.Proxy"; - - // - // We do not have a method ID, so look up one of the supers we overrode, - // it will have the same exact parameter type list as we do. - - // Lambda proxy classes have the dex cache from their single interface parent. - // Proxy classes have multiple interface parents, so they use the root dexcache instead. - // - // For lambda proxy classes only, get the type list data from the parent. - // (code happens to look the same as the usual non-proxy path). - } - } - - method_id = &dex_file->GetMethodId(GetDexMethodIndex()); - DCHECK(method_id != nullptr); - - const DexFile::ProtoId& proto = dex_file->GetMethodPrototype(*method_id); + const DexFile::ProtoId& proto = dex_file->GetMethodPrototype( + dex_file->GetMethodId(GetDexMethodIndex())); return dex_file->GetProtoParameters(proto); } @@ -418,20 +397,12 @@ inline mirror::ClassLoader* ArtMethod::GetClassLoader() { } inline mirror::DexCache* ArtMethod::GetDexCache() { - DCHECK(!IsProxyMethod() || IsLambdaProxyMethod()); // OK: lambda proxies use parent dex cache. + DCHECK(!IsProxyMethod()); return GetDeclaringClass()->GetDexCache(); } inline bool ArtMethod::IsProxyMethod() { - return GetDeclaringClass()->IsAnyProxyClass(); -} - -inline bool ArtMethod::IsReflectProxyMethod() { - return GetDeclaringClass()->IsReflectProxyClass(); -} - -inline bool ArtMethod::IsLambdaProxyMethod() { - return GetDeclaringClass()->IsLambdaProxyClass(); + return GetDeclaringClass()->IsProxyClass(); } inline ArtMethod* ArtMethod::GetInterfaceMethodIfProxy(size_t pointer_size) { @@ -477,9 +448,9 @@ template<typename RootVisitorType> void ArtMethod::VisitRoots(RootVisitorType& visitor, size_t pointer_size) { ArtMethod* interface_method = nullptr; mirror::Class* klass = declaring_class_.Read(); - if (UNLIKELY(klass != nullptr && klass->IsAnyProxyClass())) { + if (UNLIKELY(klass != nullptr && klass->IsProxyClass())) { // For normal methods, dex cache shortcuts will be visited through the declaring class. - // However, for any proxies we need to keep the interface method alive, so we visit its roots. + // However, for proxies we need to keep the interface method alive, so we visit its roots. interface_method = mirror::DexCache::GetElementPtrSize( GetDexCacheResolvedMethods(pointer_size), GetDexMethodIndex(), diff --git a/runtime/art_method.h b/runtime/art_method.h index 98f5aeeb4c..5a2d6c36ed 100644 --- a/runtime/art_method.h +++ b/runtime/art_method.h @@ -171,16 +171,8 @@ class ArtMethod FINAL { return (GetAccessFlags() & kAccSynthetic) != 0; } - // Does this method live on a declaring class that is itself any proxy class? - // -- Returns true for both java.lang.reflect.Proxy and java.lang.LambdaProxy subclasses. bool IsProxyMethod() SHARED_REQUIRES(Locks::mutator_lock_); - // Does this method live in a java.lang.reflect.Proxy subclass? 
- bool IsReflectProxyMethod() SHARED_REQUIRES(Locks::mutator_lock_); - - // Does this method live in a java.lang.LambdaProxy subclass? - bool IsLambdaProxyMethod() SHARED_REQUIRES(Locks::mutator_lock_); - bool IsPreverified() { return (GetAccessFlags() & kAccPreverified) != 0; } @@ -282,15 +274,7 @@ class ArtMethod FINAL { uint32_t name_and_signature_idx) SHARED_REQUIRES(Locks::mutator_lock_); - // Invoke this method, passing all the virtual registers in args. - // -- args_size must be the size in bytes (not size in words)! - // -- shorty must be the method shorty (i.e. it includes the return type). - // The result is set when the method finishes execution successfully. - void Invoke(Thread* self, - uint32_t* args, - uint32_t args_size, // NOTE: size in bytes - /*out*/JValue* result, - const char* shorty) + void Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* result, const char* shorty) SHARED_REQUIRES(Locks::mutator_lock_); const void* GetEntryPointFromQuickCompiledCode() { @@ -444,9 +428,6 @@ class ArtMethod FINAL { mirror::DexCache* GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_); - // Returns the current method ('this') if this is a regular, non-proxy method. - // Otherwise, when this class is a proxy (IsProxyMethod), look-up the original interface's - // method (that the proxy is "overriding") and return that. ALWAYS_INLINE ArtMethod* GetInterfaceMethodIfProxy(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_); diff --git a/runtime/asm_support.h b/runtime/asm_support.h index 785a9be42d..b548dfb639 100644 --- a/runtime/asm_support.h +++ b/runtime/asm_support.h @@ -19,12 +19,9 @@ #if defined(__cplusplus) #include "art_method.h" -#include "lambda/art_lambda_method.h" -#include "lambda/closure.h" #include "gc/allocator/rosalloc.h" #include "lock_word.h" #include "mirror/class.h" -#include "mirror/lambda_proxy.h" #include "mirror/string.h" #include "runtime.h" #include "thread.h" @@ -52,8 +49,6 @@ #define ADD_TEST_EQ(x, y) CHECK_EQ(x, y); #endif -namespace art { - static inline void CheckAsmSupportOffsetsAndSizes() { #else #define ADD_TEST_EQ(x, y) @@ -303,80 +298,9 @@ ADD_TEST_EQ(ROSALLOC_SLOT_NEXT_OFFSET, static_cast<int32_t>(art::gc::allocator::RosAlloc::RunSlotNextOffset())) // Assert this so that we can avoid zeroing the next field by installing the class pointer. ADD_TEST_EQ(ROSALLOC_SLOT_NEXT_OFFSET, MIRROR_OBJECT_CLASS_OFFSET) -// Working with raw lambdas (lambda::Closure) in raw memory: -// -// |---------------------| -// | ArtLambdaMethod* | <-- pointer to lambda art method, has the info like the size. -// |---------------------| <-- 'data offset' -// | [ Dynamic Size ] | <-- OPTIONAL: only if the ArtLambdaMethod::dynamic_size_ is true. -// |---------------------| -// | Captured Variables | -// | ... | -// |---------------------| <-- total length determined by "dynamic size" if it is present, -// otherwise by the ArtLambdaMethod::static_size_ - -// Offset from start of lambda::Closure to the ArtLambdaMethod*. -#define LAMBDA_CLOSURE_METHOD_OFFSET 0 -ADD_TEST_EQ(static_cast<size_t>(LAMBDA_CLOSURE_METHOD_OFFSET), - offsetof(art::lambda::ClosureStorage, lambda_info_)) -// Offset from the start of lambda::Closure to the data (captured vars or dynamic size). -#define LAMBDA_CLOSURE_DATA_OFFSET __SIZEOF_POINTER__ -ADD_TEST_EQ(static_cast<size_t>(LAMBDA_CLOSURE_DATA_OFFSET), - offsetof(art::lambda::ClosureStorage, captured_)) -// Offsets to captured variables intentionally omitted as it needs a runtime branch. 
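// The two offsets pinned above describe a storage layout of roughly this
// shape (a simplified sketch, not the real art::lambda::ClosureStorage
// declaration):
//   struct ClosureStorage {
//     ArtLambdaMethod* lambda_info_;  // LAMBDA_CLOSURE_METHOD_OFFSET == 0
//     uint8_t captured_[];            // LAMBDA_CLOSURE_DATA_OFFSET == __SIZEOF_POINTER__
//   };
// where captured_ starts with the dynamic size if lambda_info_ marks the
// closure as dynamically sized, and with the captured variables otherwise.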
- -// The size of a lambda closure after it's been compressed down for storage. -// -- Although a lambda closure is a virtual register pair (64-bit), we only need 32-bit -// to track the pointer when we are on 32-bit architectures. -// Both the compiler and the runtime therefore compress the closure down for 32-bit archs. -#define LAMBDA_CLOSURE_COMPRESSED_POINTER_SIZE __SIZEOF_POINTER__ -ADD_TEST_EQ(static_cast<size_t>(LAMBDA_CLOSURE_COMPRESSED_POINTER_SIZE), - sizeof(art::lambda::Closure*)) - -// Working with boxed innate lambdas (as a mirror::Object) in raw memory: -// --- Note that this layout only applies to lambdas originally made with create-lambda. -// --- Boxing a lambda created from a new-instance instruction is simply the original object. -// -// |---------------------| -// | object header | -// |---------------------| -// | lambda::Closure* | <-- long on 64-bit, int on 32-bit -// |---------------------| -#define MIRROR_OBJECT_BOXED_INNATE_LAMBDA_CLOSURE_POINTER_OFFSET (MIRROR_OBJECT_HEADER_SIZE) -ADD_TEST_EQ(static_cast<size_t>(MIRROR_OBJECT_BOXED_INNATE_LAMBDA_CLOSURE_POINTER_OFFSET), - art::mirror::LambdaProxy::GetInstanceFieldOffsetClosure().SizeValue()) - // Equivalent to (private) offsetof(art::mirror::LambdaProxy, closure_)) - -// Working with boxed innate lambdas (as a mirror::Object) in raw memory: -// --- Note that this layout only applies to lambdas originally made with create-lambda. -// --- Boxing a lambda created from a new-instance instruction is simply the original object. -// -// |---------------------| -// | object header | -// |---------------------| -// | lambda::Closure* | <-- long on 64-bit, int on 32-bit -// |---------------------| -#define ART_LAMBDA_METHOD_ART_METHOD_OFFSET (0) -ADD_TEST_EQ(static_cast<size_t>(ART_LAMBDA_METHOD_ART_METHOD_OFFSET), - art::lambda::ArtLambdaMethod::GetArtMethodOffset()) - -#if defined(NDEBUG) -// Release should be faaast. So just jump directly to the lambda method. -#define LAMBDA_PROXY_SETUP_FRAME 0 -#else -// Debug can be slower, and we want to get better stack traces. Set up a frame. -#define LAMBDA_PROXY_SETUP_FRAME 1 -#endif - -// For WIP implementation, lambda types are all "longs" -// which means on a 32-bit implementation we need to fill the argument with 32-bit 0s -// whenever we invoke a method with a lambda in it. -// TODO: remove all usages of this once we go to a proper \LambdaType; system. -#define LAMBDA_INVOKE_USES_LONG 1 #if defined(__cplusplus) } // End of CheckAsmSupportOffsets. 
-} // namespace art #endif #endif // ART_RUNTIME_ASM_SUPPORT_H_ diff --git a/runtime/base/allocator.h b/runtime/base/allocator.h index e2ade07555..969f5b953f 100644 --- a/runtime/base/allocator.h +++ b/runtime/base/allocator.h @@ -53,7 +53,6 @@ enum AllocatorTag { kAllocatorTagClassTable, kAllocatorTagInternTable, kAllocatorTagLambdaBoxTable, - kAllocatorTagLambdaProxyClassBoxTable, kAllocatorTagMaps, kAllocatorTagLOS, kAllocatorTagSafeMap, diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc index 6ca56f53f6..70bd398415 100644 --- a/runtime/base/mutex.cc +++ b/runtime/base/mutex.cc @@ -65,7 +65,6 @@ Mutex* Locks::thread_suspend_count_lock_ = nullptr; Mutex* Locks::trace_lock_ = nullptr; Mutex* Locks::unexpected_signal_lock_ = nullptr; Mutex* Locks::lambda_table_lock_ = nullptr; -Mutex* Locks::lambda_class_table_lock_ = nullptr; Uninterruptible Roles::uninterruptible_; struct AllMutexData { @@ -955,7 +954,6 @@ void Locks::Init() { DCHECK(trace_lock_ != nullptr); DCHECK(unexpected_signal_lock_ != nullptr); DCHECK(lambda_table_lock_ != nullptr); - DCHECK(lambda_class_table_lock_ != nullptr); } else { // Create global locks in level order from highest lock level to lowest. LockLevel current_lock_level = kInstrumentEntrypointsLock; @@ -1074,10 +1072,6 @@ void Locks::Init() { DCHECK(lambda_table_lock_ == nullptr); lambda_table_lock_ = new Mutex("lambda table lock", current_lock_level); - UPDATE_CURRENT_LOCK_LEVEL(kLambdaClassTableLock); - DCHECK(lambda_class_table_lock_ == nullptr); - lambda_class_table_lock_ = new Mutex("lambda class table lock", current_lock_level); - UPDATE_CURRENT_LOCK_LEVEL(kAbortLock); DCHECK(abort_lock_ == nullptr); abort_lock_ = new Mutex("abort lock", current_lock_level, true); diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h index e2d7062f83..d4c9057ab3 100644 --- a/runtime/base/mutex.h +++ b/runtime/base/mutex.h @@ -60,7 +60,6 @@ enum LockLevel { kUnexpectedSignalLock, kThreadSuspendCountLock, kAbortLock, - kLambdaClassTableLock, kLambdaTableLock, kJdwpSocketLock, kRegionSpaceRegionLock, @@ -693,10 +692,6 @@ class Locks { // Allow reader-writer mutual exclusion on the boxed table of lambda objects. // TODO: this should be a RW mutex lock, except that ConditionVariables don't work with it. static Mutex* lambda_table_lock_ ACQUIRED_AFTER(mutator_lock_); - - // Allow reader-writer mutual exclusion on the boxed table of lambda proxy classes. - // TODO: this should be a RW mutex lock, except that ConditionVariables don't work with it. - static Mutex* lambda_class_table_lock_ ACQUIRED_AFTER(lambda_table_lock_); }; class Roles { diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc index 8a0d8d4522..2dd2a83888 100644 --- a/runtime/class_linker.cc +++ b/runtime/class_linker.cc @@ -56,7 +56,6 @@ #include "interpreter/interpreter.h" #include "jit/jit.h" #include "jit/jit_code_cache.h" -#include "lambda/box_class_table.h" #include "leb128.h" #include "linear_alloc.h" #include "mirror/class.h" @@ -65,7 +64,6 @@ #include "mirror/dex_cache-inl.h" #include "mirror/field.h" #include "mirror/iftable-inl.h" -#include "mirror/lambda_proxy.h" #include "mirror/method.h" #include "mirror/object-inl.h" #include "mirror/object_array-inl.h" @@ -583,9 +581,6 @@ bool ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b // Create java.lang.reflect.Proxy root. SetClassRoot(kJavaLangReflectProxy, FindSystemClass(self, "Ljava/lang/reflect/Proxy;")); - // Create java.lang.LambdaProxy root. 
-  SetClassRoot(kJavaLangLambdaProxy, FindSystemClass(self, "Ljava/lang/LambdaProxy;"));
-
   // Create java.lang.reflect.Field.class root.
   auto* class_root = FindSystemClass(self, "Ljava/lang/reflect/Field;");
   CHECK(class_root != nullptr);
@@ -1262,7 +1257,6 @@ void ClassLinker::DeleteClassLoader(Thread* self, const ClassLoaderData& data) {
   }
   delete data.allocator;
   delete data.class_table;
-  delete data.lambda_box_class_table;
 }
 
 mirror::PointerArray* ClassLinker::AllocPointerArray(Thread* self, size_t length) {
@@ -1904,10 +1898,8 @@ const OatFile::OatMethod ClassLinker::FindOatMethodFor(ArtMethod* method, bool*
 // Special case to get oat code without overwriting a trampoline.
 const void* ClassLinker::GetQuickOatCodeFor(ArtMethod* method) {
   CHECK(method->IsInvokable()) << PrettyMethod(method);
-  if (method->IsReflectProxyMethod()) {
+  if (method->IsProxyMethod()) {
     return GetQuickProxyInvokeHandler();
-  } else if (method->IsLambdaProxyMethod()) {
-    return GetQuickLambdaProxyInvokeHandler();
   }
   bool found;
   OatFile::OatMethod oat_method = FindOatMethodFor(method, &found);
@@ -3265,7 +3257,7 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable&
   klass->SetName(soa.Decode<mirror::String*>(name));
   klass->SetDexCache(GetClassRoot(kJavaLangReflectProxy)->GetDexCache());
   mirror::Class::SetStatus(klass, mirror::Class::kStatusIdx, self);
-  std::string descriptor(GetDescriptorForAnyProxy(klass.Get()));
+  std::string descriptor(GetDescriptorForProxy(klass.Get()));
   const size_t hash = ComputeModifiedUtf8Hash(descriptor.c_str());
 
   // Needs to be before we insert the class so that the allocator field is set.
@@ -3385,228 +3377,23 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable&
                                            decoded_name->ToModifiedUtf8().c_str()));
     CHECK_EQ(PrettyField(klass->GetStaticField(1)), throws_field_name);
-    CHECK_EQ(klass.Get()->GetInterfacesForAnyProxy(),
-             soa.Decode<mirror::ObjectArray<mirror::Class>*>(interfaces));
-    CHECK_EQ(klass.Get()->GetThrowsForAnyProxy(),
-             soa.Decode<mirror::ObjectArray<mirror::ObjectArray<mirror::Class>>*>(throws));
-  }
-  return klass.Get();
-}
-
-mirror::Class* ClassLinker::CreateLambdaProxyClass(ScopedObjectAccessAlreadyRunnable& soa,
-                                                   jstring name,
-                                                   jobjectArray interfaces,
-                                                   jobject loader,
-                                                   jobjectArray methods,
-                                                   jobjectArray throws,
-                                                   bool* already_exists) {
-  DCHECK(already_exists != nullptr);
-  *already_exists = false;
-
-  Thread* self = soa.Self();
-  StackHandleScope<10> hs(self);
-
-  // Allocate a new java.lang.Class object for a mirror::Proxy.
-  MutableHandle<mirror::Class> klass =
-      hs.NewHandle(AllocClass(self, GetClassRoot(kJavaLangClass), sizeof(mirror::Class)));
-  if (klass.Get() == nullptr) {
-    CHECK(self->IsExceptionPending());  // OOME.
-    return nullptr;
-  }
-  DCHECK(klass->GetClass() != nullptr);
-  klass->SetObjectSize(sizeof(mirror::LambdaProxy));
-
-  // Set the class access flags incl. preverified, so we do not try to set the flag on the methods.
-  klass->SetAccessFlags(kAccClassIsLambdaProxy | kAccPublic | kAccFinal | kAccPreverified);
-  klass->SetClassLoader(soa.Decode<mirror::ClassLoader*>(loader));
-  DCHECK_EQ(klass->GetPrimitiveType(), Primitive::kPrimNot);
-  klass->SetName(soa.Decode<mirror::String*>(name));
-  klass->SetDexCache(GetClassRoot(kJavaLangLambdaProxy)->GetDexCache());
-  // Set the status to be just after loading it, but before anything is resolved.
- mirror::Class::SetStatus(klass, mirror::Class::kStatusIdx, self); - // Convert "foo.bar.baz" string to "Lfoo/bar/baz;" - std::string type_descriptor(GetDescriptorForAnyProxy(klass.Get())); - - mirror::Class* existing; - { - const size_t hash = ComputeModifiedUtf8Hash(type_descriptor.c_str()); - - // Insert the class before loading the fields as the field roots - // (ArtField::declaring_class_) are only visited from the class - // table. There can't be any suspend points between inserting the - // class and setting the field arrays below. - existing = InsertClass(type_descriptor.c_str(), klass.Get(), hash); - } - if (UNLIKELY(existing != nullptr)) { - // We had already made the lambda proxy previously. Return it. - - *already_exists = true; - return existing; - // Let the GC clean up the class we had already allocated but isn't being used. - } - - // Needs to be after we insert the class so that the allocator field is set. - LinearAlloc* const allocator = GetOrCreateAllocatorForClassLoader(klass->GetClassLoader()); - - // Instance fields are inherited, but we add a couple of static fields... - LengthPrefixedArray<ArtField>* sfields = - AllocArtFieldArray(self, allocator, mirror::LambdaProxy::kStaticFieldCount); - klass->SetSFieldsPtr(sfields); - - // 1. Create a static field 'interfaces' that holds the _declared_ interfaces implemented by - // our proxy, so Class.getInterfaces doesn't return the flattened set. - // -- private static java.lang.Class[] interfaces; // list of declared interfaces - ArtField& interfaces_sfield = sfields->At(mirror::LambdaProxy::kStaticFieldIndexInterfaces); - interfaces_sfield.SetDexFieldIndex(mirror::LambdaProxy::kStaticFieldIndexInterfaces); - interfaces_sfield.SetDeclaringClass(klass.Get()); - interfaces_sfield.SetAccessFlags(kAccStatic | kAccPublic | kAccFinal); - - // 2. Create a static field 'throws' that holds the classes of exceptions thrown by our methods. - // This is returned by java.lang.reflect.Method#getExceptionTypes() - // --- private static java.lang.Class[][] throws; // maps vtable id to list of classes. - ArtField& throws_sfield = sfields->At(mirror::LambdaProxy::kStaticFieldIndexThrows); - throws_sfield.SetDexFieldIndex(mirror::LambdaProxy::kStaticFieldIndexThrows); - throws_sfield.SetDeclaringClass(klass.Get()); - throws_sfield.SetAccessFlags(kAccStatic | kAccPublic | kAccFinal); - - // Set up the Constructor method. - { - // Lambda proxies have 1 direct method, the constructor. - static constexpr size_t kNumDirectMethods = 1; - LengthPrefixedArray<ArtMethod>* directs = AllocArtMethodArray(self, - allocator, - kNumDirectMethods); - // Currently AllocArtMethodArray cannot return null, but the OOM logic is left there in case we - // want to throw OOM in the future. - if (UNLIKELY(directs == nullptr)) { - self->AssertPendingOOMException(); - return nullptr; - } - klass->SetDirectMethodsPtr(directs); - CreateLambdaProxyConstructor(klass, klass->GetDirectMethodUnchecked(0, image_pointer_size_)); - } - - // Create virtual method using specified prototypes. - auto h_methods = hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Method>*>(methods)); - DCHECK_EQ(h_methods->GetClass(), mirror::Method::ArrayClass()) - << PrettyClass(h_methods->GetClass()); - const size_t num_virtual_methods = h_methods->GetLength(); - auto* virtuals = AllocArtMethodArray(self, allocator, num_virtual_methods); - // Currently AllocArtMethodArray cannot return null, but the OOM logic is left there in case we - // want to throw OOM in the future. 
- if (UNLIKELY(virtuals == nullptr)) { - self->AssertPendingOOMException(); - return nullptr; - } - klass->SetVirtualMethodsPtr(virtuals); - size_t abstract_methods = 0; - for (size_t i = 0; i < num_virtual_methods; ++i) { - ArtMethod* virtual_method = klass->GetVirtualMethodUnchecked(i, image_pointer_size_); - ArtMethod* prototype = h_methods->Get(i)->GetArtMethod(); - if (UNLIKELY((prototype->GetAccessFlags() & kAccDefault) != 0)) { - UNIMPLEMENTED(FATAL) << "Lambda proxies don't support default methods yet"; - } - if (prototype->IsAbstract()) { - abstract_methods++; - } - VLOG(class_linker) << "Creating lambda proxy method for " << PrettyMethod(prototype); - - CreateLambdaProxyMethod(klass, prototype, virtual_method); - DCHECK(virtual_method->GetDeclaringClass() != nullptr); - DCHECK(prototype->GetDeclaringClass() != nullptr); - } - // Ignore any methods from Object and default methods, it doesn't matter. - // Sanity check that the prototype interface is indeed compatible with lambdas. - DCHECK_EQ(abstract_methods, 1u) - << "Interface must be a single-abstract-method type" << PrettyClass(klass.Get()); - - // The super class is java.lang.LambdaProxy - klass->SetSuperClass(GetClassRoot(kJavaLangLambdaProxy)); - // Now effectively in the loaded state. - mirror::Class::SetStatus(klass, mirror::Class::kStatusLoaded, self); - self->AssertNoPendingException(); - - MutableHandle<mirror::Class> new_class = hs.NewHandle<mirror::Class>(nullptr); - { - // Must hold lock on object when resolved. - ObjectLock<mirror::Class> resolution_lock(self, klass); - // Link the fields and virtual methods, creating vtable and iftables. - // The new class will replace the old one in the class table. - Handle<mirror::ObjectArray<mirror::Class>> h_interfaces( - hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Class>*>(interfaces))); - - { - DCHECK_EQ(1, h_interfaces->GetLength()) << "Lambda proxies must implement 1 interface only"; - mirror::Class* single_abstract_interface = h_interfaces->Get(0); - DCHECK(single_abstract_interface != nullptr); - - // Use the dex cache from the interface, which will enable most of the - // dex-using mechanisms on the class and its methods will work. - klass->SetDexCache(single_abstract_interface->GetDexCache()); - } - - if (!LinkClass(self, type_descriptor.c_str(), klass, h_interfaces, &new_class)) { - mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self); - return nullptr; - } - } - CHECK(klass->IsRetired()); - CHECK_NE(klass.Get(), new_class.Get()); - klass.Assign(new_class.Get()); - - CHECK_EQ(interfaces_sfield.GetDeclaringClass(), klass.Get()); - interfaces_sfield.SetObject<false>(klass.Get(), - soa.Decode<mirror::ObjectArray<mirror::Class>*>(interfaces)); - - CHECK_EQ(throws_sfield.GetDeclaringClass(), klass.Get()); - throws_sfield.SetObject<false>( - klass.Get(), soa.Decode<mirror::ObjectArray<mirror::ObjectArray<mirror::Class> >*>(throws)); - - { - // Lock on klass is released. Lock new class object. 
-    ObjectLock<mirror::Class> initialization_lock(self, klass);
-    mirror::Class::SetStatus(klass, mirror::Class::kStatusInitialized, self);
-  }
-
-  // Sanity checks
-  if (kIsDebugBuild) {
-    CHECK(klass->GetIFieldsPtr() == nullptr);
-    CheckLambdaProxyConstructor(klass->GetDirectMethod(0, image_pointer_size_));
-
-    for (size_t i = 0; i < num_virtual_methods; ++i) {
-      ArtMethod* virtual_method = klass->GetVirtualMethodUnchecked(i, image_pointer_size_);
-      ArtMethod* prototype = h_methods->Get(i)->GetArtMethod();
-      CheckLambdaProxyMethod(virtual_method, prototype);
-    }
-
-    StackHandleScope<1> hs2(self);
-    Handle<mirror::String> decoded_name = hs2.NewHandle(soa.Decode<mirror::String*>(name));
-    std::string interfaces_field_name(StringPrintf("java.lang.Class[] %s.interfaces",
-                                                   decoded_name->ToModifiedUtf8().c_str()));
-    CHECK_EQ(PrettyField(klass->GetStaticField(0)), interfaces_field_name);
-
-    std::string throws_field_name(StringPrintf("java.lang.Class[][] %s.throws",
-                                               decoded_name->ToModifiedUtf8().c_str()));
-    CHECK_EQ(PrettyField(klass->GetStaticField(1)), throws_field_name);
-
-    CHECK_EQ(klass.Get()->GetInterfacesForAnyProxy(),
+    CHECK_EQ(klass.Get()->GetInterfaces(),
              soa.Decode<mirror::ObjectArray<mirror::Class>*>(interfaces));
-    CHECK_EQ(klass.Get()->GetThrowsForAnyProxy(),
+    CHECK_EQ(klass.Get()->GetThrows(),
              soa.Decode<mirror::ObjectArray<mirror::ObjectArray<mirror::Class>>*>(throws));
   }
   return klass.Get();
 }
 
-std::string ClassLinker::GetDescriptorForAnyProxy(mirror::Class* proxy_class) {
-  DCHECK(proxy_class != nullptr);
-  DCHECK(proxy_class->IsAnyProxyClass());
+std::string ClassLinker::GetDescriptorForProxy(mirror::Class* proxy_class) {
+  DCHECK(proxy_class->IsProxyClass());
   mirror::String* name = proxy_class->GetName();
   DCHECK(name != nullptr);
   return DotToDescriptor(name->ToModifiedUtf8().c_str());
 }
 
 ArtMethod* ClassLinker::FindMethodForProxy(mirror::Class* proxy_class, ArtMethod* proxy_method) {
-  DCHECK(proxy_class->IsAnyProxyClass());
+  DCHECK(proxy_class->IsProxyClass());
   DCHECK(proxy_method->IsProxyMethod());
   {
     Thread* const self = Thread::Current();
@@ -3634,7 +3421,7 @@ ArtMethod* ClassLinker::FindMethodForProxy(mirror::Class* proxy_class, ArtMethod
 void ClassLinker::CreateProxyConstructor(Handle<mirror::Class> klass, ArtMethod* out) {
   // Create constructor for Proxy that must initialize the method.
-  CHECK_EQ(GetClassRoot(kJavaLangReflectProxy)->NumDirectMethods(), 18u);
+  CHECK_EQ(GetClassRoot(kJavaLangReflectProxy)->NumDirectMethods(), 16u);
   ArtMethod* proxy_constructor = GetClassRoot(kJavaLangReflectProxy)->GetDirectMethodUnchecked(
       2, image_pointer_size_);
   // Ensure constructor is in dex cache so that we can use the dex cache to look up the overridden
@@ -3650,38 +3437,6 @@ void ClassLinker::CreateProxyConstructor(Handle<mirror::Class> klass, ArtMethod*
   out->SetDeclaringClass(klass.Get());
 }
 
-void ClassLinker::CreateLambdaProxyConstructor(Handle<mirror::Class> klass,
-                                               /*out*/ArtMethod* method_constructor) {
-  DCHECK(klass.Get() != nullptr);
-  DCHECK(method_constructor != nullptr);
-
-  // Create constructor for Proxy that must initialize the method.
-  // Lambda proxy superclass only has 1 direct method, the constructor (<init>()V)
-  CHECK_EQ(GetClassRoot(kJavaLangLambdaProxy)->NumDirectMethods(),
-           mirror::LambdaProxy::kDirectMethodCount);
-  // Get the constructor method.
- ArtMethod* proxy_constructor = GetClassRoot(kJavaLangLambdaProxy)->GetDirectMethodUnchecked( - mirror::LambdaProxy::kDirectMethodIndexConstructor, - image_pointer_size_); - - // Verify constructor method is indeed a constructor. - CHECK(proxy_constructor != nullptr); - - // Ensure constructor is in dex cache so that we can use the dex cache to look up the overridden - // constructor method. - GetClassRoot(kJavaLangLambdaProxy)->GetDexCache()->SetResolvedMethod( - proxy_constructor->GetDexMethodIndex(), - proxy_constructor, - image_pointer_size_); - - // Clone the existing constructor of LambdaProxy - // (our constructor would just invoke it so steal its code_ too). - method_constructor->CopyFrom(proxy_constructor, image_pointer_size_); - // Make this constructor public and fix the class to be our LambdaProxy version - method_constructor->SetAccessFlags((method_constructor->GetAccessFlags() & ~kAccProtected) | kAccPublic); - method_constructor->SetDeclaringClass(klass.Get()); -} - void ClassLinker::CheckProxyConstructor(ArtMethod* constructor) const { CHECK(constructor->IsConstructor()); auto* np = constructor->GetInterfaceMethodIfProxy(image_pointer_size_); @@ -3690,14 +3445,6 @@ void ClassLinker::CheckProxyConstructor(ArtMethod* constructor) const { DCHECK(constructor->IsPublic()); } -void ClassLinker::CheckLambdaProxyConstructor(ArtMethod* constructor) const { - CHECK(constructor->IsConstructor()); - auto* np = constructor->GetInterfaceMethodIfProxy(image_pointer_size_); - CHECK_STREQ(np->GetName(), "<init>"); - CHECK_STREQ(np->GetSignature().ToString().c_str(), "()V"); - DCHECK(constructor->IsPublic()); -} - void ClassLinker::CreateProxyMethod(Handle<mirror::Class> klass, ArtMethod* prototype, ArtMethod* out) { // Ensure prototype is in dex cache so that we can use the dex cache to look up the overridden @@ -3709,7 +3456,6 @@ void ClassLinker::CreateProxyMethod(Handle<mirror::Class> klass, ArtMethod* prot dex_cache->SetResolvedMethod( prototype->GetDexMethodIndex(), prototype, image_pointer_size_); } - // We steal everything from the prototype (such as DexCache, invoke stub, etc.) then specialize // as necessary DCHECK(out != nullptr); @@ -3725,42 +3471,6 @@ void ClassLinker::CreateProxyMethod(Handle<mirror::Class> klass, ArtMethod* prot out->SetEntryPointFromQuickCompiledCode(GetQuickProxyInvokeHandler()); } -void ClassLinker::CreateLambdaProxyMethod(Handle<mirror::Class> klass, - ArtMethod* prototype, - ArtMethod* out) { - DCHECK(prototype != nullptr); - DCHECK(out != nullptr); - - // DO NOT go through the proxy invoke handler for the default methods. They have no idea - // how to handle the raw closure, so they must get the regular object when invoked. - CHECK_EQ(prototype->GetAccessFlags() & kAccDefault, 0u) << "Default methods must not be proxied"; - - // Ensure prototype is in dex cache so that we can use the dex cache to look up the overridden - // prototype method - auto* dex_cache = prototype->GetDeclaringClass()->GetDexCache(); - // Avoid dirtying the dex cache unless we need to. - if (dex_cache->GetResolvedMethod(prototype->GetDexMethodIndex(), image_pointer_size_) != - prototype) { - dex_cache->SetResolvedMethod( - prototype->GetDexMethodIndex(), prototype, image_pointer_size_); - } - // We steal everything from the prototype (such as DexCache, invoke stub, etc.) 
then specialize - // as necessary - out->CopyFrom(prototype, image_pointer_size_); - - // Set class to be the concrete proxy class and clear the abstract flag, modify exceptions to - // the intersection of throw exceptions as defined in Proxy - out->SetDeclaringClass(klass.Get()); - out->SetAccessFlags((out->GetAccessFlags() & ~kAccAbstract) | kAccFinal); - - // Setting the entry point isn't safe for AOT since ASLR loads it anywhere at runtime. - CHECK(!Runtime::Current()->IsAotCompiler()); - - // At runtime the method looks like a reference and argument saving method, clone the code - // related parameters from this method. - out->SetEntryPointFromQuickCompiledCode(GetQuickLambdaProxyInvokeHandler()); -} - void ClassLinker::CheckProxyMethod(ArtMethod* method, ArtMethod* prototype) const { // Basic sanity CHECK(!prototype->IsFinal()); @@ -3782,11 +3492,6 @@ void ClassLinker::CheckProxyMethod(ArtMethod* method, ArtMethod* prototype) cons prototype->GetReturnType(true /* resolve */, image_pointer_size_)); } -void ClassLinker::CheckLambdaProxyMethod(ArtMethod* method, ArtMethod* prototype) const { - // same as above. - return CheckProxyMethod(method, prototype); -} - bool ClassLinker::CanWeInitializeClass(mirror::Class* klass, bool can_init_statics, bool can_init_parents) { if (can_init_statics && can_init_parents) { @@ -4418,9 +4123,7 @@ ClassTable* ClassLinker::InsertClassTableForClassLoader(mirror::ClassLoader* cla class_loader->SetClassTable(data.class_table); // Should have been set when we registered the dex file. data.allocator = class_loader->GetAllocator(); - CHECK(class_loader->GetLambdaProxyCache() == nullptr); - data.lambda_box_class_table = new lambda::BoxClassTable(); - class_loader->SetLambdaProxyCache(data.lambda_box_class_table); + CHECK(data.allocator != nullptr); class_loaders_.push_back(data); } return class_table; @@ -6863,7 +6566,6 @@ const char* ClassLinker::GetClassRootDescriptor(ClassRoot class_root) { "Ljava/lang/reflect/Field;", "Ljava/lang/reflect/Method;", "Ljava/lang/reflect/Proxy;", - "Ljava/lang/LambdaProxy;", "[Ljava/lang/String;", "[Ljava/lang/reflect/Constructor;", "[Ljava/lang/reflect/Field;", diff --git a/runtime/class_linker.h b/runtime/class_linker.h index f073cd8170..29aac312c1 100644 --- a/runtime/class_linker.h +++ b/runtime/class_linker.h @@ -40,11 +40,6 @@ namespace space { class ImageSpace; } // namespace space } // namespace gc - -namespace lambda { - class BoxClassTable; -} // namespace lambda - namespace mirror { class ClassLoader; class DexCache; @@ -87,7 +82,6 @@ class ClassLinker { kJavaLangReflectField, kJavaLangReflectMethod, kJavaLangReflectProxy, - kJavaLangLambdaProxy, kJavaLangStringArrayClass, kJavaLangReflectConstructorArrayClass, kJavaLangReflectFieldArrayClass, @@ -430,46 +424,12 @@ class ClassLinker { jobjectArray methods, jobjectArray throws) SHARED_REQUIRES(Locks::mutator_lock_); - - // Get the long type descriptor, e.g. "LProxyName$1234;" for the requested proxy class. - static std::string GetDescriptorForAnyProxy(mirror::Class* proxy_class) + std::string GetDescriptorForProxy(mirror::Class* proxy_class) SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* FindMethodForProxy(mirror::Class* proxy_class, ArtMethod* proxy_method) REQUIRES(!dex_lock_) SHARED_REQUIRES(Locks::mutator_lock_); - // Create a lambda proxy class. - // -- Nominally used when boxing an innate lambda, since that has no corresponding class. - // - // * name must be a fully-qualified class name (and dotted), e.g. 
"java.lang.Runnable" - // * interfaces is an array of java.lang.Class for interfaces that will be the supertype - // (note that there must be exactly 1 element here for a lambda interface since lambda - // types can only target 1 interface). - // * loader must be a java.lang.ClassLoader where the proxy class will be created - // * methods must be an array of java.lang.reflect.Method that consists of the - // deduplicated methods from all of the interfaces specified. - // * throws must be an array of java.lang.Class[] where each index corresponds to that of - // methods, and it signifies the "throws" keyword of each method - // (this is not directly used by the runtime itself, but it is available via reflection). - // - // Returns a non-null pointer to a class upon success, otherwise null and throws an exception. - // - // If the class was already created previously (with the same name but potentially different - // parameters), already_exists is set to true; otherwise already_exists is set to false. - // The already_exists value is undefined when an exception was thrown. - // - // Sidenote: interfaces is an array to simplify the libcore code which creates a Java - // array in an attempt to reduce code duplication. - // TODO: this should probably also take the target single-abstract-method as well. - mirror::Class* CreateLambdaProxyClass(ScopedObjectAccessAlreadyRunnable& soa, - jstring name, - jobjectArray interfaces, - jobject loader, - jobjectArray methods, - jobjectArray throws, - /*out*/bool* already_exists) - SHARED_REQUIRES(Locks::mutator_lock_); - // Get the oat code for a method when its class isn't yet initialized const void* GetQuickOatCodeFor(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_); @@ -613,7 +573,6 @@ class ClassLinker { jweak weak_root; // Weak root to enable class unloading. ClassTable* class_table; LinearAlloc* allocator; - lambda::BoxClassTable* lambda_box_class_table; }; // Ensures that the supertype of 'klass' ('supertype') is verified. Returns false and throws @@ -948,12 +907,8 @@ class ClassLinker { void CheckProxyConstructor(ArtMethod* constructor) const SHARED_REQUIRES(Locks::mutator_lock_); - void CheckLambdaProxyConstructor(ArtMethod* constructor) const - SHARED_REQUIRES(Locks::mutator_lock_); void CheckProxyMethod(ArtMethod* method, ArtMethod* prototype) const SHARED_REQUIRES(Locks::mutator_lock_); - void CheckLambdaProxyMethod(ArtMethod* method, ArtMethod* prototype) const - SHARED_REQUIRES(Locks::mutator_lock_); // For use by ImageWriter to find DexCaches for its roots ReaderWriterMutex* DexLock() @@ -971,19 +926,9 @@ class ClassLinker { void CreateProxyConstructor(Handle<mirror::Class> klass, ArtMethod* out) SHARED_REQUIRES(Locks::mutator_lock_); - - // Copy the constructor from java.lang.LambdaProxy into the 'klass'. - // The copy is written into 'method_constructor'. - void CreateLambdaProxyConstructor(Handle<mirror::Class> klass, - /*out*/ArtMethod* method_constructor) - SHARED_REQUIRES(Locks::mutator_lock_); - void CreateProxyMethod(Handle<mirror::Class> klass, ArtMethod* prototype, ArtMethod* out) SHARED_REQUIRES(Locks::mutator_lock_); - void CreateLambdaProxyMethod(Handle<mirror::Class> klass, ArtMethod* prototype, ArtMethod* out) - SHARED_REQUIRES(Locks::mutator_lock_); - // Ensures that methods have the kAccPreverified bit set. We use the kAccPreverfied bit on the // class access flags to determine whether this has been done before. 
void EnsurePreverifiedMethods(Handle<mirror::Class> c) @@ -995,10 +940,7 @@ class ClassLinker { // Returns null if not found. ClassTable* ClassTableForClassLoader(mirror::ClassLoader* class_loader) SHARED_REQUIRES(Locks::mutator_lock_, Locks::classlinker_classes_lock_); - - // Insert a new class table if not found. Uses bootclasspath if class_loader is null. - // Returns either the existing table, or the new one if there wasn't one previously - // (the return value is always non-null). + // Insert a new class table if not found. ClassTable* InsertClassTableForClassLoader(mirror::ClassLoader* class_loader) SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::classlinker_classes_lock_); diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc index 4a9db1d50d..2c086c59f0 100644 --- a/runtime/class_linker_test.cc +++ b/runtime/class_linker_test.cc @@ -31,7 +31,6 @@ #include "mirror/class-inl.h" #include "mirror/dex_cache.h" #include "mirror/field.h" -#include "mirror/lambda_proxy.h" #include "mirror/object-inl.h" #include "mirror/object_array-inl.h" #include "mirror/proxy.h" @@ -553,7 +552,6 @@ struct ClassLoaderOffsets : public CheckOffsets<mirror::ClassLoader> { ClassLoaderOffsets() : CheckOffsets<mirror::ClassLoader>(false, "Ljava/lang/ClassLoader;") { addOffset(OFFSETOF_MEMBER(mirror::ClassLoader, allocator_), "allocator"); addOffset(OFFSETOF_MEMBER(mirror::ClassLoader, class_table_), "classTable"); - addOffset(OFFSETOF_MEMBER(mirror::ClassLoader, lambda_proxy_cache_), "lambdaProxyCache"); addOffset(OFFSETOF_MEMBER(mirror::ClassLoader, packages_), "packages"); addOffset(OFFSETOF_MEMBER(mirror::ClassLoader, parent_), "parent"); addOffset(OFFSETOF_MEMBER(mirror::ClassLoader, proxyCache_), "proxyCache"); @@ -566,13 +564,6 @@ struct ProxyOffsets : public CheckOffsets<mirror::Proxy> { }; }; -struct LambdaProxyOffsets : public CheckOffsets<mirror::LambdaProxy> { - LambdaProxyOffsets() : CheckOffsets<mirror::LambdaProxy>(false, "Ljava/lang/LambdaProxy;") { - addOffset(OFFSETOF_MEMBER(mirror::LambdaProxy, closure_), "closure"); - }; -}; - - struct DexCacheOffsets : public CheckOffsets<mirror::DexCache> { DexCacheOffsets() : CheckOffsets<mirror::DexCache>(false, "Ljava/lang/DexCache;") { addOffset(OFFSETOF_MEMBER(mirror::DexCache, dex_), "dex"); @@ -648,7 +639,6 @@ TEST_F(ClassLinkerTest, ValidateFieldOrderOfJavaCppUnionClasses) { EXPECT_TRUE(StackTraceElementOffsets().Check()); EXPECT_TRUE(ClassLoaderOffsets().Check()); EXPECT_TRUE(ProxyOffsets().Check()); - EXPECT_TRUE(LambdaProxyOffsets().Check()); EXPECT_TRUE(DexCacheOffsets().Check()); EXPECT_TRUE(ReferenceOffsets().Check()); EXPECT_TRUE(FinalizerReferenceOffsets().Check()); diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc index e84313cb9a..f705a50d55 100644 --- a/runtime/common_runtime_test.cc +++ b/runtime/common_runtime_test.cc @@ -55,6 +55,7 @@ int main(int argc, char **argv) { // Gtests can be very noisy. For example, an executable with multiple tests will trigger native // bridge warnings. The following line reduces the minimum log severity to ERROR and suppresses // everything else. In case you want to see all messages, comment out the line. 
+ setenv("ANDROID_LOG_TAGS", "*:e", 1); art::InitLogging(argv); LOG(::art::INFO) << "Running main() from common_runtime_test.cc..."; diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc index 2a92226f4a..87e29ae3c3 100644 --- a/runtime/entrypoints/entrypoint_utils.cc +++ b/runtime/entrypoints/entrypoint_utils.cc @@ -313,7 +313,7 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, cons reinterpret_cast<uintptr_t>(virtual_methods)) / method_size; CHECK_LT(throws_index, static_cast<int>(num_virtuals)); mirror::ObjectArray<mirror::Class>* declared_exceptions = - proxy_class->GetThrowsForAnyProxy()->Get(throws_index); + proxy_class->GetThrows()->Get(throws_index); mirror::Class* exception_class = exception->GetClass(); bool declares_exception = false; for (int32_t i = 0; i < declared_exceptions->GetLength() && !declares_exception; i++) { diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc index 8c2dc3e42a..abf9ac49e6 100644 --- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc @@ -23,12 +23,9 @@ #include "entrypoints/runtime_asm_entrypoints.h" #include "gc/accounting/card_table-inl.h" #include "interpreter/interpreter.h" -#include "lambda/closure.h" -#include "lambda/art_lambda_method.h" #include "method_reference.h" #include "mirror/class-inl.h" #include "mirror/dex_cache-inl.h" -#include "mirror/lambda_proxy.h" #include "mirror/method.h" #include "mirror/object-inl.h" #include "mirror/object_array-inl.h" @@ -297,8 +294,7 @@ class QuickArgumentVisitor { // 1st GPR. static mirror::Object* GetProxyThisObject(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) { - // TODO: Lambda proxies only set up a frame when debugging - CHECK((*sp)->IsReflectProxyMethod() || ((*sp)->IsLambdaProxyMethod() /*&& kIsDebugBuild*/)); + CHECK((*sp)->IsProxyMethod()); CHECK_GT(kNumQuickGprArgs, 0u); constexpr uint32_t kThisGprIndex = 0u; // 'this' is in the 1st GPR. size_t this_arg_offset = kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset + @@ -838,9 +834,8 @@ void BuildQuickArgumentVisitor::FixupReferences() { extern "C" uint64_t artQuickProxyInvokeHandler( ArtMethod* proxy_method, mirror::Object* receiver, Thread* self, ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) { - DCHECK(proxy_method->GetDeclaringClass()->IsReflectProxyClass()) << PrettyMethod(proxy_method); - DCHECK(proxy_method->IsReflectProxyMethod()) << PrettyMethod(proxy_method); - DCHECK(receiver->GetClass()->IsReflectProxyClass()) << PrettyMethod(proxy_method); + DCHECK(proxy_method->IsProxyMethod()) << PrettyMethod(proxy_method); + DCHECK(receiver->GetClass()->IsProxyClass()) << PrettyMethod(proxy_method); // Ensure we don't get thread suspension until the object arguments are safely in jobjects. 
  const char* old_cause = self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
@@ -883,175 +878,6 @@ extern "C" uint64_t artQuickProxyInvokeHandler(
   return result.GetJ();
 }
 
-extern "C" uint64_t artQuickLambdaProxyInvokeHandler(
-    ArtMethod* proxy_method, mirror::LambdaProxy* receiver, Thread* self, ArtMethod** sp)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
-  using lambda::ShortyFieldType;
-
-  DCHECK(proxy_method->GetDeclaringClass()->IsLambdaProxyClass()) << PrettyMethod(proxy_method);
-  DCHECK(proxy_method->IsLambdaProxyMethod()) << PrettyMethod(proxy_method);
-  DCHECK(receiver->GetClass()->IsLambdaProxyClass()) << PrettyMethod(proxy_method);
-
-  lambda::Closure* lambda_closure = receiver->GetClosure();
-  DCHECK(lambda_closure != nullptr);  // Should've NPEd during the invoke-interface.
-  // Learned lambdas have their own implementation of the SAM, they must not go through here.
-  DCHECK(lambda_closure->GetLambdaInfo()->IsInnateLambda());
-  ArtMethod* target_method = lambda_closure->GetTargetMethod();
-
-  // Lambda targets are always static.
-  // TODO: This should really be a target_method->IsLambda(), once we add the access flag.
-  CHECK(target_method->IsStatic()) << PrettyMethod(proxy_method) << " "
-                                   << PrettyMethod(target_method);
-
-  // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
-  const char* old_cause =
-      self->StartAssertNoThreadSuspension("Adding to IRT/SF lambda proxy object arguments");
-  // Register the top of the managed stack, making stack crawlable.
-  DCHECK_EQ((*sp), proxy_method) << PrettyMethod(proxy_method);
-  self->VerifyStack();
-  // Start new JNI local reference state.
-  JNIEnvExt* env = self->GetJniEnv();
-  ScopedObjectAccessUnchecked soa(env);
-
-  // Place the arguments into the args vector and remove the receiver.
-  ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(sizeof(void*));
-  CHECK(!non_proxy_method->IsStatic()) << PrettyMethod(proxy_method) << " "
-                                       << PrettyMethod(non_proxy_method);
-  uint32_t shorty_len = 0;
-  const char* shorty = non_proxy_method->GetShorty(/*out*/&shorty_len);
-
-  std::vector<jvalue> args;
-  // Make a quick visitor so we can restore the refs in case they move after a GC.
-  BuildQuickArgumentVisitor local_ref_visitor(sp,
-                                              false /*is_static*/,
-                                              shorty,
-                                              shorty_len,
-                                              &soa,
-                                              /*out*/&args);
-  local_ref_visitor.VisitArguments();
-
-  static_assert(lambda::kClosureIsStoredAsLong,
-                "Need to update this code once closures are no "
-                "longer treated as a 'long' in quick abi");
-
-  // Allocate one vreg more than usual because we need to convert our
-  // receiver Object (1 vreg) into a long (2 vregs).
-  // TODO: Ugly... move to traits instead?
-  const uint32_t first_arg_reg = ShortyFieldType(ShortyFieldType::kLambda).GetVirtualRegisterCount()
-      - ShortyFieldType(ShortyFieldType::kObject).GetVirtualRegisterCount();
-  const uint32_t num_vregs = lambda_closure->GetLambdaInfo()->GetArgumentVRegCount();
-  DCHECK_GE(num_vregs, first_arg_reg);
-  if (kIsDebugBuild) {
-    const char* method_shorty = non_proxy_method->GetShorty();
-    DCHECK_NE(*method_shorty, '\0') << method_shorty;
-    const char* arg_shorty = method_shorty + 1;  // Skip return type.
-
-    // Proxy method should have an object (1 vreg) receiver,
-    // Lambda method should have a lambda (2 vregs) receiver.
-    // -- All other args are the same as before.
-    // -- Make sure vreg count is what we thought it was.
- uint32_t non_proxy_num_vregs = - ShortyFieldType::CountVirtualRegistersRequired(arg_shorty) // doesn't count receiver - + ShortyFieldType(ShortyFieldType::kObject).GetVirtualRegisterCount(); // implicit receiver - - CHECK_EQ(non_proxy_num_vregs + first_arg_reg, num_vregs) - << PrettyMethod(non_proxy_method) << " " << PrettyMethod(lambda_closure->GetTargetMethod()); - } - - ShadowFrameAllocaUniquePtr shadow_frame = CREATE_SHADOW_FRAME(num_vregs, - /*link*/nullptr, - target_method, - /*dex_pc*/0); - - // Copy our proxy method caller's arguments into this ShadowFrame. - BuildQuickShadowFrameVisitor local_sf_visitor(sp, - /*is_static*/false, - shorty, - shorty_len, - shadow_frame.get(), - first_arg_reg); - - local_sf_visitor.VisitArguments(); - // Now fix up the arguments, with each ArgK being a vreg: - - // (Before): - // Arg0 = proxy receiver (LambdaProxy) - // Arg1 = first-user defined argument - // Arg2 = second user-defined argument - // .... - // ArgN = ... - - // (After) - // Arg0 = closure (hi) - // Arg1 = closure (lo) = 0x00 on 32-bit - // Arg2 = <?> (first user-defined argument) - // Arg3 = <?> (first user-defined argument) - // ... - // argN+1 = ... - - // Transformation diagram: - /* - Arg0 Arg2 Arg3 ... ArgN - | \ \ \ - | \ \ \ - ClHi ClLo Arg2 Arg3 ... ArgN: - */ - - // 1) memmove vregs 1-N into 2-N+1 - uint32_t* shadow_frame_vregs = shadow_frame->GetVRegArgs(/*i*/0); - if (lambda::kClosureIsStoredAsLong || - sizeof(void*) != sizeof(mirror::CompressedReference<mirror::LambdaProxy>)) { - // Suspending here would be very bad since we are doing a raw memmove - - // Move the primitive vregs over. - { - size_t shadow_frame_vregs_size = num_vregs; - memmove(shadow_frame_vregs + first_arg_reg, - shadow_frame_vregs, - shadow_frame_vregs_size - first_arg_reg); - } - - // Move the reference vregs over. - if (LIKELY(shadow_frame->HasReferenceArray())) { - uint32_t* shadow_frame_references = shadow_frame_vregs + num_vregs; - size_t shadow_frame_references_size = num_vregs; - memmove(shadow_frame_references + first_arg_reg, - shadow_frame_references, - shadow_frame_references_size - first_arg_reg); - } - - static_assert(lambda::kClosureSupportsReadBarrier == false, - "Using this memmove code with a read barrier GC seems like it could be unsafe."); - - static_assert(sizeof(mirror::CompressedReference<mirror::LambdaProxy>) == sizeof(uint32_t), - "This block of code assumes a compressed reference fits into exactly 1 vreg"); - } - // 2) replace proxy receiver with lambda - shadow_frame->SetVRegLong(0, static_cast<int64_t>(reinterpret_cast<uintptr_t>(lambda_closure))); - - // OK: After we do the invoke, the target method takes over managing the arguments - // and we won't ever access the shadow frame again (if any references moved). - self->EndAssertNoThreadSuspension(old_cause); - - // The shadow frame vreg contents are now 'owned' by the Invoke method, and - // will be managed by it during a GC despite being a raw uint32_t array. - // We however have no guarantee that it is updated on the way out, so do not read out of the - // shadow frame after this call. - JValue result; - target_method->Invoke(self, - shadow_frame_vregs, - num_vregs * sizeof(uint32_t), - /*out*/&result, - target_method->GetShorty()); - - // Restore references on the proxy caller stack frame which might have moved. 
- // -- This is necessary because the QuickFrameInfo is just the generic runtime "RefsAndArgs" - // which means that the regular stack visitor wouldn't know how to GC-move any references - // that we spilled ourselves in the proxy stub. - local_ref_visitor.FixupReferences(); - return result.GetJ(); -} - // Read object references held in arguments from quick frames and place in a JNI local references, // so they don't get garbage collected. class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor { diff --git a/runtime/entrypoints/runtime_asm_entrypoints.h b/runtime/entrypoints/runtime_asm_entrypoints.h index 1ef75855ab..2842c5a5a6 100644 --- a/runtime/entrypoints/runtime_asm_entrypoints.h +++ b/runtime/entrypoints/runtime_asm_entrypoints.h @@ -17,10 +17,6 @@ #ifndef ART_RUNTIME_ENTRYPOINTS_RUNTIME_ASM_ENTRYPOINTS_H_ #define ART_RUNTIME_ENTRYPOINTS_RUNTIME_ASM_ENTRYPOINTS_H_ -// Define entry points to assembly routines. -// All extern "C" functions here are defined in a corresponding assembly-only file. -// The exact file paths are runtime/arch/$ISA/quick_entrypoints_$ISA.s - namespace art { #ifndef BUILDING_LIBART @@ -56,13 +52,6 @@ static inline const void* GetQuickProxyInvokeHandler() { return reinterpret_cast<const void*>(art_quick_proxy_invoke_handler); } -// Return the address of quick stub code for handling transitions into the lambda proxy -// invoke handler. -extern "C" void art_quick_lambda_proxy_invoke_handler(); -static inline const void* GetQuickLambdaProxyInvokeHandler() { - return reinterpret_cast<const void*>(art_quick_lambda_proxy_invoke_handler); -} - // Return the address of quick stub code for resolving a method at first call. extern "C" void art_quick_resolution_trampoline(ArtMethod*); static inline const void* GetQuickResolutionStub() { diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc index 07f0628ee2..da9a79e1a2 100644 --- a/runtime/gc/heap.cc +++ b/runtime/gc/heap.cc @@ -672,8 +672,8 @@ std::string Heap::SafeGetClassDescriptor(mirror::Class* klass) { return result; } else if (UNLIKELY(klass->IsPrimitive<kVerifyNone>())) { return Primitive::Descriptor(klass->GetPrimitiveType<kVerifyNone>()); - } else if (UNLIKELY(klass->IsAnyProxyClass<kVerifyNone>())) { - return Runtime::Current()->GetClassLinker()->GetDescriptorForAnyProxy(klass); + } else if (UNLIKELY(klass->IsProxyClass<kVerifyNone>())) { + return Runtime::Current()->GetClassLinker()->GetDescriptorForProxy(klass); } else { mirror::DexCache* dex_cache = klass->GetDexCache<kVerifyNone>(); if (!IsValidContinuousSpaceObjectAddress(dex_cache)) { diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h index 2de8e7e6be..9f6699f730 100644 --- a/runtime/interpreter/interpreter_common.h +++ b/runtime/interpreter/interpreter_common.h @@ -888,56 +888,12 @@ static inline bool DoBoxLambda(Thread* self, ShadowFrame& shadow_frame, const In return false; } - StackHandleScope<1> hs{self}; // NOLINT: [readability/braces] [4]; - - // Use the lambda method's class loader since it's close enough. - // TODO: create-lambda should capture the current method's class loader and use that instead. - // TODO: Do we want create-lambda to work for static methods outside of the declaring class? - // --> then we need to store a classloader in the lambda method. otherwise we don't - // because it would always use the declaring class's class loader. - // TODO: add a GetClassLoader to the lambda closure which knows how to do this, - // don't hardcode this here. 
- Handle<ClassLoader> current_class_loader = hs.NewHandle( - lambda_closure->GetTargetMethod()->GetDeclaringClass()->GetClassLoader()); - - // TODO: get the type ID from the instruction - std::string class_name; - { - // Temporary hack to read the interface corresponding to a box-lambda. - // TODO: The box-lambda should encode the type ID instead, so we don't need to do this. - { - // Do a hack where we read from const-string the interface name - mirror::Object* string_reference = shadow_frame.GetVRegReference(vreg_target_object); - - CHECK(string_reference != nullptr) - << "box-lambda needs the type name stored in string vA (target), but it was null"; - - CHECK(string_reference->IsString()) - << "box-lambda needs the type name stored in string vA (target)"; - - mirror::String* as_string = string_reference->AsString(); - class_name = as_string->ToModifiedUtf8(); - } - - // Trigger class loading of the functional interface. - // TODO: This should actually be done by the create-lambda... - if (Runtime::Current()->GetClassLinker() - ->FindClass(self, class_name.c_str(), current_class_loader) == nullptr) { - CHECK(self->IsExceptionPending()); - self->AssertPendingException(); - return false; - } - } - mirror::Object* closure_as_object = - Runtime::Current()->GetLambdaBoxTable()->BoxLambda(lambda_closure, - class_name.c_str(), - current_class_loader.Get()); + Runtime::Current()->GetLambdaBoxTable()->BoxLambda(lambda_closure); // Failed to box the lambda, an exception was raised. if (UNLIKELY(closure_as_object == nullptr)) { CHECK(self->IsExceptionPending()); - shadow_frame.SetVRegReference(vreg_target_object, nullptr); return false; } diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc index 11a8c2e636..bf95a0e46f 100644 --- a/runtime/interpreter/interpreter_switch_impl.cc +++ b/runtime/interpreter/interpreter_switch_impl.cc @@ -102,8 +102,6 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item, size_t lambda_captured_variable_index = 0; while (true) { dex_pc = inst->GetDexPc(insns); - DCHECK_LE(dex_pc, code_item->insns_size_in_code_units_) - << "Dex PC overflowed code item size; missing return instruction?"; shadow_frame.SetDexPC(dex_pc); TraceExecution(shadow_frame, inst, dex_pc); inst_data = inst->Fetch16(0); diff --git a/runtime/lambda/art_lambda_method.cc b/runtime/lambda/art_lambda_method.cc index 0690cd1a31..6f9f8bbb59 100644 --- a/runtime/lambda/art_lambda_method.cc +++ b/runtime/lambda/art_lambda_method.cc @@ -14,7 +14,6 @@ * limitations under the License. */ -#include "art_method-inl.h" #include "lambda/art_lambda_method.h" #include "base/logging.h" @@ -74,12 +73,5 @@ ArtLambdaMethod::ArtLambdaMethod(ArtMethod* target_method, } } -size_t ArtLambdaMethod::GetArgumentVRegCount() const { - DCHECK(GetArtMethod()->IsStatic()); // Instance methods don't have receiver in shorty. - const char* method_shorty = GetArtMethod()->GetShorty(); - DCHECK_NE(*method_shorty, '\0') << method_shorty; - return ShortyFieldType::CountVirtualRegistersRequired(method_shorty + 1); // skip return type -} - } // namespace lambda } // namespace art diff --git a/runtime/lambda/art_lambda_method.h b/runtime/lambda/art_lambda_method.h index a858bf945d..ea13eb7af6 100644 --- a/runtime/lambda/art_lambda_method.h +++ b/runtime/lambda/art_lambda_method.h @@ -90,17 +90,6 @@ class ArtLambdaMethod { return strlen(captured_variables_shorty_); } - // Return the offset in bytes from the start of ArtLambdaMethod to the method_. 
- // -- Only should be used by assembly (stubs) support code and compiled code. - static constexpr size_t GetArtMethodOffset() { - return offsetof(ArtLambdaMethod, method_); - } - - // Calculate how many vregs all the arguments will use when doing an invoke. - // (Most primitives are 1 vregs, double/long are 2, reference is 1, lambda is 2). - // -- This is used to know how big to set up shadow frame when invoking into the target method. - size_t GetArgumentVRegCount() const SHARED_REQUIRES(Locks::mutator_lock_); - private: // TODO: ArtMethod, or at least the entry points should be inlined into this struct // to avoid an extra indirect load when doing invokes. diff --git a/runtime/lambda/box_class_table.cc b/runtime/lambda/box_class_table.cc deleted file mode 100644 index 1e49886b95..0000000000 --- a/runtime/lambda/box_class_table.cc +++ /dev/null @@ -1,204 +0,0 @@ -/* - * Copyright (C) 2015 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include "lambda/box_class_table.h" - -#include "base/mutex.h" -#include "common_throws.h" -#include "gc_root-inl.h" -#include "lambda/closure.h" -#include "lambda/leaking_allocator.h" -#include "mirror/method.h" -#include "mirror/object-inl.h" -#include "thread.h" - -#include <string> -#include <vector> - -namespace art { -namespace lambda { - -// Create the lambda proxy class given the name of the lambda interface (e.g. Ljava/lang/Runnable;) -// Also needs a proper class loader (or null for bootclasspath) where the proxy will be created -// into. -// -// The class must **not** have already been created. -// Returns a non-null ptr on success, otherwise returns null and has an exception set. -static mirror::Class* CreateClass(Thread* self, - const std::string& class_name, - const Handle<mirror::ClassLoader>& class_loader) - SHARED_REQUIRES(Locks::mutator_lock_) { - ScopedObjectAccessUnchecked soa(self); - StackHandleScope<2> hs(self); - - ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - - // Find the java.lang.Class for our class name (from the class loader). - Handle<mirror::Class> lambda_interface = - hs.NewHandle(class_linker->FindClass(self, class_name.c_str(), class_loader)); - // TODO: use LookupClass in a loop - // TODO: DCHECK That this doesn't actually cause the class to be loaded, - // since the create-lambda should've loaded it already - DCHECK(lambda_interface.Get() != nullptr) << "CreateClass with class_name=" << class_name; - DCHECK(lambda_interface->IsInterface()) << "CreateClass with class_name=" << class_name; - jobject lambda_interface_class = soa.AddLocalReference<jobject>(lambda_interface.Get()); - - // Look up java.lang.reflect.Proxy#getLambdaProxyClass method. 
- Handle<mirror::Class> java_lang_reflect_proxy = - hs.NewHandle(class_linker->FindSystemClass(soa.Self(), "Ljava/lang/reflect/Proxy;")); - jclass java_lang_reflect_proxy_class = - soa.AddLocalReference<jclass>(java_lang_reflect_proxy.Get()); - DCHECK(java_lang_reflect_proxy.Get() != nullptr); - - jmethodID proxy_factory_method_id = - soa.Env()->GetStaticMethodID(java_lang_reflect_proxy_class, - "getLambdaProxyClass", - "(Ljava/lang/ClassLoader;Ljava/lang/Class;)Ljava/lang/Class;"); - DCHECK(!soa.Env()->ExceptionCheck()); - - // Call into the java code to do the hard work of figuring out which methods and throws - // our lambda interface proxy needs to implement. It then calls back into the class linker - // on our behalf to make the proxy itself. - jobject generated_lambda_proxy_class = - soa.Env()->CallStaticObjectMethod(java_lang_reflect_proxy_class, - proxy_factory_method_id, - class_loader.ToJObject(), - lambda_interface_class); - - // This can throw in which case we return null. Caller must handle. - return soa.Decode<mirror::Class*>(generated_lambda_proxy_class); -} - -BoxClassTable::BoxClassTable() { -} - -BoxClassTable::~BoxClassTable() { - // Don't need to do anything, classes are deleted automatically by GC - // when the classloader is deleted. - // - // Our table will not outlive the classloader since the classloader owns it. -} - -mirror::Class* BoxClassTable::GetOrCreateBoxClass(const char* class_name, - const Handle<mirror::ClassLoader>& class_loader) { - DCHECK(class_name != nullptr); - - Thread* self = Thread::Current(); - - std::string class_name_str = class_name; - - { - MutexLock mu(self, *Locks::lambda_class_table_lock_); - - // Attempt to look up this class, it's possible it was already created previously. - // If this is the case we *must* return the same class as before to maintain - // referential equality between box instances. - // - // In managed code: - // Functional f = () -> 5; // vF = create-lambda - // Object a = f; // vA = box-lambda vA - // Object b = f; // vB = box-lambda vB - // assert(a.getClass() == b.getClass()) - // assert(a == b) - ValueType value = FindBoxedClass(class_name_str); - if (!value.IsNull()) { - return value.Read(); - } - } - - // Otherwise we need to generate a class ourselves and insert it into the hash map - - // Release the table lock here, which implicitly allows other threads to suspend - // (since the GC callbacks will not block on trying to acquire our lock). - // We also don't want to call into the class linker with the lock held because - // our lock level is lower. - self->AllowThreadSuspension(); - - // Create a lambda proxy class, within the specified class loader. - mirror::Class* lambda_proxy_class = CreateClass(self, class_name_str, class_loader); - - // There are no thread suspension points after this, so we don't need to put it into a handle. - ScopedAssertNoThreadSuspension soants{self, "BoxClassTable::GetOrCreateBoxClass"}; // NOLINT: [readability/braces] [4] - - if (UNLIKELY(lambda_proxy_class == nullptr)) { - // Most likely an OOM has occurred. - CHECK(self->IsExceptionPending()); - return nullptr; - } - - { - MutexLock mu(self, *Locks::lambda_class_table_lock_); - - // Possible, but unlikely, that someone already came in and made a proxy class - // on another thread. - ValueType value = FindBoxedClass(class_name_str); - if (UNLIKELY(!value.IsNull())) { - DCHECK_EQ(lambda_proxy_class, value.Read()); - return value.Read(); - } - - // Otherwise we made a brand new proxy class. 
- // The class itself is cleaned up by the GC (e.g. class unloading) later. - - // Actually insert into the table. - map_.Insert({std::move(class_name_str), ValueType(lambda_proxy_class)}); - } - - return lambda_proxy_class; -} - -BoxClassTable::ValueType BoxClassTable::FindBoxedClass(const std::string& class_name) const { - auto map_iterator = map_.Find(class_name); - if (map_iterator != map_.end()) { - const std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator; - const ValueType& value = key_value_pair.second; - - DCHECK(!value.IsNull()); // Never store null boxes. - return value; - } - - return ValueType(nullptr); -} - -void BoxClassTable::EmptyFn::MakeEmpty(std::pair<UnorderedMapKeyType, ValueType>& item) const { - item.first.clear(); - - Locks::mutator_lock_->AssertSharedHeld(Thread::Current()); - item.second = ValueType(); // Also clear the GC root. -} - -bool BoxClassTable::EmptyFn::IsEmpty(const std::pair<UnorderedMapKeyType, ValueType>& item) const { - bool is_empty = item.first.empty(); - DCHECK_EQ(item.second.IsNull(), is_empty); - - return is_empty; -} - -bool BoxClassTable::EqualsFn::operator()(const UnorderedMapKeyType& lhs, - const UnorderedMapKeyType& rhs) const { - // Be damn sure the classes don't just move around from under us. - Locks::mutator_lock_->AssertSharedHeld(Thread::Current()); - - // Being the same class name isn't enough, must also have the same class loader. - // When we are in the same class loader, classes are equal via the pointer. - return lhs == rhs; -} - -size_t BoxClassTable::HashFn::operator()(const UnorderedMapKeyType& key) const { - return std::hash<std::string>()(key); -} - -} // namespace lambda -} // namespace art diff --git a/runtime/lambda/box_class_table.h b/runtime/lambda/box_class_table.h deleted file mode 100644 index 17e10265f1..0000000000 --- a/runtime/lambda/box_class_table.h +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Copyright (C) 2015 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef ART_RUNTIME_LAMBDA_BOX_CLASS_TABLE_H_ -#define ART_RUNTIME_LAMBDA_BOX_CLASS_TABLE_H_ - -#include "base/allocator.h" -#include "base/hash_map.h" -#include "gc_root.h" -#include "base/macros.h" -#include "base/mutex.h" -#include "object_callbacks.h" - -#include <stdint.h> - -namespace art { - -class ArtMethod; // forward declaration -template<class T> class Handle; // forward declaration - -namespace mirror { -class Class; // forward declaration -class ClassLoader; // forward declaration -class LambdaProxy; // forward declaration -class Object; // forward declaration -} // namespace mirror - -namespace lambda { -struct Closure; // forward declaration - -/* - * Store a table of boxed lambdas. This is required to maintain object referential equality - * when a lambda is re-boxed. - * - * Conceptually, we store a mapping of Class Name -> Weak Reference<Class>. - * When too many objects get GCd, we shrink the underlying table to use less space. 
- */ -class BoxClassTable FINAL { - public: - // TODO: This should take a LambdaArtMethod instead, read class name from that. - // Note: null class_loader means bootclasspath. - mirror::Class* GetOrCreateBoxClass(const char* class_name, - const Handle<mirror::ClassLoader>& class_loader) - REQUIRES(!Locks::lambda_class_table_lock_, !Roles::uninterruptible_) - SHARED_REQUIRES(Locks::mutator_lock_); - - // Sweep strong references to lambda class boxes. Update the addresses if the objects - // have been moved, and delete them from the table if the objects have been cleaned up. - template <typename Visitor> - void VisitRoots(const Visitor& visitor) - NO_THREAD_SAFETY_ANALYSIS // for object marking requiring heap bitmap lock - REQUIRES(!Locks::lambda_class_table_lock_) - SHARED_REQUIRES(Locks::mutator_lock_); - - BoxClassTable(); - ~BoxClassTable(); - - private: - // We only store strong GC roots in our table. - using ValueType = GcRoot<mirror::Class>; - - // Attempt to look up the class in the map, or return null if it's not there yet. - ValueType FindBoxedClass(const std::string& class_name) const - SHARED_REQUIRES(Locks::lambda_class_table_lock_); - - // Store the key as a string so that we can have our own copy of the class name. - using UnorderedMapKeyType = std::string; - - // EmptyFn implementation for art::HashMap - struct EmptyFn { - void MakeEmpty(std::pair<UnorderedMapKeyType, ValueType>& item) const - NO_THREAD_SAFETY_ANALYSIS; - // SHARED_REQUIRES(Locks::mutator_lock_); - - bool IsEmpty(const std::pair<UnorderedMapKeyType, ValueType>& item) const; - }; - - // HashFn implementation for art::HashMap - struct HashFn { - size_t operator()(const UnorderedMapKeyType& key) const - NO_THREAD_SAFETY_ANALYSIS; - // SHARED_REQUIRES(Locks::mutator_lock_); - }; - - // EqualsFn implementation for art::HashMap - struct EqualsFn { - bool operator()(const UnorderedMapKeyType& lhs, const UnorderedMapKeyType& rhs) const - NO_THREAD_SAFETY_ANALYSIS; - // SHARED_REQUIRES(Locks::mutator_lock_); - }; - - using UnorderedMap = art::HashMap<UnorderedMapKeyType, - ValueType, - EmptyFn, - HashFn, - EqualsFn, - TrackingAllocator<std::pair<UnorderedMapKeyType, ValueType>, - kAllocatorTagLambdaProxyClassBoxTable>>; - - // Map of strong GC roots (lambda interface name -> lambda proxy class) - UnorderedMap map_ GUARDED_BY(Locks::lambda_class_table_lock_); - - // Shrink the map when we get below this load factor. - // (This is an arbitrary value that should be large enough to prevent aggressive map erases - // from shrinking the table too often.) - static constexpr double kMinimumLoadFactor = UnorderedMap::kDefaultMinLoadFactor / 2; - - DISALLOW_COPY_AND_ASSIGN(BoxClassTable); -}; - -} // namespace lambda -} // namespace art - -#endif // ART_RUNTIME_LAMBDA_BOX_CLASS_TABLE_H_ diff --git a/runtime/lambda/box_table.cc b/runtime/lambda/box_table.cc index 0032d081c6..9918bb71f3 100644 --- a/runtime/lambda/box_table.cc +++ b/runtime/lambda/box_table.cc @@ -18,10 +18,8 @@ #include "base/mutex.h" #include "common_throws.h" #include "gc_root-inl.h" -#include "lambda/box_class_table.h" #include "lambda/closure.h" #include "lambda/leaking_allocator.h" -#include "mirror/lambda_proxy.h" #include "mirror/method.h" #include "mirror/object-inl.h" #include "thread.h" @@ -30,13 +28,12 @@ namespace art { namespace lambda { -// All closures are boxed into a subtype of LambdaProxy which implements the lambda's interface. 
-using BoxedClosurePointerType = mirror::LambdaProxy*; +// Temporarily represent the lambda Closure as its raw bytes in an array. +// TODO: Generate a proxy class for the closure when boxing the first time. +using BoxedClosurePointerType = mirror::ByteArray*; -// Returns the base class for all boxed closures. -// Note that concrete closure boxes are actually a subtype of mirror::LambdaProxy. -static mirror::Class* GetBoxedClosureBaseClass() SHARED_REQUIRES(Locks::mutator_lock_) { - return Runtime::Current()->GetClassLinker()->GetClassRoot(ClassLinker::kJavaLangLambdaProxy); +static mirror::Class* GetBoxedClosureClass() SHARED_REQUIRES(Locks::mutator_lock_) { + return mirror::ByteArray::GetArrayClass(); } namespace { @@ -57,14 +54,6 @@ namespace { return closure; } }; - - struct DeleterForClosure { - void operator()(Closure* closure) const { - ClosureAllocator::Delete(closure); - } - }; - - using UniqueClosurePtr = std::unique_ptr<Closure, DeleterForClosure>; } // namespace BoxTable::BoxTable() @@ -86,9 +75,7 @@ BoxTable::~BoxTable() { } } -mirror::Object* BoxTable::BoxLambda(const ClosureType& closure, - const char* class_name, - mirror::ClassLoader* class_loader) { +mirror::Object* BoxTable::BoxLambda(const ClosureType& closure) { Thread* self = Thread::Current(); { @@ -104,7 +91,7 @@ mirror::Object* BoxTable::BoxLambda(const ClosureType& closure, // Functional f = () -> 5; // vF = create-lambda // Object a = f; // vA = box-lambda vA // Object b = f; // vB = box-lambda vB - // assert(a == b) + // assert(a == f) ValueType value = FindBoxedLambda(closure); if (!value.IsNull()) { return value.Read(); @@ -113,62 +100,30 @@ mirror::Object* BoxTable::BoxLambda(const ClosureType& closure, // Otherwise we need to box ourselves and insert it into the hash map } - // Convert the Closure into a managed object instance, whose supertype of java.lang.LambdaProxy. + // Release the lambda table lock here, so that thread suspension is allowed. - // TODO: Boxing a learned lambda (i.e. made with unbox-lambda) should return the original object - StackHandleScope<2> hs{self}; // NOLINT: [readability/braces] [4] + // Convert the Closure into a managed byte[] which will serve + // as the temporary 'boxed' version of the lambda. This is good enough + // to check all the basic object identities that a boxed lambda must retain. + // It's also good enough to contain all the captured primitive variables. - Handle<mirror::ClassLoader> class_loader_handle = hs.NewHandle(class_loader); + // TODO: Boxing an innate lambda (i.e. made with create-lambda) should make a proxy class + // TODO: Boxing a learned lambda (i.e. made with unbox-lambda) should return the original object + BoxedClosurePointerType closure_as_array_object = + mirror::ByteArray::Alloc(self, closure->GetSize()); - // Release the lambda table lock here, so that thread suspension is allowed. - self->AllowThreadSuspension(); - - lambda::BoxClassTable* lambda_box_class_table; - - // Find the lambda box class table, which can be in the system class loader if classloader is null - if (class_loader == nullptr) { - ScopedObjectAccessUnchecked soa(self); - mirror::ClassLoader* system_class_loader = - soa.Decode<mirror::ClassLoader*>(Runtime::Current()->GetSystemClassLoader()); - lambda_box_class_table = system_class_loader->GetLambdaProxyCache(); - } else { - lambda_box_class_table = class_loader_handle->GetLambdaProxyCache(); - // OK: can't be deleted while we hold a handle to the class loader. 
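A minimal sketch of this byte-oriented boxing, assuming a closure that captures only primitives (the restriction the comments above spell out); FlatClosure and both helpers are hypothetical:

#include <cstdint>
#include <cstring>
#include <vector>

// Hypothetical flat closure: a target id plus captured primitives only.
struct FlatClosure {
  uint64_t target_method;
  int32_t captured_int;
};

// Box: copy the raw bytes into a byte buffer (the managed byte[] stand-in).
std::vector<uint8_t> BoxToBytes(const FlatClosure& c) {
  std::vector<uint8_t> boxed(sizeof(FlatClosure));
  std::memcpy(boxed.data(), &c, sizeof(FlatClosure));  // OK: trivially copyable
  return boxed;
}

// Unbox: copy the bytes back out.
FlatClosure UnboxFromBytes(const std::vector<uint8_t>& boxed) {
  FlatClosure c;
  std::memcpy(&c, boxed.data(), sizeof(FlatClosure));
  return c;
}

The memcpy round-trip is only sound for trivially copyable data, which is exactly why the TODOs above defer object references to a real proxy-class scheme.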
- } - DCHECK(lambda_box_class_table != nullptr); + // There are no thread suspension points after this, so we don't need to put it into a handle. - Handle<mirror::Class> closure_class(hs.NewHandle( - lambda_box_class_table->GetOrCreateBoxClass(class_name, class_loader_handle))); - if (UNLIKELY(closure_class.Get() == nullptr)) { + if (UNLIKELY(closure_as_array_object == nullptr)) { // Most likely an OOM has occurred. - self->AssertPendingException(); + CHECK(self->IsExceptionPending()); return nullptr; } - BoxedClosurePointerType closure_as_object = nullptr; - UniqueClosurePtr closure_table_copy; - // Create an instance of the class, and assign the pointer to the closure into it. - { - closure_as_object = down_cast<BoxedClosurePointerType>(closure_class->AllocObject(self)); - if (UNLIKELY(closure_as_object == nullptr)) { - self->AssertPendingOOMException(); - return nullptr; - } - - // Make a copy of the closure that we will store in the hash map. - // The proxy instance will also point to this same hash map. - // Note that the closure pointer is cleaned up only after the proxy is GCd. - closure_table_copy.reset(ClosureAllocator::Allocate(closure->GetSize())); - closure_as_object->SetClosure(closure_table_copy.get()); - } - - // There are no thread suspension points after this, so we don't need to put it into a handle. - ScopedAssertNoThreadSuspension soants{self, // NOLINT: [whitespace/braces] [5] - "box lambda table - box lambda - no more suspensions"}; // NOLINT: [whitespace/braces] [5] - - // Write the raw closure data into the proxy instance's copy of the closure. - closure->CopyTo(closure_table_copy.get(), - closure->GetSize()); + // Write the raw closure data into the byte[]. + closure->CopyTo(closure_as_array_object->GetRawData(sizeof(uint8_t), // component size + 0 /*index*/), // index + closure_as_array_object->GetLength()); // The method has been successfully boxed into an object, now insert it into the hash map. { @@ -179,21 +134,24 @@ mirror::Object* BoxTable::BoxLambda(const ClosureType& closure, // we were allocating the object before. ValueType value = FindBoxedLambda(closure); if (UNLIKELY(!value.IsNull())) { - // Let the GC clean up closure_as_object at a later time. - // (We will not see this object when sweeping, it wasn't inserted yet.) - closure_as_object->SetClosure(nullptr); + // Let the GC clean up method_as_object at a later time. return value.Read(); } // Otherwise we need to insert it into the hash map in this thread. - // The closure_table_copy is deleted by us manually when we erase it from the map. + // Make a copy for the box table to keep, in case the closure gets collected from the stack. + // TODO: GC may need to sweep for roots in the box table's copy of the closure. + Closure* closure_table_copy = ClosureAllocator::Allocate(closure->GetSize()); + closure->CopyTo(closure_table_copy, closure->GetSize()); + + // The closure_table_copy needs to be deleted by us manually when we erase it from the map. // Actually insert into the table. 
- map_.Insert({closure_table_copy.release(), ValueType(closure_as_object)}); + map_.Insert({closure_table_copy, ValueType(closure_as_array_object)}); } - return closure_as_object; + return closure_as_array_object; } bool BoxTable::UnboxLambda(mirror::Object* object, ClosureType* out_closure) { @@ -207,35 +165,29 @@ bool BoxTable::UnboxLambda(mirror::Object* object, ClosureType* out_closure) { mirror::Object* boxed_closure_object = object; - // Raise ClassCastException if object is not instanceof LambdaProxy - if (UNLIKELY(!boxed_closure_object->InstanceOf(GetBoxedClosureBaseClass()))) { - ThrowClassCastException(GetBoxedClosureBaseClass(), boxed_closure_object->GetClass()); + // Raise ClassCastException if object is not instanceof byte[] + if (UNLIKELY(!boxed_closure_object->InstanceOf(GetBoxedClosureClass()))) { + ThrowClassCastException(GetBoxedClosureClass(), boxed_closure_object->GetClass()); return false; } // TODO(iam): We must check that the closure object extends/implements the type - // specified in [type id]. This is not currently implemented since the type id is unavailable. + // specified in [type id]. This is not currently implemented since it's always a byte[]. // If we got this far, the inputs are valid. - // Shuffle the java.lang.LambdaProxy back into a raw closure, then allocate it, copy, - // and return it. - BoxedClosurePointerType boxed_closure = + // Shuffle the byte[] back into a raw closure, then allocate it, copy, and return it. + BoxedClosurePointerType boxed_closure_as_array = down_cast<BoxedClosurePointerType>(boxed_closure_object); - DCHECK_ALIGNED(boxed_closure->GetClosure(), alignof(Closure)); - const Closure* aligned_interior_closure = boxed_closure->GetClosure(); - DCHECK(aligned_interior_closure != nullptr); - - // TODO: we probably don't need to make a copy here later on, once there's GC support. + const int8_t* unaligned_interior_closure = boxed_closure_as_array->GetData(); // Allocate a copy that can "escape" and copy the closure data into that. Closure* unboxed_closure = - LeakingAllocator::MakeFlexibleInstance<Closure>(self, aligned_interior_closure->GetSize()); - DCHECK_ALIGNED(unboxed_closure, alignof(Closure)); + LeakingAllocator::MakeFlexibleInstance<Closure>(self, boxed_closure_as_array->GetLength()); // TODO: don't just memcpy the closure, it's unsafe when we add references to the mix. - memcpy(unboxed_closure, aligned_interior_closure, aligned_interior_closure->GetSize()); + memcpy(unboxed_closure, unaligned_interior_closure, boxed_closure_as_array->GetLength()); - DCHECK_EQ(unboxed_closure->GetSize(), aligned_interior_closure->GetSize()); + DCHECK_EQ(unboxed_closure->GetSize(), static_cast<size_t>(boxed_closure_as_array->GetLength())); *out_closure = unboxed_closure; return true; @@ -284,10 +236,9 @@ void BoxTable::SweepWeakBoxedLambdas(IsMarkedVisitor* visitor) { if (new_value == nullptr) { // The object has been swept away. - Closure* closure = key_value_pair.first; + const ClosureType& closure = key_value_pair.first; // Delete the entry from the map. - // (Remove from map first to avoid accessing dangling pointer). map_iterator = map_.Erase(map_iterator); // Clean up the memory by deleting the closure. 
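For illustration, the sweep loop above reduces to the following standard-library sketch, with std::weak_ptr::expired() standing in for the IsMarkedVisitor returning null; every type here is a hypothetical stand-in:

#include <map>
#include <memory>

struct Box {};      // stand-in for the boxed object the GC may reclaim
struct Closure {};  // stand-in for the table-owned closure copy

struct Entry {
  std::weak_ptr<Box> box;                 // weak reference, like the GC root
  std::unique_ptr<Closure> closure_copy;  // manually owned table copy
};

// Erase every entry whose box did not survive the collection.
void Sweep(std::map<int, Entry>& table) {
  for (auto it = table.begin(); it != table.end();) {
    if (it->second.box.expired()) {
      it = table.erase(it);  // destroying Entry also frees the closure copy
    } else {
      ++it;
    }
  }
}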
@@ -339,10 +290,7 @@ void BoxTable::EmptyFn::MakeEmpty(std::pair<UnorderedMapKeyType, ValueType>& ite } bool BoxTable::EmptyFn::IsEmpty(const std::pair<UnorderedMapKeyType, ValueType>& item) const { - bool is_empty = item.first == nullptr; - DCHECK_EQ(item.second.IsNull(), is_empty); - - return is_empty; + return item.first == nullptr; } bool BoxTable::EqualsFn::operator()(const UnorderedMapKeyType& lhs, diff --git a/runtime/lambda/box_table.h b/runtime/lambda/box_table.h index 9dca6ab66b..adb733271e 100644 --- a/runtime/lambda/box_table.h +++ b/runtime/lambda/box_table.h @@ -30,9 +30,6 @@ namespace art { class ArtMethod; // forward declaration namespace mirror { -class Class; // forward declaration -class ClassLoader; // forward declaration -class LambdaProxy; // forward declaration class Object; // forward declaration } // namespace mirror @@ -51,11 +48,8 @@ class BoxTable FINAL { using ClosureType = art::lambda::Closure*; // Boxes a closure into an object. Returns null and throws an exception on failure. - mirror::Object* BoxLambda(const ClosureType& closure, - const char* class_name, - mirror::ClassLoader* class_loader) - REQUIRES(!Locks::lambda_table_lock_, !Roles::uninterruptible_) - SHARED_REQUIRES(Locks::mutator_lock_); + mirror::Object* BoxLambda(const ClosureType& closure) + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::lambda_table_lock_); // Unboxes an object back into the lambda. Returns false and throws an exception on failure. bool UnboxLambda(mirror::Object* object, ClosureType* out_closure) @@ -134,16 +128,7 @@ class BoxTable FINAL { TrackingAllocator<std::pair<ClosureType, ValueType>, kAllocatorTagLambdaBoxTable>>; - using ClassMap = art::HashMap<std::string, - GcRoot<mirror::Class>, - EmptyFn, - HashFn, - EqualsFn, - TrackingAllocator<std::pair<ClosureType, ValueType>, - kAllocatorTagLambdaProxyClassBoxTable>>; - UnorderedMap map_ GUARDED_BY(Locks::lambda_table_lock_); - UnorderedMap classes_map_ GUARDED_BY(Locks::lambda_table_lock_); bool allow_new_weaks_ GUARDED_BY(Locks::lambda_table_lock_); ConditionVariable new_weaks_condition_ GUARDED_BY(Locks::lambda_table_lock_); diff --git a/runtime/lambda/closure.cc b/runtime/lambda/closure.cc index f935e049fd..179e4ee7f2 100644 --- a/runtime/lambda/closure.cc +++ b/runtime/lambda/closure.cc @@ -20,6 +20,9 @@ #include "lambda/art_lambda_method.h" #include "runtime/mirror/object_reference.h" +static constexpr const bool kClosureSupportsReferences = false; +static constexpr const bool kClosureSupportsGarbageCollection = false; + namespace art { namespace lambda { @@ -125,10 +128,6 @@ ArtMethod* Closure::GetTargetMethod() const { return const_cast<ArtMethod*>(lambda_info_->GetArtMethod()); } -ArtLambdaMethod* Closure::GetLambdaInfo() const { - return const_cast<ArtLambdaMethod*>(lambda_info_); -} - uint32_t Closure::GetHashCode() const { // Start with a non-zero constant, a prime number. uint32_t result = 17; diff --git a/runtime/lambda/closure.h b/runtime/lambda/closure.h index 38ec063ed2..31ff1944d2 100644 --- a/runtime/lambda/closure.h +++ b/runtime/lambda/closure.h @@ -33,52 +33,12 @@ namespace lambda { class ArtLambdaMethod; // forward declaration class ClosureBuilder; // forward declaration -// TODO: Remove these constants once closures are supported properly. - -// Does the lambda closure support containing references? If so, all the users of lambdas -// must be updated to also support references. 
-static constexpr const bool kClosureSupportsReferences = false; -// Does the lambda closure support being garbage collected? If so, all the users of lambdas -// must be updated to also support garbage collection. -static constexpr const bool kClosureSupportsGarbageCollection = false; -// Does the lambda closure support being garbage collected with a read barrier? If so, -// all the users of the lambdas must also be updated to support read barrier GC. -static constexpr const bool kClosureSupportsReadBarrier = false; - -// Is this closure being stored as a 'long' in shadow frames and the quick ABI? -static constexpr const bool kClosureIsStoredAsLong = true; - - -// Raw memory layout for the lambda closure. -// -// WARNING: -// * This should only be used by the compiler and tests, as they need to offsetof the raw fields. -// * Runtime/interpreter should always access closures through a Closure pointer. -struct ClosureStorage { - // Compile-time known lambda information such as the type descriptor and size. - ArtLambdaMethod* lambda_info_; - - // A contiguous list of captured variables, and possibly the closure size. - // The runtime size can always be determined through GetSize(). - union { - // Read from here if the closure size is static (ArtLambdaMethod::IsStatic) - uint8_t static_variables_[0]; - struct { - // Read from here if the closure size is dynamic (ArtLambdaMethod::IsDynamic) - size_t size_; // The lambda_info_ and the size_ itself are also included as part of the size. - uint8_t variables_[0]; - } dynamic_; - } captured_[0]; - // captured_ will always consist of one array element at runtime. - // Set to [0] so that 'size_' is not counted in sizeof(Closure). -}; - // Inline representation of a lambda closure. // Contains the target method and the set of packed captured variables as a copy. // // The closure itself is logically immutable, although in practice any object references // it (recursively) contains can be moved and updated by the GC. -struct Closure : private ClosureStorage { +struct PACKED(sizeof(ArtLambdaMethod*)) Closure { // Get the size of the Closure in bytes. // This is necessary in order to allocate a large enough area to copy the Closure into. // Do *not* copy the closure with memcpy, since references also need to get moved. @@ -92,9 +52,6 @@ struct Closure : private ClosureStorage { // Get the target method, i.e. the method that will be dispatched into with invoke-lambda. ArtMethod* GetTargetMethod() const; - // Get the static lambda info that never changes. - ArtLambdaMethod* GetLambdaInfo() const; - // Calculates the hash code. Value is recomputed each time. uint32_t GetHashCode() const SHARED_REQUIRES(Locks::mutator_lock_); @@ -199,15 +156,28 @@ struct Closure : private ClosureStorage { static size_t GetClosureSize(const uint8_t* closure); /////////////////////////////////////////////////////////////////////////////////// - // NOTE: Actual fields are declared in ClosureStorage. + + // Compile-time known lambda information such as the type descriptor and size. + ArtLambdaMethod* lambda_info_; + + // A contiguous list of captured variables, and possibly the closure size. + // The runtime size can always be determined through GetSize(). + union { + // Read from here if the closure size is static (ArtLambdaMethod::IsStatic) + uint8_t static_variables_[0]; + struct { + // Read from here if the closure size is dynamic (ArtLambdaMethod::IsDynamic) + size_t size_; // The lambda_info_ and the size_ itself are also included as part of the size.
+ uint8_t variables_[0]; + } dynamic_; + } captured_[0]; + // captured_ will always consist of one array element at runtime. + // Set to [0] so that 'size_' is not counted in sizeof(Closure). + + friend class ClosureBuilder; friend class ClosureTest; }; -// ABI guarantees: -// * Closure same size as a ClosureStorage -// * ClosureStorage begins at the same point a Closure would begin. -static_assert(sizeof(Closure) == sizeof(ClosureStorage), "Closure size must match ClosureStorage"); - } // namespace lambda } // namespace art diff --git a/runtime/lambda/closure_builder.cc b/runtime/lambda/closure_builder.cc index 7b36042921..739e965238 100644 --- a/runtime/lambda/closure_builder.cc +++ b/runtime/lambda/closure_builder.cc @@ -75,7 +75,7 @@ void ClosureBuilder::CaptureVariableLambda(Closure* closure) { if (LIKELY(is_dynamic_size_ == false)) { // Write in the extra bytes to store the dynamic size the first time. is_dynamic_size_ = true; - size_ += sizeof(ClosureStorage::captured_[0].dynamic_.size_); + size_ += sizeof(Closure::captured_[0].dynamic_.size_); } // A closure may be sized dynamically, so always query it for the true size. @@ -107,40 +107,38 @@ Closure* ClosureBuilder::CreateInPlace(void* memory, ArtLambdaMethod* target_met << "number of variables captured at runtime does not match " << "number of variables captured at compile time"; - ClosureStorage* closure_storage = new (memory) ClosureStorage; - closure_storage->lambda_info_ = target_method; + Closure* closure = new (memory) Closure; + closure->lambda_info_ = target_method; - static_assert(offsetof(ClosureStorage, captured_) == kInitialSize, "wrong initial size"); + static_assert(offsetof(Closure, captured_) == kInitialSize, "wrong initial size"); size_t written_size; if (UNLIKELY(is_dynamic_size_)) { // The closure size must be set dynamically (i.e. nested lambdas). - closure_storage->captured_[0].dynamic_.size_ = GetSize(); - size_t header_size = offsetof(ClosureStorage, captured_[0].dynamic_.variables_); + closure->captured_[0].dynamic_.size_ = GetSize(); + size_t header_size = offsetof(Closure, captured_[0].dynamic_.variables_); DCHECK_LE(header_size, GetSize()); size_t variables_size = GetSize() - header_size; written_size = WriteValues(target_method, - closure_storage->captured_[0].dynamic_.variables_, + closure->captured_[0].dynamic_.variables_, header_size, variables_size); } else { // The closure size is known statically (i.e. no nested lambdas). DCHECK(GetSize() == target_method->GetStaticClosureSize()); - size_t header_size = offsetof(ClosureStorage, captured_[0].static_variables_); + size_t header_size = offsetof(Closure, captured_[0].static_variables_); DCHECK_LE(header_size, GetSize()); size_t variables_size = GetSize() - header_size; written_size = WriteValues(target_method, - closure_storage->captured_[0].static_variables_, + closure->captured_[0].static_variables_, header_size, variables_size); } - // OK: The closure storage is guaranteed to be the same as a closure. - Closure* closure = reinterpret_cast<Closure*>(closure_storage); - DCHECK_EQ(written_size, closure->GetSize()); + return closure; } diff --git a/runtime/lambda/shorty_field_type.h b/runtime/lambda/shorty_field_type.h index 54bb4d4fe6..46ddaa9ab3 100644 --- a/runtime/lambda/shorty_field_type.h +++ b/runtime/lambda/shorty_field_type.h @@ -285,39 +285,6 @@ struct ShortyFieldType : ValueObject { } } - // Get the number of virtual registers necessary to represent this type as a stack local. 
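A standalone sketch of the trailing-storage layout restored above; because zero-length arrays are a GNU extension, this version computes offsets manually instead. ClosureHeader and both helpers are hypothetical:

#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>

// Fixed prefix of the layout: the compile-time lambda information.
struct ClosureHeader {
  const void* lambda_info;  // stand-in for ArtLambdaMethod*
};

// Dynamic case: the total size is stored right after the header, then the
// captured-variable bytes follow, mirroring captured_[0].dynamic_ above.
// (Allocation-failure handling is omitted for brevity.)
uint8_t* AllocDynamicClosure(size_t payload_bytes) {
  const size_t header = sizeof(ClosureHeader) + sizeof(size_t);
  const size_t total = header + payload_bytes;  // size counts header and itself
  uint8_t* raw = static_cast<uint8_t*>(std::malloc(total));
  std::memcpy(raw + sizeof(ClosureHeader), &total, sizeof(total));
  return raw;  // captured variables live at raw + header
}

// Reading the size back mirrors GetSize() in the dynamic case.
size_t GetDynamicClosureSize(const uint8_t* closure) {
  size_t total;
  std::memcpy(&total, closure + sizeof(ClosureHeader), sizeof(total));
  return total;
}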
- inline size_t GetVirtualRegisterCount() const { - if (IsPrimitiveNarrow()) { - return 1; - } else if (IsPrimitiveWide()) { - return 2; - } else if (IsObject()) { - return kObjectReferenceSize / sizeof(uint32_t); - } else if (IsLambda()) { - return 2; - } else { - DCHECK(false) << "unknown shorty field type '" << static_cast<char>(value_) << "'"; - UNREACHABLE(); - } - } - - // Count how many virtual registers would be necessary in order to store this list of shorty - // field types. - inline size_t static CountVirtualRegistersRequired(const char* shorty) { - size_t size = 0; - - while (shorty != nullptr && *shorty != '\0') { - // Each argument appends to the size. - ShortyFieldType shorty_field{*shorty}; // NOLINT [readability/braces] [4] - - size += shorty_field.GetVirtualRegisterCount(); - - ++shorty; - } - - return size; - } - // Implicitly convert to the anonymous nested inner type. Used for exhaustive switch detection. inline operator decltype(kByte)() const { return value_; diff --git a/runtime/lambda/shorty_field_type_test.cc b/runtime/lambda/shorty_field_type_test.cc index 430e39e94d..32bade9b56 100644 --- a/runtime/lambda/shorty_field_type_test.cc +++ b/runtime/lambda/shorty_field_type_test.cc @@ -218,56 +218,6 @@ TEST_F(ShortyFieldTypeTest, TestParseFromFieldTypeDescriptor) { } } // TEST_F -TEST_F(ShortyFieldTypeTest, TestCalculateVRegSize) { - // Make sure the single calculation for each value is correct. - std::pair<size_t, char> expected_actual_single[] = { - // Primitives - { 1u, 'Z' }, - { 1u, 'B' }, - { 1u, 'C' }, - { 1u, 'S' }, - { 1u, 'I' }, - { 1u, 'F' }, - { 2u, 'J' }, - { 2u, 'D' }, - // Non-primitives - { 1u, 'L' }, - { 2u, '\\' }, - }; - - for (auto pair : expected_actual_single) { - SCOPED_TRACE(pair.second); - EXPECT_EQ(pair.first, ShortyFieldType(pair.second).GetVirtualRegisterCount()); - } - - // Make sure we are correctly calculating how many virtual registers a shorty descriptor takes. - std::pair<size_t, const char*> expected_actual[] = { - // Empty list - { 0u, "" }, - // Primitives - { 1u, "Z" }, - { 1u, "B" }, - { 1u, "C" }, - { 1u, "S" }, - { 1u, "I" }, - { 1u, "F" }, - { 2u, "J" }, - { 2u, "D" }, - // Non-primitives - { 1u, "L" }, - { 2u, "\\" }, - // Multiple things at once: - { 10u, "ZBCSIFJD" }, - { 5u, "LLSSI" }, - { 6u, "LLL\\L" } - }; - - for (auto pair : expected_actual) { - SCOPED_TRACE(pair.second); - EXPECT_EQ(pair.first, ShortyFieldType::CountVirtualRegistersRequired(pair.second)); - } -} // TEST_F - // Helper class to probe a shorty's characteristics by minimizing copy-and-paste tests. template <typename T, decltype(ShortyFieldType::kByte) kShortyEnum> struct ShortyTypeCharacteristics { diff --git a/runtime/lambda_proxy_test.cc b/runtime/lambda_proxy_test.cc deleted file mode 100644 index 63d6cccedb..0000000000 --- a/runtime/lambda_proxy_test.cc +++ /dev/null @@ -1,367 +0,0 @@ -/* - * Copyright (C) 2015 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include <jni.h> -#include <vector> - -#include "art_field-inl.h" -#include "class_linker-inl.h" -#include "compiler_callbacks.h" -#include "common_compiler_test.h" -#include "mirror/field-inl.h" -#include "mirror/lambda_proxy.h" -#include "mirror/method.h" -#include "scoped_thread_state_change.h" - -namespace art { - -// The enclosing class of all the interfaces used by this test. -// -- Defined as a macro to allow for string concatenation. -#define TEST_INTERFACE_ENCLOSING_CLASS_NAME "LambdaInterfaces" -// Generates "LLambdaInterfaces$<<iface>>;", replacing <<iface>> with the interface name. -#define MAKE_TEST_INTERFACE_NAME(iface) ("L" TEST_INTERFACE_ENCLOSING_CLASS_NAME "$" iface ";") - -#define ASSERT_NOT_NULL(x) ASSERT_TRUE((x) != nullptr) -#define ASSERT_NULL(x) ASSERT_TRUE((x) == nullptr) -#define EXPECT_NULL(x) EXPECT_TRUE((x) == nullptr) - -class LambdaProxyTest // : public CommonCompilerTest { - : public CommonRuntimeTest { - public: - // Generate a lambda proxy class with the given name and interfaces. This is a simplification of what - // libcore does to fit our test needs. We do not check for duplicated interfaces or methods and - // we do not declare exceptions. - mirror::Class* GenerateProxyClass(ScopedObjectAccess& soa, - jobject jclass_loader, - const char* class_name, - const std::vector<mirror::Class*>& interfaces) - SHARED_REQUIRES(Locks::mutator_lock_) { - CHECK(class_name != nullptr); - CHECK(jclass_loader != nullptr); - - mirror::Class* java_lang_object = - class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;"); - CHECK(java_lang_object != nullptr); - - jclass java_lang_class = soa.AddLocalReference<jclass>(mirror::Class::GetJavaLangClass()); - - // Builds the interfaces array. - jobjectArray proxy_class_interfaces = soa.Env()->NewObjectArray(interfaces.size(), - java_lang_class, - nullptr); // No initial element. - soa.Self()->AssertNoPendingException(); - for (size_t i = 0; i < interfaces.size(); ++i) { - soa.Env()->SetObjectArrayElement(proxy_class_interfaces, - i, - soa.AddLocalReference<jclass>(interfaces[i])); - } - - // Builds the method array. - jsize methods_count = 3; // Object.equals, Object.hashCode and Object.toString. - for (mirror::Class* interface : interfaces) { - methods_count += interface->NumVirtualMethods(); - } - jobjectArray proxy_class_methods = - soa.Env()->NewObjectArray(methods_count, - soa.AddLocalReference<jclass>(mirror::Method::StaticClass()), - nullptr); // No initial element. - soa.Self()->AssertNoPendingException(); - - jsize array_index = 0; - - // - // Fill the method array with Object's methods and all the interfaces' virtual methods. - // - - // Add a method to 'proxy_class_methods' - auto add_method_to_array = [&](ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_) { - CHECK(method != nullptr); - soa.Env()->SetObjectArrayElement(proxy_class_methods, - array_index++, - soa.AddLocalReference<jobject>( - mirror::Method::CreateFromArtMethod(soa.Self(), - method)) - ); // NOLINT: [whitespace/parens] [2] - - LOG(DEBUG) << "Add " << PrettyMethod(method) << " to list of methods to generate proxy"; - }; - // Add a method to 'proxy_class_methods' by looking it up from java.lang.Object - auto add_method_to_array_by_lookup = [&](const char* name, const char* method_descriptor) - SHARED_REQUIRES(Locks::mutator_lock_) { - ArtMethod* method = java_lang_object->FindDeclaredVirtualMethod(name, - method_descriptor, - sizeof(void*)); - add_method_to_array(method); - }; - - // Add all methods from Object.
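The jobjectArray construction pattern used by this helper can be sketched in isolation. MakeClassArray is hypothetical; a valid JNIEnv* and a jclass for java.lang.Class are assumed, and error handling is reduced to the out-of-memory case:

#include <jni.h>

jobjectArray MakeClassArray(JNIEnv* env,
                            jclass java_lang_class,
                            jclass* classes,
                            jsize count) {
  jobjectArray array = env->NewObjectArray(count, java_lang_class, nullptr);
  if (array == nullptr) {
    return nullptr;  // An OutOfMemoryError is now pending.
  }
  for (jsize i = 0; i < count; ++i) {
    env->SetObjectArrayElement(array, i, classes[i]);
  }
  return array;
}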
- add_method_to_array_by_lookup("equals", "(Ljava/lang/Object;)Z"); - add_method_to_array_by_lookup("hashCode", "()I"); - add_method_to_array_by_lookup("toString", "()Ljava/lang/String;"); - - // Now add all the interfaces' virtual methods. - for (mirror::Class* interface : interfaces) { - mirror::Class* next_class = interface; - do { - for (ArtMethod& method : next_class->GetVirtualMethods(sizeof(void*))) { - add_method_to_array(&method); - } - next_class = next_class->GetSuperClass(); - } while (!next_class->IsObjectClass()); - // Skip adding any methods from "Object". - } - CHECK_EQ(array_index, methods_count); - - // Builds an empty exception array. - jobjectArray proxy_class_throws = soa.Env()->NewObjectArray(0 /* length */, - java_lang_class, - nullptr /* initial element*/); - soa.Self()->AssertNoPendingException(); - - bool already_exists; - mirror::Class* proxy_class = - class_linker_->CreateLambdaProxyClass(soa, - soa.Env()->NewStringUTF(class_name), - proxy_class_interfaces, - jclass_loader, - proxy_class_methods, - proxy_class_throws, - /*out*/&already_exists); - - CHECK(!already_exists); - - soa.Self()->AssertNoPendingException(); - return proxy_class; - } - - LambdaProxyTest() { - } - - virtual void SetUp() { - CommonRuntimeTest::SetUp(); - } - - virtual void SetUpRuntimeOptions(RuntimeOptions* options ATTRIBUTE_UNUSED) { - // Do not have any compiler options because we don't want to run as an AOT - // (In particular the lambda proxy class generation isn't currently supported for AOT). - this->callbacks_.reset(); - } - - template <typename THandleScope> - Handle<mirror::Class> GenerateProxyClass(THandleScope& hs, - const char* name, - const std::vector<mirror::Class*>& interfaces) - SHARED_REQUIRES(Locks::mutator_lock_) { - return hs.NewHandle(GenerateProxyClass(*soa_, jclass_loader_, name, interfaces)); - } - - protected: - ScopedObjectAccess* soa_ = nullptr; - jobject jclass_loader_ = nullptr; -}; - -// Creates a lambda proxy class and checks that ClassHelper works correctly. -TEST_F(LambdaProxyTest, ProxyClassHelper) { - // gLogVerbosity.class_linker = true; // Uncomment to enable class linker logging. - - ASSERT_NOT_NULL(Thread::Current()); - - ScopedObjectAccess soa(Thread::Current()); - soa_ = &soa; - - // Must happen after CommonRuntimeTest finishes constructing the runtime. - jclass_loader_ = LoadDex(TEST_INTERFACE_ENCLOSING_CLASS_NAME); - jobject jclass_loader = jclass_loader_; - - StackHandleScope<4> hs(soa.Self()); - Handle<mirror::ClassLoader> class_loader( - hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader))); - - Handle<mirror::Class> J(hs.NewHandle( - class_linker_->FindClass(soa.Self(), MAKE_TEST_INTERFACE_NAME("J"), class_loader))); - ASSERT_TRUE(J.Get() != nullptr); - - std::vector<mirror::Class*> interfaces; - interfaces.push_back(J.Get()); - Handle<mirror::Class> proxy_class(hs.NewHandle( - GenerateProxyClass(soa, jclass_loader, "$Proxy1234", interfaces))); - interfaces.clear(); // Don't leave possibly stale objects in the array, as good practice. - ASSERT_TRUE(proxy_class.Get() != nullptr); - ASSERT_TRUE(proxy_class->IsLambdaProxyClass()); - ASSERT_TRUE(proxy_class->IsInitialized()); - - EXPECT_EQ(1U, proxy_class->NumDirectInterfaces()); // LambdaInterfaces$J.
- EXPECT_EQ(J.Get(), mirror::Class::GetDirectInterface(soa.Self(), proxy_class, 0)); - std::string temp; - const char* proxy_class_descriptor = proxy_class->GetDescriptor(&temp); - EXPECT_STREQ("L$Proxy1234;", proxy_class_descriptor); - EXPECT_EQ(nullptr, proxy_class->GetSourceFile()); - - // Make sure all the virtual methods are marked as a proxy - for (ArtMethod& method : proxy_class->GetVirtualMethods(sizeof(void*))) { - SCOPED_TRACE(PrettyMethod(&method, /* with_signature */true)); - EXPECT_TRUE(method.IsProxyMethod()); - EXPECT_TRUE(method.IsLambdaProxyMethod()); - EXPECT_FALSE(method.IsReflectProxyMethod()); - } -} - -// Creates a proxy class and check FieldHelper works correctly. -TEST_F(LambdaProxyTest, ProxyFieldHelper) { - // gLogVerbosity.class_linker = true; // Uncomment to enable class linker logging. - - ASSERT_NOT_NULL(Thread::Current()); - - ScopedObjectAccess soa(Thread::Current()); - soa_ = &soa; - - // Must happen after CommonRuntimeTest finishes constructing the runtime. - jclass_loader_ = LoadDex(TEST_INTERFACE_ENCLOSING_CLASS_NAME); - jobject jclass_loader = jclass_loader_; - - StackHandleScope<9> hs(soa.Self()); - Handle<mirror::ClassLoader> class_loader( - hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader))); - - Handle<mirror::Class> I(hs.NewHandle( - class_linker_->FindClass(soa.Self(), MAKE_TEST_INTERFACE_NAME("I"), class_loader))); - ASSERT_NOT_NULL(I.Get()); - - // Create the lambda proxy which implements interfaces "I". - Handle<mirror::Class> proxy_class = GenerateProxyClass(hs, - "$Proxy1234", - { I.Get() }); // Interfaces. - - ASSERT_NOT_NULL(proxy_class.Get()); - EXPECT_TRUE(proxy_class->IsLambdaProxyClass()); - EXPECT_TRUE(proxy_class->IsInitialized()); - EXPECT_NULL(proxy_class->GetIFieldsPtr()); - - LengthPrefixedArray<ArtField>* static_fields = proxy_class->GetSFieldsPtr(); - ASSERT_NOT_NULL(static_fields); - - // Must have "throws" and "interfaces" static fields. - ASSERT_EQ(+mirror::LambdaProxy::kStaticFieldCount, proxy_class->NumStaticFields()); - - static constexpr const char* kInterfacesClassName = "[Ljava/lang/Class;"; - static constexpr const char* kThrowsClassName = "[[Ljava/lang/Class;"; - - // Class for "interfaces" field. - Handle<mirror::Class> interfaces_field_class = - hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), kInterfacesClassName)); - ASSERT_NOT_NULL(interfaces_field_class.Get()); - - // Class for "throws" field. - Handle<mirror::Class> throws_field_class = - hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), kThrowsClassName)); - ASSERT_NOT_NULL(throws_field_class.Get()); - - // Helper to test the static fields for correctness. - auto test_static_field = [&](size_t index, - const char* field_name, - Handle<mirror::Class>& handle_class, - const char* class_name) - SHARED_REQUIRES(Locks::mutator_lock_) { - ArtField* field = &static_fields->At(index); - EXPECT_STREQ(field_name, field->GetName()); - EXPECT_STREQ(class_name, field->GetTypeDescriptor()); - EXPECT_EQ(handle_class.Get(), field->GetType</*kResolve*/true>()) - << "Expected: " << PrettyClass(interfaces_field_class.Get()) << ", " - << "Actual: " << PrettyClass(field->GetType</*kResolve*/true>()) << ", " - << "field_name: " << field_name; - std::string temp; - EXPECT_STREQ("L$Proxy1234;", field->GetDeclaringClass()->GetDescriptor(&temp)); - EXPECT_FALSE(field->IsPrimitiveType()); - }; - - // Test "Class[] interfaces" field. 
- test_static_field(mirror::LambdaProxy::kStaticFieldIndexInterfaces, - "interfaces", - interfaces_field_class, - kInterfacesClassName); - - // Test "Class[][] throws" field. - test_static_field(mirror::LambdaProxy::kStaticFieldIndexThrows, - "throws", - throws_field_class, - kThrowsClassName); -} - -// Creates two proxy classes and check the art/mirror fields of their static fields. -TEST_F(LambdaProxyTest, CheckArtMirrorFieldsOfProxyStaticFields) { - // gLogVerbosity.class_linker = true; // Uncomment to enable class linker logging. - - ASSERT_NOT_NULL(Thread::Current()); - - ScopedObjectAccess soa(Thread::Current()); - soa_ = &soa; - - // Must happen after CommonRuntimeTest finishes constructing the runtime. - jclass_loader_ = LoadDex(TEST_INTERFACE_ENCLOSING_CLASS_NAME); - jobject jclass_loader = jclass_loader_; - - StackHandleScope<8> hs(soa.Self()); - Handle<mirror::ClassLoader> class_loader( - hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader))); - - Handle<mirror::Class> proxyClass0; - Handle<mirror::Class> proxyClass1; - { - Handle<mirror::Class> L(hs.NewHandle( - class_linker_->FindClass(soa.Self(), MAKE_TEST_INTERFACE_NAME("L"), class_loader))); - ASSERT_TRUE(L.Get() != nullptr); - - std::vector<mirror::Class*> interfaces = { L.Get() }; - proxyClass0 = hs.NewHandle(GenerateProxyClass(soa, jclass_loader, "$Proxy0", interfaces)); - proxyClass1 = hs.NewHandle(GenerateProxyClass(soa, jclass_loader, "$Proxy1", interfaces)); - } - - ASSERT_TRUE(proxyClass0.Get() != nullptr); - ASSERT_TRUE(proxyClass0->IsLambdaProxyClass()); - ASSERT_TRUE(proxyClass0->IsInitialized()); - ASSERT_TRUE(proxyClass1.Get() != nullptr); - ASSERT_TRUE(proxyClass1->IsLambdaProxyClass()); - ASSERT_TRUE(proxyClass1->IsInitialized()); - - LengthPrefixedArray<ArtField>* static_fields0 = proxyClass0->GetSFieldsPtr(); - ASSERT_TRUE(static_fields0 != nullptr); - ASSERT_EQ(2u, static_fields0->size()); - LengthPrefixedArray<ArtField>* static_fields1 = proxyClass1->GetSFieldsPtr(); - ASSERT_TRUE(static_fields1 != nullptr); - ASSERT_EQ(2u, static_fields1->size()); - - EXPECT_EQ(static_fields0->At(0).GetDeclaringClass(), proxyClass0.Get()); - EXPECT_EQ(static_fields0->At(1).GetDeclaringClass(), proxyClass0.Get()); - EXPECT_EQ(static_fields1->At(0).GetDeclaringClass(), proxyClass1.Get()); - EXPECT_EQ(static_fields1->At(1).GetDeclaringClass(), proxyClass1.Get()); - - Handle<mirror::Field> field00 = - hs.NewHandle(mirror::Field::CreateFromArtField(soa.Self(), &static_fields0->At(0), true)); - Handle<mirror::Field> field01 = - hs.NewHandle(mirror::Field::CreateFromArtField(soa.Self(), &static_fields0->At(1), true)); - Handle<mirror::Field> field10 = - hs.NewHandle(mirror::Field::CreateFromArtField(soa.Self(), &static_fields1->At(0), true)); - Handle<mirror::Field> field11 = - hs.NewHandle(mirror::Field::CreateFromArtField(soa.Self(), &static_fields1->At(1), true)); - EXPECT_EQ(field00->GetArtField(), &static_fields0->At(0)); - EXPECT_EQ(field01->GetArtField(), &static_fields0->At(1)); - EXPECT_EQ(field10->GetArtField(), &static_fields1->At(0)); - EXPECT_EQ(field11->GetArtField(), &static_fields1->At(1)); -} - -// TODO: make sure there's a non-abstract implementation of the single-abstract-method on the class. 
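A reduced, standalone statement of the round-trip property this deleted test checks (a mirror object must hand back exactly the ArtField it was created from) might look like the following gtest sketch; both stub types are hypothetical:

#include <gtest/gtest.h>

struct ArtFieldStub {};  // stand-in for ArtField

struct FieldMirrorStub {  // stand-in for mirror::Field
  ArtFieldStub* art_field;
  ArtFieldStub* GetArtField() const { return art_field; }
};

TEST(LambdaProxySketch, StaticFieldRoundTrip) {
  ArtFieldStub static_fields[2];  // index 0: interfaces, index 1: throws
  FieldMirrorStub field0{&static_fields[0]};
  FieldMirrorStub field1{&static_fields[1]};
  EXPECT_EQ(&static_fields[0], field0.GetArtField());
  EXPECT_EQ(&static_fields[1], field1.GetArtField());
}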
- -} // namespace art diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h index a8685b8331..9e416dc888 100644 --- a/runtime/mirror/class-inl.h +++ b/runtime/mirror/class-inl.h @@ -695,11 +695,7 @@ inline bool Class::IsClassClass() { } inline const DexFile& Class::GetDexFile() { - DexCache* dex_cache = GetDexCache(); - DCHECK(dex_cache != nullptr); - const DexFile* dex_file = dex_cache->GetDexFile(); - DCHECK(dex_file != nullptr); - return *dex_file; + return *GetDexCache()->GetDexFile(); } inline bool Class::DescriptorEquals(const char* match) { @@ -707,8 +703,8 @@ inline bool Class::DescriptorEquals(const char* match) { return match[0] == '[' && GetComponentType()->DescriptorEquals(match + 1); } else if (IsPrimitive()) { return strcmp(Primitive::Descriptor(GetPrimitiveType()), match) == 0; - } else if (IsAnyProxyClass()) { - return AnyProxyDescriptorEquals(match); + } else if (IsProxyClass()) { + return ProxyDescriptorEquals(match); } else { const DexFile& dex_file = GetDexFile(); const DexFile::TypeId& type_id = dex_file.GetTypeId(GetClassDef()->class_idx_); @@ -724,32 +720,22 @@ inline void Class::AssertInitializedOrInitializingInThread(Thread* self) { } } -inline ObjectArray<Class>* Class::GetInterfacesForAnyProxy() { - CHECK(IsAnyProxyClass()); +inline ObjectArray<Class>* Class::GetInterfaces() { + CHECK(IsProxyClass()); // First static field. auto* field = GetStaticField(0); DCHECK_STREQ(field->GetName(), "interfaces"); MemberOffset field_offset = field->GetOffset(); - ObjectArray<Class>* interfaces_array = GetFieldObject<ObjectArray<Class>>(field_offset); - - CHECK(interfaces_array != nullptr); - if (UNLIKELY(IsLambdaProxyClass())) { - DCHECK_EQ(1, interfaces_array->GetLength()) - << "Lambda proxies cannot have multiple direct interfaces implemented"; - } - return interfaces_array; + return GetFieldObject<ObjectArray<Class>>(field_offset); } -inline ObjectArray<ObjectArray<Class>>* Class::GetThrowsForAnyProxy() { - CHECK(IsAnyProxyClass()); +inline ObjectArray<ObjectArray<Class>>* Class::GetThrows() { + CHECK(IsProxyClass()); // Second static field. auto* field = GetStaticField(1); DCHECK_STREQ(field->GetName(), "throws"); - MemberOffset field_offset = field->GetOffset(); - auto* throws_array = GetFieldObject<ObjectArray<ObjectArray<Class>>>(field_offset); - CHECK(throws_array != nullptr); - return throws_array; + return GetFieldObject<ObjectArray<ObjectArray<Class>>>(field_offset); } inline MemberOffset Class::GetDisableIntrinsicFlagOffset() { @@ -810,8 +796,8 @@ inline uint32_t Class::NumDirectInterfaces() { return 0; } else if (IsArrayClass()) { return 2; - } else if (IsAnyProxyClass()) { - mirror::ObjectArray<mirror::Class>* interfaces = GetInterfacesForAnyProxy(); + } else if (IsProxyClass()) { + mirror::ObjectArray<mirror::Class>* interfaces = GetInterfaces(); return interfaces != nullptr ? 
interfaces->GetLength() : 0; } else { const DexFile::TypeList* interfaces = GetInterfaceTypeList(); diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc index b2012934a2..05a9039ae9 100644 --- a/runtime/mirror/class.cc +++ b/runtime/mirror/class.cc @@ -538,7 +538,6 @@ ArtMethod* Class::FindVirtualMethod( ArtMethod* Class::FindClassInitializer(size_t pointer_size) { for (ArtMethod& method : GetDirectMethods(pointer_size)) { - DCHECK(reinterpret_cast<volatile void*>(&method) != nullptr); if (method.IsClassInitializer()) { DCHECK_STREQ(method.GetName(), "<clinit>"); DCHECK_STREQ(method.GetSignature().ToString().c_str(), "()V"); @@ -743,8 +742,8 @@ const char* Class::GetDescriptor(std::string* storage) { return Primitive::Descriptor(GetPrimitiveType()); } else if (IsArrayClass()) { return GetArrayDescriptor(storage); - } else if (IsAnyProxyClass()) { - *storage = Runtime::Current()->GetClassLinker()->GetDescriptorForAnyProxy(this); + } else if (IsProxyClass()) { + *storage = Runtime::Current()->GetClassLinker()->GetDescriptorForProxy(this); return storage->c_str(); } else { const DexFile& dex_file = GetDexFile(); @@ -787,10 +786,8 @@ mirror::Class* Class::GetDirectInterface(Thread* self, Handle<mirror::Class> kla DCHECK_EQ(1U, idx); return class_linker->FindSystemClass(self, "Ljava/io/Serializable;"); } - } else if (klass->IsAnyProxyClass()) { - // Proxies don't have a dex cache, so look at the - // interfaces through the magic static field "interfaces" from the proxy class itself. - mirror::ObjectArray<mirror::Class>* interfaces = klass.Get()->GetInterfacesForAnyProxy(); + } else if (klass->IsProxyClass()) { + mirror::ObjectArray<mirror::Class>* interfaces = klass.Get()->GetInterfaces(); DCHECK(interfaces != nullptr); return interfaces->Get(idx); } else { @@ -829,7 +826,7 @@ const char* Class::GetSourceFile() { std::string Class::GetLocation() { mirror::DexCache* dex_cache = GetDexCache(); - if (dex_cache != nullptr && !IsAnyProxyClass()) { + if (dex_cache != nullptr && !IsProxyClass()) { return dex_cache->GetLocation()->ToModifiedUtf8(); } // Arrays and proxies are generated and have no corresponding dex file location. @@ -947,9 +944,9 @@ Class* Class::CopyOf(Thread* self, int32_t new_length, return new_class->AsClass(); } -bool Class::AnyProxyDescriptorEquals(const char* match) { - DCHECK(IsAnyProxyClass()); - return Runtime::Current()->GetClassLinker()->GetDescriptorForAnyProxy(this) == match; +bool Class::ProxyDescriptorEquals(const char* match) { + DCHECK(IsProxyClass()); + return Runtime::Current()->GetClassLinker()->GetDescriptorForProxy(this) == match; } // TODO: Move this to java_lang_Class.cc? diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h index fcfb4b96e8..0ab5b97d72 100644 --- a/runtime/mirror/class.h +++ b/runtime/mirror/class.h @@ -352,16 +352,8 @@ class MANAGED Class FINAL : public Object { static String* ComputeName(Handle<Class> h_this) SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); - // Is this either a java.lang.reflect.Proxy or a boxed lambda (java.lang.LambdaProxy)? - // -- Most code doesn't need to make the distinction, and this is the preferred thing to check. template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsAnyProxyClass() SHARED_REQUIRES(Locks::mutator_lock_) { - return IsReflectProxyClass() || IsLambdaProxyClass(); - } - - // Is this a java.lang.reflect.Proxy ? 
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsReflectProxyClass() SHARED_REQUIRES(Locks::mutator_lock_) { + bool IsProxyClass() SHARED_REQUIRES(Locks::mutator_lock_) { // Read access flags without using the getter, as whether something is a proxy can be checked in // any loaded state // TODO: switch to a check if the super class is java.lang.reflect.Proxy? @@ -369,17 +361,6 @@ class MANAGED Class FINAL : public Object { return (access_flags & kAccClassIsProxy) != 0; } - // Is this a boxed lambda (java.lang.LambdaProxy)? - template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsLambdaProxyClass() SHARED_REQUIRES(Locks::mutator_lock_) { - // Read access flags without using the getter, as whether something is a proxy can be checked in - // any loaded state - // TODO: switch to a check if the super class is java.lang.reflect.Proxy? - uint32_t access_flags = GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_)); - return (access_flags & kAccClassIsLambdaProxy) != 0; - } - - static MemberOffset PrimitiveTypeOffset() { return OFFSET_OF_OBJECT_MEMBER(Class, primitive_type_); } @@ -696,8 +677,6 @@ class MANAGED Class FINAL : public Object { return MemberOffset(OFFSETOF_MEMBER(Class, super_class_)); } - // Returns the class's ClassLoader. - // A null value is returned if and only if this is a boot classpath class. ClassLoader* GetClassLoader() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_); void SetClassLoader(ClassLoader* new_cl) SHARED_REQUIRES(Locks::mutator_lock_); @@ -1097,8 +1076,6 @@ class MANAGED Class FINAL : public Object { bool DescriptorEquals(const char* match) SHARED_REQUIRES(Locks::mutator_lock_); - // Returns the backing DexFile's class definition for this class. - // This returns null if and only if the class has no backing DexFile. const DexFile::ClassDef* GetClassDef() SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE uint32_t NumDirectInterfaces() SHARED_REQUIRES(Locks::mutator_lock_); @@ -1125,15 +1102,11 @@ class MANAGED Class FINAL : public Object { size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); - // For any proxy class only. Returns list of directly implemented interfaces. - // The value returned is always non-null. - ObjectArray<Class>* GetInterfacesForAnyProxy() SHARED_REQUIRES(Locks::mutator_lock_); + // For proxy class only. + ObjectArray<Class>* GetInterfaces() SHARED_REQUIRES(Locks::mutator_lock_); - // For any proxy class only. Returns a 2d array of classes. - // -- The 0th dimension corresponds to the vtable index. - // -- The 1st dimension is a list of checked exception classes. - // The value returned is always non-null. - ObjectArray<ObjectArray<Class>>* GetThrowsForAnyProxy() SHARED_REQUIRES(Locks::mutator_lock_); + // For proxy class only. + ObjectArray<ObjectArray<Class>>* GetThrows() SHARED_REQUIRES(Locks::mutator_lock_); // For reference class only. MemberOffset GetDisableIntrinsicFlagOffset() SHARED_REQUIRES(Locks::mutator_lock_); @@ -1221,7 +1194,7 @@ class MANAGED Class FINAL : public Object { IterationRange<StrideIterator<ArtField>> GetIFieldsUnchecked() SHARED_REQUIRES(Locks::mutator_lock_); - bool AnyProxyDescriptorEquals(const char* match) SHARED_REQUIRES(Locks::mutator_lock_); + bool ProxyDescriptorEquals(const char* match) SHARED_REQUIRES(Locks::mutator_lock_); // Check that the pointer size matches the one in the class linker.
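For illustration, these predicates are plain bit tests over the access-flags word. kAccClassIsLambdaProxy (0x01000000) is taken from the modifiers.h hunk below; the value used for kAccClassIsProxy is an assumption made for this sketch, and FakeClass is hypothetical:

#include <cstdint>

constexpr uint32_t kAccClassIsProxy = 0x00040000;        // assumed value
constexpr uint32_t kAccClassIsLambdaProxy = 0x01000000;  // from modifiers.h below

struct FakeClass {
  uint32_t access_flags;  // readable in any loaded state, hence no getter
};

bool IsReflectProxy(const FakeClass& k) {
  return (k.access_flags & kAccClassIsProxy) != 0;
}

bool IsLambdaProxy(const FakeClass& k) {
  return (k.access_flags & kAccClassIsLambdaProxy) != 0;
}

// The removed IsAnyProxyClass() query is just the disjunction of the two.
bool IsAnyProxy(const FakeClass& k) {
  return IsReflectProxy(k) || IsLambdaProxy(k);
}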
ALWAYS_INLINE static void CheckPointerSize(size_t pointer_size); diff --git a/runtime/mirror/class_loader-inl.h b/runtime/mirror/class_loader-inl.h index 313911706e..e22ddd7e90 100644 --- a/runtime/mirror/class_loader-inl.h +++ b/runtime/mirror/class_loader-inl.h @@ -21,7 +21,6 @@ #include "base/mutex-inl.h" #include "class_table-inl.h" -#include "lambda/box_class_table-inl.h" namespace art { namespace mirror { @@ -36,10 +35,6 @@ inline void ClassLoader::VisitReferences(mirror::Class* klass, const Visitor& vi if (class_table != nullptr) { class_table->VisitRoots(visitor); } - lambda::BoxClassTable* const lambda_box_class_table = GetLambdaProxyCache(); - if (lambda_box_class_table != nullptr) { - lambda_box_class_table->VisitRoots(visitor); - } } } // namespace mirror diff --git a/runtime/mirror/class_loader.h b/runtime/mirror/class_loader.h index 9d4fe9654d..c2a65d62e2 100644 --- a/runtime/mirror/class_loader.h +++ b/runtime/mirror/class_loader.h @@ -24,12 +24,6 @@ namespace art { struct ClassLoaderOffsets; class ClassTable; -namespace lambda { - -class BoxClassTable; - -} // namespace lambda - namespace mirror { class Class; @@ -66,16 +60,6 @@ class MANAGED ClassLoader : public Object { reinterpret_cast<uint64_t>(allocator)); } - lambda::BoxClassTable* GetLambdaProxyCache() SHARED_REQUIRES(Locks::mutator_lock_) { - return reinterpret_cast<lambda::BoxClassTable*>( - GetField64(OFFSET_OF_OBJECT_MEMBER(ClassLoader, lambda_proxy_cache_))); - } - - void SetLambdaProxyCache(lambda::BoxClassTable* cache) SHARED_REQUIRES(Locks::mutator_lock_) { - SetField64<false>(OFFSET_OF_OBJECT_MEMBER(ClassLoader, lambda_proxy_cache_), - reinterpret_cast<uint64_t>(cache)); - } - private: // Visit instance fields of the class loader as well as its associated classes. // Null class loader is handled by ClassLinker::VisitClassRoots. @@ -92,7 +76,6 @@ class MANAGED ClassLoader : public Object { uint32_t padding_ ATTRIBUTE_UNUSED; uint64_t allocator_; uint64_t class_table_; - uint64_t lambda_proxy_cache_; friend struct art::ClassLoaderOffsets; // for verifying offset information friend class Object; // For VisitReferences diff --git a/runtime/mirror/field-inl.h b/runtime/mirror/field-inl.h index 49c443ef29..8a0daec4c2 100644 --- a/runtime/mirror/field-inl.h +++ b/runtime/mirror/field-inl.h @@ -57,15 +57,14 @@ inline mirror::Field* Field::CreateFromArtField(Thread* self, ArtField* field, const auto pointer_size = kTransactionActive ? Runtime::Current()->GetClassLinker()->GetImagePointerSize() : sizeof(void*); auto dex_field_index = field->GetDexFieldIndex(); - if (field->GetDeclaringClass()->IsAnyProxyClass()) { + auto* resolved_field = field->GetDexCache()->GetResolvedField(dex_field_index, pointer_size); + if (field->GetDeclaringClass()->IsProxyClass()) { DCHECK(field->IsStatic()); DCHECK_LT(dex_field_index, 2U); // The two static fields (interfaces, throws) of all proxy classes // share the same dex file indices 0 and 1. So, we can't resolve // them in the dex cache. 
} else { - ArtField* resolved_field = - field->GetDexCache()->GetResolvedField(dex_field_index, pointer_size); if (resolved_field != nullptr) { DCHECK_EQ(resolved_field, field); } else { diff --git a/runtime/mirror/field.cc b/runtime/mirror/field.cc index b02e5b5614..ff6847cf7a 100644 --- a/runtime/mirror/field.cc +++ b/runtime/mirror/field.cc @@ -56,7 +56,7 @@ void Field::VisitRoots(RootVisitor* visitor) { ArtField* Field::GetArtField() { mirror::Class* declaring_class = GetDeclaringClass(); - if (UNLIKELY(declaring_class->IsAnyProxyClass())) { + if (UNLIKELY(declaring_class->IsProxyClass())) { DCHECK(IsStatic()); DCHECK_EQ(declaring_class->NumStaticFields(), 2U); // 0 == Class[] interfaces; 1 == Class[][] throws; diff --git a/runtime/mirror/lambda_proxy.h b/runtime/mirror/lambda_proxy.h deleted file mode 100644 index cff3a12166..0000000000 --- a/runtime/mirror/lambda_proxy.h +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright (C) 2015 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ART_RUNTIME_MIRROR_LAMBDA_PROXY_H_ -#define ART_RUNTIME_MIRROR_LAMBDA_PROXY_H_ - -#include "lambda/closure.h" -#include "object.h" - -namespace art { - -struct LambdaProxyOffsets; - -namespace mirror { - -// C++ mirror of a lambda proxy. Does not yet have a Java-equivalent source file. -class MANAGED LambdaProxy FINAL : public Object { - public: - // Note that the runtime subclasses generate the following static fields: - - // private static java.lang.Class[] interfaces; // Declared interfaces for the lambda interface. - static constexpr size_t kStaticFieldIndexInterfaces = 0; - // private static java.lang.Class[][] throws; // Maps vtable id to list of classes. - static constexpr size_t kStaticFieldIndexThrows = 1; - static constexpr size_t kStaticFieldCount = 2; // Number of fields total. - - // The offset from the start of the 'LambdaProxy' object to the closure_ field, in bytes. - // -- This is exposed publicly in order to avoid exposing 'closure_' publicly. - // -- Only meant to be used in stubs and other compiled code, not in runtime. - static inline MemberOffset GetInstanceFieldOffsetClosure() { - return OFFSET_OF_OBJECT_MEMBER(LambdaProxy, closure_); - } - - // Direct methods available on the class: - static constexpr size_t kDirectMethodIndexConstructor = 0; // <init>()V - static constexpr size_t kDirectMethodCount = 1; // Only the constructor. - - // Accessors to the fields: - - // Get the native closure pointer. Usually non-null outside of lambda proxy contexts. - lambda::Closure* GetClosure() SHARED_REQUIRES(Locks::mutator_lock_) { - return reinterpret_cast<lambda::Closure*>( - GetField64(GetInstanceFieldOffsetClosure())); - } - - // Set the native closure pointer. Usually should be non-null outside of lambda proxy contexts.
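The "native pointer stored in a Java long" idiom used by GetClosure()/SetClosure() here, and by GetLambdaProxyCache() in class_loader.h above, can be sketched with a plain uint64_t slot; NativeClosure and ManagedSlot are hypothetical stand-ins for the managed-field machinery:

#include <cstdint>

struct NativeClosure {};  // hypothetical native-side object

// The managed field is a Java 'long' (always 64 bits), so a native pointer
// round-trips through zero-extension on 32-bit targets.
struct ManagedSlot {
  uint64_t bits;  // models the GetField64/SetField64 storage

  void Set(NativeClosure* p) {
    bits = static_cast<uint64_t>(reinterpret_cast<uintptr_t>(p));
  }
  NativeClosure* Get() const {
    return reinterpret_cast<NativeClosure*>(static_cast<uintptr_t>(bits));
  }
};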
- void SetClosure(lambda::Closure* closure) SHARED_REQUIRES(Locks::mutator_lock_) { - SetField64<false>(GetInstanceFieldOffsetClosure(), - reinterpret_cast<uint64_t>(closure)); - } - - private: - // Instance fields, present in the base class and every generated subclass: - - // private long closure; - union { - lambda::Closure* actual; - uint64_t padding; // Don't trip up GetObjectSize checks, since the Java code has a long. - } closure_; - - // Friends for generating offset tests: - friend struct art::LambdaProxyOffsets; // for verifying offset information - - DISALLOW_IMPLICIT_CONSTRUCTORS(LambdaProxy); -}; - -} // namespace mirror -} // namespace art - -#endif // ART_RUNTIME_MIRROR_LAMBDA_PROXY_H_ diff --git a/runtime/modifiers.h b/runtime/modifiers.h index 36aa57fac3..9946eabc82 100644 --- a/runtime/modifiers.h +++ b/runtime/modifiers.h @@ -54,8 +54,6 @@ static constexpr uint32_t kAccDefault = 0x00400000; // method (run // if any particular method needs to be a default conflict. Used to figure out at runtime if // invoking this method will throw an exception. static constexpr uint32_t kAccDefaultConflict = 0x00800000; // method (runtime) -// Set by the class linker when creating a class that's a subtype of LambdaProxy. -static constexpr uint32_t kAccClassIsLambdaProxy = 0x01000000; // class (dex only) // Special runtime-only flags. // Interface and all its super-interfaces with default methods have been recursively initialized. diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc index 6cebd4d34b..5e423920c0 100644 --- a/runtime/native/java_lang_Class.cc +++ b/runtime/native/java_lang_Class.cc @@ -103,7 +103,7 @@ static jstring Class_getNameNative(JNIEnv* env, jobject javaThis) { static jobjectArray Class_getProxyInterfaces(JNIEnv* env, jobject javaThis) { ScopedFastNativeObjectAccess soa(env); mirror::Class* c = DecodeClass(soa, javaThis); - return soa.AddLocalReference<jobjectArray>(c->GetInterfacesForAnyProxy()->Clone(soa.Self())); + return soa.AddLocalReference<jobjectArray>(c->GetInterfaces()->Clone(soa.Self())); } static mirror::ObjectArray<mirror::Field>* GetDeclaredFields( @@ -489,7 +489,7 @@ static jobject Class_getDeclaredAnnotation(JNIEnv* env, jobject javaThis, jclass ScopedFastNativeObjectAccess soa(env); StackHandleScope<2> hs(soa.Self()); Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis))); - if (klass->IsAnyProxyClass() || klass->GetDexCache() == nullptr) { + if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) { return nullptr; } Handle<mirror::Class> annotation_class(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType))); @@ -501,7 +501,7 @@ static jobjectArray Class_getDeclaredAnnotations(JNIEnv* env, jobject javaThis) ScopedFastNativeObjectAccess soa(env); StackHandleScope<1> hs(soa.Self()); Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis))); - if (klass->IsAnyProxyClass() || klass->GetDexCache() == nullptr) { + if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) { // Return an empty array instead of a null pointer. 
mirror::Class* annotation_array_class = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_annotation_Annotation__array); @@ -517,7 +517,7 @@ static jobjectArray Class_getDeclaredClasses(JNIEnv* env, jobject javaThis) { StackHandleScope<1> hs(soa.Self()); Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis))); mirror::ObjectArray<mirror::Class>* classes = nullptr; - if (!klass->IsAnyProxyClass() && klass->GetDexCache() != nullptr) { + if (!klass->IsProxyClass() && klass->GetDexCache() != nullptr) { classes = klass->GetDexFile().GetDeclaredClasses(klass); } if (classes == nullptr) { @@ -543,7 +543,7 @@ static jclass Class_getEnclosingClass(JNIEnv* env, jobject javaThis) { ScopedFastNativeObjectAccess soa(env); StackHandleScope<1> hs(soa.Self()); Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis))); - if (klass->IsAnyProxyClass() || klass->GetDexCache() == nullptr) { + if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) { return nullptr; } return soa.AddLocalReference<jclass>(klass->GetDexFile().GetEnclosingClass(klass)); @@ -553,7 +553,7 @@ static jobject Class_getEnclosingConstructorNative(JNIEnv* env, jobject javaThis ScopedFastNativeObjectAccess soa(env); StackHandleScope<1> hs(soa.Self()); Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis))); - if (klass->IsAnyProxyClass() || klass->GetDexCache() == nullptr) { + if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) { return nullptr; } mirror::Object* method = klass->GetDexFile().GetEnclosingMethod(klass); @@ -570,7 +570,7 @@ static jobject Class_getEnclosingMethodNative(JNIEnv* env, jobject javaThis) { ScopedFastNativeObjectAccess soa(env); StackHandleScope<1> hs(soa.Self()); Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis))); - if (klass->IsAnyProxyClass() || klass->GetDexCache() == nullptr) { + if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) { return nullptr; } mirror::Object* method = klass->GetDexFile().GetEnclosingMethod(klass); @@ -587,7 +587,7 @@ static jint Class_getInnerClassFlags(JNIEnv* env, jobject javaThis, jint default ScopedFastNativeObjectAccess soa(env); StackHandleScope<1> hs(soa.Self()); Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis))); - if (klass->IsAnyProxyClass() || klass->GetDexCache() == nullptr) { + if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) { return defaultValue; } uint32_t flags; @@ -601,7 +601,7 @@ static jstring Class_getInnerClassName(JNIEnv* env, jobject javaThis) { ScopedFastNativeObjectAccess soa(env); StackHandleScope<1> hs(soa.Self()); Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis))); - if (klass->IsAnyProxyClass() || klass->GetDexCache() == nullptr) { + if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) { return nullptr; } mirror::String* class_name = nullptr; @@ -615,7 +615,7 @@ static jboolean Class_isAnonymousClass(JNIEnv* env, jobject javaThis) { ScopedFastNativeObjectAccess soa(env); StackHandleScope<1> hs(soa.Self()); Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis))); - if (klass->IsAnyProxyClass() || klass->GetDexCache() == nullptr) { + if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) { return false; } mirror::String* class_name = nullptr; @@ -630,7 +630,7 @@ static jboolean Class_isDeclaredAnnotationPresent(JNIEnv* env, jobject javaThis, ScopedFastNativeObjectAccess soa(env); StackHandleScope<2> hs(soa.Self()); Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis))); - if 
(klass->IsAnyProxyClass() || klass->GetDexCache() == nullptr) { + if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) { return false; } Handle<mirror::Class> annotation_class(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType))); @@ -641,7 +641,7 @@ static jclass Class_getDeclaringClass(JNIEnv* env, jobject javaThis) { ScopedFastNativeObjectAccess soa(env); StackHandleScope<1> hs(soa.Self()); Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis))); - if (klass->IsAnyProxyClass() || klass->GetDexCache() == nullptr) { + if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) { return nullptr; } // Return null for anonymous classes. diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc index 9166eccb0c..aac800a35a 100644 --- a/runtime/native/java_lang_reflect_Field.cc +++ b/runtime/native/java_lang_reflect_Field.cc @@ -419,7 +419,7 @@ static jobject Field_getAnnotationNative(JNIEnv* env, jobject javaField, jclass ScopedFastNativeObjectAccess soa(env); StackHandleScope<1> hs(soa.Self()); ArtField* field = soa.Decode<mirror::Field*>(javaField)->GetArtField(); - if (field->GetDeclaringClass()->IsAnyProxyClass()) { + if (field->GetDeclaringClass()->IsProxyClass()) { return nullptr; } Handle<mirror::Class> klass(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType))); @@ -429,7 +429,7 @@ static jobject Field_getAnnotationNative(JNIEnv* env, jobject javaField, jclass static jobjectArray Field_getDeclaredAnnotations(JNIEnv* env, jobject javaField) { ScopedFastNativeObjectAccess soa(env); ArtField* field = soa.Decode<mirror::Field*>(javaField)->GetArtField(); - if (field->GetDeclaringClass()->IsAnyProxyClass()) { + if (field->GetDeclaringClass()->IsProxyClass()) { // Return an empty array instead of a null pointer. 
mirror::Class* annotation_array_class = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_annotation_Annotation__array); @@ -443,7 +443,7 @@ static jobjectArray Field_getDeclaredAnnotations(JNIEnv* env, jobject javaField) static jobjectArray Field_getSignatureAnnotation(JNIEnv* env, jobject javaField) { ScopedFastNativeObjectAccess soa(env); ArtField* field = soa.Decode<mirror::Field*>(javaField)->GetArtField(); - if (field->GetDeclaringClass()->IsAnyProxyClass()) { + if (field->GetDeclaringClass()->IsProxyClass()) { return nullptr; } return soa.AddLocalReference<jobjectArray>( @@ -455,7 +455,7 @@ static jboolean Field_isAnnotationPresentNative(JNIEnv* env, jobject javaField, ScopedFastNativeObjectAccess soa(env); StackHandleScope<1> hs(soa.Self()); ArtField* field = soa.Decode<mirror::Field*>(javaField)->GetArtField(); - if (field->GetDeclaringClass()->IsAnyProxyClass()) { + if (field->GetDeclaringClass()->IsProxyClass()) { return false; } Handle<mirror::Class> klass(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType))); diff --git a/runtime/native/java_lang_reflect_Method.cc b/runtime/native/java_lang_reflect_Method.cc index 7894c9bcfc..caacba6ec3 100644 --- a/runtime/native/java_lang_reflect_Method.cc +++ b/runtime/native/java_lang_reflect_Method.cc @@ -32,7 +32,7 @@ namespace art { static jobject Method_getAnnotationNative(JNIEnv* env, jobject javaMethod, jclass annotationType) { ScopedFastNativeObjectAccess soa(env); ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod); - if (method->GetDeclaringClass()->IsAnyProxyClass()) { + if (method->GetDeclaringClass()->IsProxyClass()) { return nullptr; } StackHandleScope<1> hs(soa.Self()); @@ -44,7 +44,7 @@ static jobject Method_getAnnotationNative(JNIEnv* env, jobject javaMethod, jclas static jobjectArray Method_getDeclaredAnnotations(JNIEnv* env, jobject javaMethod) { ScopedFastNativeObjectAccess soa(env); ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod); - if (method->GetDeclaringClass()->IsAnyProxyClass()) { + if (method->GetDeclaringClass()->IsProxyClass()) { // Return an empty array instead of a null pointer. 
mirror::Class* annotation_array_class = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_annotation_Annotation__array); @@ -67,7 +67,7 @@ static jobject Method_getDefaultValue(JNIEnv* env, jobject javaMethod) { static jobjectArray Method_getExceptionTypes(JNIEnv* env, jobject javaMethod) { ScopedFastNativeObjectAccess soa(env); ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod); - if (method->GetDeclaringClass()->IsAnyProxyClass()) { + if (method->GetDeclaringClass()->IsProxyClass()) { mirror::Class* klass = method->GetDeclaringClass(); int throws_index = -1; size_t i = 0; @@ -79,8 +79,7 @@ static jobjectArray Method_getExceptionTypes(JNIEnv* env, jobject javaMethod) { ++i; } CHECK_NE(throws_index, -1); - mirror::ObjectArray<mirror::Class>* declared_exceptions = - klass->GetThrowsForAnyProxy()->Get(throws_index); + mirror::ObjectArray<mirror::Class>* declared_exceptions = klass->GetThrows()->Get(throws_index); return soa.AddLocalReference<jobjectArray>(declared_exceptions->Clone(soa.Self())); } else { mirror::ObjectArray<mirror::Class>* result_array = @@ -105,7 +104,7 @@ static jobjectArray Method_getExceptionTypes(JNIEnv* env, jobject javaMethod) { static jobjectArray Method_getParameterAnnotationsNative(JNIEnv* env, jobject javaMethod) { ScopedFastNativeObjectAccess soa(env); ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod); - if (method->GetDeclaringClass()->IsAnyProxyClass()) { + if (method->GetDeclaringClass()->IsProxyClass()) { return nullptr; } return soa.AddLocalReference<jobjectArray>(method->GetDexFile()->GetParameterAnnotations(method)); @@ -121,7 +120,7 @@ static jboolean Method_isAnnotationPresentNative(JNIEnv* env, jobject javaMethod jclass annotationType) { ScopedFastNativeObjectAccess soa(env); ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod); - if (method->GetDeclaringClass()->IsAnyProxyClass()) { + if (method->GetDeclaringClass()->IsProxyClass()) { return false; } StackHandleScope<1> hs(soa.Self()); diff --git a/runtime/native/java_lang_reflect_Proxy.cc b/runtime/native/java_lang_reflect_Proxy.cc index 647cec0010..4a6ab404f2 100644 --- a/runtime/native/java_lang_reflect_Proxy.cc +++ b/runtime/native/java_lang_reflect_Proxy.cc @@ -27,31 +27,15 @@ namespace art { static jclass Proxy_generateProxy(JNIEnv* env, jclass, jstring name, jobjectArray interfaces, - jobject loader, jobjectArray methods, jobjectArray throws, - jboolean is_lambda_proxy) { + jobject loader, jobjectArray methods, jobjectArray throws) { ScopedFastNativeObjectAccess soa(env); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - - mirror::Class* proxy_class = nullptr; - - if (UNLIKELY(is_lambda_proxy)) { - bool already_exists; // XX: Perhaps add lambdaProxyCache to java.lang.ClassLoader ? 
- proxy_class = class_linker->CreateLambdaProxyClass(soa, - name, - interfaces, - loader, - methods, - throws, - /*out*/&already_exists); - } else { - proxy_class = class_linker->CreateProxyClass(soa, name, interfaces, loader, methods, throws); - } - - return soa.AddLocalReference<jclass>(proxy_class); + return soa.AddLocalReference<jclass>(class_linker->CreateProxyClass( + soa, name, interfaces, loader, methods, throws)); } static JNINativeMethod gMethods[] = { - NATIVE_METHOD(Proxy, generateProxy, "!(Ljava/lang/String;[Ljava/lang/Class;Ljava/lang/ClassLoader;[Ljava/lang/reflect/Method;[[Ljava/lang/Class;Z)Ljava/lang/Class;"), + NATIVE_METHOD(Proxy, generateProxy, "!(Ljava/lang/String;[Ljava/lang/Class;Ljava/lang/ClassLoader;[Ljava/lang/reflect/Method;[[Ljava/lang/Class;)Ljava/lang/Class;"), }; void register_java_lang_reflect_Proxy(JNIEnv* env) { diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc index 57aafcc519..57472adb64 100644 --- a/runtime/proxy_test.cc +++ b/runtime/proxy_test.cc @@ -121,7 +121,7 @@ TEST_F(ProxyTest, ProxyClassHelper) { GenerateProxyClass(soa, jclass_loader, "$Proxy1234", interfaces))); interfaces.clear(); // Don't leave possibly stale objects in the array, as good practice. ASSERT_TRUE(proxy_class.Get() != nullptr); - ASSERT_TRUE(proxy_class->IsReflectProxyClass()); + ASSERT_TRUE(proxy_class->IsProxyClass()); ASSERT_TRUE(proxy_class->IsInitialized()); EXPECT_EQ(2U, proxy_class->NumDirectInterfaces()); // Interfaces$I and Interfaces$J. @@ -157,7 +157,7 @@ TEST_F(ProxyTest, ProxyFieldHelper) { } ASSERT_TRUE(proxyClass.Get() != nullptr); - ASSERT_TRUE(proxyClass->IsReflectProxyClass()); + ASSERT_TRUE(proxyClass->IsProxyClass()); ASSERT_TRUE(proxyClass->IsInitialized()); EXPECT_TRUE(proxyClass->GetIFieldsPtr() == nullptr); @@ -208,10 +208,10 @@ TEST_F(ProxyTest, CheckArtMirrorFieldsOfProxyStaticFields) { } ASSERT_TRUE(proxyClass0.Get() != nullptr); - ASSERT_TRUE(proxyClass0->IsReflectProxyClass()); + ASSERT_TRUE(proxyClass0->IsProxyClass()); ASSERT_TRUE(proxyClass0->IsInitialized()); ASSERT_TRUE(proxyClass1.Get() != nullptr); - ASSERT_TRUE(proxyClass1->IsReflectProxyClass()); + ASSERT_TRUE(proxyClass1->IsProxyClass()); ASSERT_TRUE(proxyClass1->IsInitialized()); LengthPrefixedArray<ArtField>* static_fields0 = proxyClass0->GetSFieldsPtr(); diff --git a/runtime/stack.cc b/runtime/stack.cc index 2ff9fd2835..9098d38bb0 100644 --- a/runtime/stack.cc +++ b/runtime/stack.cc @@ -172,23 +172,12 @@ mirror::Object* StackVisitor::GetThisObject() const { } else { return cur_shadow_frame_->GetVRegReference(0); } - } else if (m->IsReflectProxyMethod()) { + } else if (m->IsProxyMethod()) { if (cur_quick_frame_ != nullptr) { return artQuickGetProxyThisObject(cur_quick_frame_); } else { return cur_shadow_frame_->GetVRegReference(0); } - } else if (m->IsLambdaProxyMethod()) { - if (cur_quick_frame_ != nullptr) { - // XX: Should be safe to return null here, the lambda proxies - // don't set up their own quick frame because they don't need to spill any registers. - // By the time we are executing inside of the final target of the proxy invoke, - // the original 'this' reference is no longer live. - LOG(WARNING) << "Lambda proxies don't have a quick frame, do they?!"; - return nullptr; - } else { - return cur_shadow_frame_->GetVRegReference(0); - } } else { const DexFile::CodeItem* code_item = m->GetCodeItem(); if (code_item == nullptr) { @@ -825,27 +814,7 @@ QuickMethodFrameInfo StackVisitor::GetCurrentQuickFrameInfo() const { // compiled method without any stubs.
Therefore the method must have an OatQuickMethodHeader. DCHECK(!method->IsDirect() && !method->IsConstructor()) << "Constructors of proxy classes must have a OatQuickMethodHeader"; - - if (method->IsReflectProxyMethod()) { - return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs); - } else if (method->IsLambdaProxyMethod()) { - // Set this to true later once every stub works without a frame. - // This is currently 'false' because using a closure as a "long" - // requires a quick frame to be set up on 32-bit architectures. - constexpr bool kLambdaProxyStubsSupportFrameless = false; - if (kIsDebugBuild || !kLambdaProxyStubsSupportFrameless) { - // When debugging we always use the 'RefAndArgs' quick frame to allow us - // to see a runtime stub when unwinding. - return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs); - } else { - // Lambda proxies don't bother setting up a quick frame for release builds. - LOG(FATAL) << "Requested QuickMethodFrameInfo for a lambda proxy," - << "but it doesn't have one, for method: " << PrettyMethod(method); - UNREACHABLE(); - } - } else { - LOG(FATAL) << "Unknown type of proxy method " << PrettyMethod(method); - } + return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs); } // The only remaining case is if the method is native and uses the generic JNI stub. diff --git a/test/458-checker-instruction-simplification/src/Main.java b/test/458-checker-instruction-simplification/src/Main.java index d5fed2adfe..6151fc10f2 100644 --- a/test/458-checker-instruction-simplification/src/Main.java +++ b/test/458-checker-instruction-simplification/src/Main.java @@ -389,24 +389,6 @@ public class Main { return arg << 0; } - /// CHECK-START: int Main.Shl1(int) instruction_simplifier (before) - /// CHECK-DAG: <<Arg:i\d+>> ParameterValue - /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 - /// CHECK-DAG: <<Shl:i\d+>> Shl [<<Arg>>,<<Const1>>] - /// CHECK-DAG: Return [<<Shl>>] - - /// CHECK-START: int Main.Shl1(int) instruction_simplifier (after) - /// CHECK-DAG: <<Arg:i\d+>> ParameterValue - /// CHECK-DAG: <<Add:i\d+>> Add [<<Arg>>,<<Arg>>] - /// CHECK-DAG: Return [<<Add>>] - - /// CHECK-START: int Main.Shl1(int) instruction_simplifier (after) - /// CHECK-NOT: Shl - - public static int Shl1(int arg) { - return arg << 1; - } - /// CHECK-START: long Main.Shr0(long) instruction_simplifier (before) /// CHECK-DAG: <<Arg:j\d+>> ParameterValue /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 @@ -1245,7 +1227,6 @@ public class Main { return arg * 9; } - /** * Test strength reduction of factors of the form (2^n - 1). */ @@ -1265,6 +1246,91 @@ public class Main { return arg * 31; } + /// CHECK-START: int Main.booleanFieldNotEqualOne() instruction_simplifier (before) + /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 + /// CHECK-DAG: <<Field:z\d+>> StaticFieldGet + /// CHECK-DAG: <<NE:z\d+>> NotEqual [<<Field>>,<<Const1>>] + /// CHECK-DAG: If [<<NE>>] + + /// CHECK-START: int Main.booleanFieldNotEqualOne() instruction_simplifier (after) + /// CHECK-DAG: <<Field:z\d+>> StaticFieldGet + /// CHECK-DAG: <<Not:z\d+>> BooleanNot [<<Field>>] + /// CHECK-DAG: If [<<Not>>] + + public static int booleanFieldNotEqualOne() { + return (booleanField == true) ?
13 : 54; + } + + /// CHECK-START: int Main.booleanFieldEqualZero() instruction_simplifier (before) + /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 + /// CHECK-DAG: <<Field:z\d+>> StaticFieldGet + /// CHECK-DAG: <<EQ:z\d+>> Equal [<<Field>>,<<Const0>>] + /// CHECK-DAG: If [<<EQ>>] + + /// CHECK-START: int Main.booleanFieldEqualZero() instruction_simplifier (after) + /// CHECK-DAG: <<Field:z\d+>> StaticFieldGet + /// CHECK-DAG: <<Not:z\d+>> BooleanNot [<<Field>>] + /// CHECK-DAG: If [<<Not>>] + + public static int booleanFieldEqualZero() { + return (booleanField != false) ? 13 : 54; + } + + /// CHECK-START: int Main.intConditionNotEqualOne(int) instruction_simplifier_after_bce (before) + /// CHECK-DAG: <<Arg:i\d+>> ParameterValue + /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 + /// CHECK-DAG: <<Const42:i\d+>> IntConstant 42 + /// CHECK-DAG: <<GT:z\d+>> GreaterThan [<<Arg>>,<<Const42>>] + /// CHECK-DAG: <<NE:z\d+>> NotEqual [<<GT>>,<<Const1>>] + /// CHECK-DAG: If [<<NE>>] + + /// CHECK-START: int Main.intConditionNotEqualOne(int) instruction_simplifier_after_bce (after) + /// CHECK-DAG: <<Arg:i\d+>> ParameterValue + /// CHECK-DAG: <<Const42:i\d+>> IntConstant 42 + /// CHECK-DAG: If [<<LE:z\d+>>] + /// CHECK-DAG: <<LE>> LessThanOrEqual [<<Arg>>,<<Const42>>] + // Note that we match `LE` from If because there are two identical LessThanOrEqual instructions. + + public static int intConditionNotEqualOne(int i) { + return ((i > 42) == true) ? 13 : 54; + } + + /// CHECK-START: int Main.intConditionEqualZero(int) instruction_simplifier_after_bce (before) + /// CHECK-DAG: <<Arg:i\d+>> ParameterValue + /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 + /// CHECK-DAG: <<Const42:i\d+>> IntConstant 42 + /// CHECK-DAG: <<GT:z\d+>> GreaterThan [<<Arg>>,<<Const42>>] + /// CHECK-DAG: <<EQ:z\d+>> Equal [<<GT>>,<<Const0>>] + /// CHECK-DAG: If [<<EQ>>] + + /// CHECK-START: int Main.intConditionEqualZero(int) instruction_simplifier_after_bce (after) + /// CHECK-DAG: <<Arg:i\d+>> ParameterValue + /// CHECK-DAG: <<Const42:i\d+>> IntConstant 42 + /// CHECK-DAG: If [<<LE:z\d+>>] + /// CHECK-DAG: <<LE>> LessThanOrEqual [<<Arg>>,<<Const42>>] + // Note that we match `LE` from If because there are two identical LessThanOrEqual instructions. + + public static int intConditionEqualZero(int i) { + return ((i > 42) != false) ? 13 : 54; + } + + // Test that conditions on float/double are not flipped. + + /// CHECK-START: int Main.floatConditionNotEqualOne(float) register (before) + /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 + /// CHECK-DAG: NotEqual [{{i\d+}},<<Const1>>] + + public static int floatConditionNotEqualOne(float f) { + return ((f > 42.0f) == true) ? 13 : 54; + } + + /// CHECK-START: int Main.doubleConditionEqualZero(double) register (before) + /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 + /// CHECK-DAG: Equal [{{i\d+}},<<Const0>>] + + public static int doubleConditionEqualZero(double d) { + return ((d > 42.0) != false) ? 
13 : 54; + } public static void main(String[] args) { int arg = 123456; @@ -1314,7 +1380,6 @@ public class Main { assertDoubleEquals(Div2(150.0), 75.0); assertFloatEquals(DivMP25(100.0f), -400.0f); assertDoubleEquals(DivMP25(150.0), -600.0); - assertLongEquals(Shl1(100), 200); assertIntEquals(UShr28And15(0xc1234567), 0xc); assertLongEquals(UShr60And15(0xc123456787654321L), 0xcL); assertIntEquals(UShr28And7(0xc1234567), 0x4); @@ -1333,5 +1398,22 @@ public class Main { assertLongEquals(62, mulPow2Minus1(2)); assertLongEquals(3100, mulPow2Minus1(100)); assertLongEquals(382695, mulPow2Minus1(12345)); - } + + booleanField = false; + assertIntEquals(booleanFieldNotEqualOne(), 54); + assertIntEquals(booleanFieldEqualZero(), 54); + booleanField = true; + assertIntEquals(booleanFieldNotEqualOne(), 13); + assertIntEquals(booleanFieldEqualZero(), 13); + assertIntEquals(intConditionNotEqualOne(6), 54); + assertIntEquals(intConditionNotEqualOne(43), 13); + assertIntEquals(intConditionEqualZero(6), 54); + assertIntEquals(intConditionEqualZero(43), 13); + assertIntEquals(floatConditionNotEqualOne(6.0f), 54); + assertIntEquals(floatConditionNotEqualOne(43.0f), 13); + assertIntEquals(doubleConditionEqualZero(6.0), 54); + assertIntEquals(doubleConditionEqualZero(43.0), 13); + } + + public static boolean booleanField; } diff --git a/test/530-checker-lse/src/Main.java b/test/530-checker-lse/src/Main.java index 13c4722bc4..17e88ceb21 100644 --- a/test/530-checker-lse/src/Main.java +++ b/test/530-checker-lse/src/Main.java @@ -136,6 +136,9 @@ public class Main { // A new allocation shouldn't alias with pre-existing values. static int test3(TestClass obj) { + // Do an allocation here to avoid the HLoadClass and HClinitCheck + // at the second allocation. + new TestClass(); obj.i = 1; obj.next.j = 2; TestClass obj2 = new TestClass(); diff --git a/test/538-checker-embed-constants/src/Main.java b/test/538-checker-embed-constants/src/Main.java index 12f0380df0..f791adfd9a 100644 --- a/test/538-checker-embed-constants/src/Main.java +++ b/test/538-checker-embed-constants/src/Main.java @@ -260,26 +260,43 @@ public class Main { return arg ^ 0xf00000000000000fL; } + /// CHECK-START-ARM: long Main.shl1(long) disassembly (after) + /// CHECK: lsls{{(\.w)?}} {{r\d+}}, {{r\d+}}, #1 + /// CHECK: adc{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}} + + /// CHECK-START-ARM: long Main.shl1(long) disassembly (after) + /// CHECK-NOT: lsl{{s?|\.w}} {{r\d+}}, {{r\d+}}, {{r\d+}} + + /// CHECK-START-X86: long Main.shl1(long) disassembly (after) + /// CHECK: add + /// CHECK: adc + + /// CHECK-START-X86: long Main.shl1(long) disassembly (after) + /// CHECK-NOT: shl + + public static long shl1(long arg) { + return arg << 1; + } + /// CHECK-START-ARM: long Main.shl2(long) disassembly (after) - /// CHECK: lsl{{s?|.w}} <<oh:r\d+>>, {{r\d+}}, #2 + /// CHECK: lsl{{s?|\.w}} <<oh:r\d+>>, {{r\d+}}, #2 /// CHECK: orr.w <<oh>>, <<oh>>, <<low:r\d+>>, lsr #30 - /// CHECK-DAG: lsl{{s?|.w}} {{r\d+}}, <<low>>, #2 + /// CHECK: lsl{{s?|\.w}} {{r\d+}}, <<low>>, #2 /// CHECK-START-ARM: long Main.shl2(long) disassembly (after) - /// CHECK-NOT: lsl{{s?|.w}} {{r\d+}}, {{r\d+}}, {{r\d+}} + /// CHECK-NOT: lsl{{s?|\.w}} {{r\d+}}, {{r\d+}}, {{r\d+}} public static long shl2(long arg) { - // Note: Shl(x, 1) is transformed to Add(x, x), so test Shl(x, 2). 
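+    // Shl(x, 1) is no longer simplified into Add(x, x) (see the new shl1 test
+    // above), so shifts by #1 and #2 are now both checked directly. For the
+    // 64-bit value split across a (hi, lo) register pair, the ARM sequence
+    // checked above computes roughly:
+    //   out_hi = (hi << 2) | (lo >>> 30);  // lsl + orr with lsr #30
+    //   out_lo = lo << 2;                  // plain lsl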
return arg << 2; } /// CHECK-START-ARM: long Main.shl31(long) disassembly (after) - /// CHECK: lsl{{s?|.w}} <<oh:r\d+>>, {{r\d+}}, #31 + /// CHECK: lsl{{s?|\.w}} <<oh:r\d+>>, {{r\d+}}, #31 /// CHECK: orr.w <<oh>>, <<oh>>, <<low:r\d+>>, lsr #1 - /// CHECK: lsl{{s?|.w}} {{r\d+}}, <<low>>, #31 + /// CHECK: lsl{{s?|\.w}} {{r\d+}}, <<low>>, #31 /// CHECK-START-ARM: long Main.shl31(long) disassembly (after) - /// CHECK-NOT: lsl{{s?|.w}} {{r\d+}}, {{r\d+}}, {{r\d+}} + /// CHECK-NOT: lsl{{s?|\.w}} {{r\d+}}, {{r\d+}}, {{r\d+}} public static long shl31(long arg) { return arg << 31; @@ -287,114 +304,136 @@ public class Main { /// CHECK-START-ARM: long Main.shl32(long) disassembly (after) /// CHECK-DAG: mov {{r\d+}}, {{r\d+}} - /// CHECK-DAG: mov{{s?|.w}} {{r\d+}}, #0 + /// CHECK-DAG: mov{{s?|\.w}} {{r\d+}}, #0 /// CHECK-START-ARM: long Main.shl32(long) disassembly (after) - /// CHECK-NOT: lsl{{s?|.w}} + /// CHECK-NOT: lsl{{s?|\.w}} public static long shl32(long arg) { return arg << 32; } /// CHECK-START-ARM: long Main.shl33(long) disassembly (after) - /// CHECK-DAG: lsl{{s?|.w}} {{r\d+}}, <<high:r\d+>>, #1 - /// CHECK-DAG: mov{{s?|.w}} {{r\d+}}, #0 + /// CHECK-DAG: lsl{{s?|\.w}} {{r\d+}}, <<high:r\d+>>, #1 + /// CHECK-DAG: mov{{s?|\.w}} {{r\d+}}, #0 /// CHECK-START-ARM: long Main.shl33(long) disassembly (after) - /// CHECK-NOT: lsl{{s?|.w}} {{r\d+}}, {{r\d+}}, {{r\d+}} + /// CHECK-NOT: lsl{{s?|\.w}} {{r\d+}}, {{r\d+}}, {{r\d+}} public static long shl33(long arg) { return arg << 33; } /// CHECK-START-ARM: long Main.shl63(long) disassembly (after) - /// CHECK-DAG: lsl{{s?|.w}} {{r\d+}}, <<high:r\d+>>, #31 - /// CHECK-DAG: mov{{s?|.w}} {{r\d+}}, #0 + /// CHECK-DAG: lsl{{s?|\.w}} {{r\d+}}, <<high:r\d+>>, #31 + /// CHECK-DAG: mov{{s?|\.w}} {{r\d+}}, #0 /// CHECK-START-ARM: long Main.shl63(long) disassembly (after) - /// CHECK-NOT: lsl{{s?|.w}} {{r\d+}}, {{r\d+}}, {{r\d+}} + /// CHECK-NOT: lsl{{s?|\.w}} {{r\d+}}, {{r\d+}}, {{r\d+}} public static long shl63(long arg) { return arg << 63; } /// CHECK-START-ARM: long Main.shr1(long) disassembly (after) - /// CHECK: lsr{{s?|.w}} <<ol:r\d+>>, {{r\d+}}, #1 - /// CHECK: orr.w <<ol>>, <<ol>>, <<high:r\d+>>, lsl #31 - /// CHECK-DAG: asr{{s?|.w}} {{r\d+}}, <<high>>, #1 + /// CHECK: asrs{{(\.w)?}} {{r\d+}}, {{r\d+}}, #1 + /// CHECK: mov.w {{r\d+}}, {{r\d+}}, rrx /// CHECK-START-ARM: long Main.shr1(long) disassembly (after) - /// CHECK-NOT: asr{{s?|.w}} {{r\d+}}, {{r\d+}}, {{r\d+}} + /// CHECK-NOT: asr{{s?|\.w}} {{r\d+}}, {{r\d+}}, {{r\d+}} public static long shr1(long arg) { return arg >> 1; } + /// CHECK-START-ARM: long Main.shr2(long) disassembly (after) + /// CHECK: lsr{{s?|\.w}} <<ol:r\d+>>, {{r\d+}}, #2 + /// CHECK: orr.w <<ol>>, <<ol>>, <<high:r\d+>>, lsl #30 + /// CHECK-DAG: asr{{s?|\.w}} {{r\d+}}, <<high>>, #2 + + /// CHECK-START-ARM: long Main.shr2(long) disassembly (after) + /// CHECK-NOT: asr{{s?|\.w}} {{r\d+}}, {{r\d+}}, {{r\d+}} + + public static long shr2(long arg) { + return arg >> 2; + } + /// CHECK-START-ARM: long Main.shr31(long) disassembly (after) - /// CHECK: lsr{{s?|.w}} <<ol:r\d+>>, {{r\d+}}, #31 + /// CHECK: lsr{{s?|\.w}} <<ol:r\d+>>, {{r\d+}}, #31 /// CHECK: orr.w <<ol>>, <<ol>>, <<high:r\d+>>, lsl #1 - /// CHECK: asr{{s?|.w}} {{r\d+}}, <<high>>, #31 + /// CHECK: asr{{s?|\.w}} {{r\d+}}, <<high>>, #31 /// CHECK-START-ARM: long Main.shr31(long) disassembly (after) - /// CHECK-NOT: asr{{s?|.w}} {{r\d+}}, {{r\d+}}, {{r\d+}} + /// CHECK-NOT: asr{{s?|\.w}} {{r\d+}}, {{r\d+}}, {{r\d+}} public static long shr31(long arg) { return arg >> 31; } /// 
CHECK-START-ARM: long Main.shr32(long) disassembly (after) - /// CHECK-DAG: asr{{s?|.w}} {{r\d+}}, <<high:r\d+>>, #31 + /// CHECK-DAG: asr{{s?|\.w}} {{r\d+}}, <<high:r\d+>>, #31 /// CHECK-DAG: mov {{r\d+}}, <<high>> /// CHECK-START-ARM: long Main.shr32(long) disassembly (after) - /// CHECK-NOT: asr{{s?|.w}} {{r\d+}}, {{r\d+}}, {{r\d+}} - /// CHECK-NOT: lsr{{s?|.w}} + /// CHECK-NOT: asr{{s?|\.w}} {{r\d+}}, {{r\d+}}, {{r\d+}} + /// CHECK-NOT: lsr{{s?|\.w}} public static long shr32(long arg) { return arg >> 32; } /// CHECK-START-ARM: long Main.shr33(long) disassembly (after) - /// CHECK-DAG: asr{{s?|.w}} {{r\d+}}, <<high:r\d+>>, #1 - /// CHECK-DAG: asr{{s?|.w}} {{r\d+}}, <<high>>, #31 + /// CHECK-DAG: asr{{s?|\.w}} {{r\d+}}, <<high:r\d+>>, #1 + /// CHECK-DAG: asr{{s?|\.w}} {{r\d+}}, <<high>>, #31 /// CHECK-START-ARM: long Main.shr33(long) disassembly (after) - /// CHECK-NOT: asr{{s?|.w}} {{r\d+}}, {{r\d+}}, {{r\d+}} + /// CHECK-NOT: asr{{s?|\.w}} {{r\d+}}, {{r\d+}}, {{r\d+}} public static long shr33(long arg) { return arg >> 33; } /// CHECK-START-ARM: long Main.shr63(long) disassembly (after) - /// CHECK-DAG: asr{{s?|.w}} {{r\d+}}, <<high:r\d+>>, #31 - /// CHECK-DAG: asr{{s?|.w}} {{r\d+}}, <<high>>, #31 + /// CHECK-DAG: asr{{s?|\.w}} {{r\d+}}, <<high:r\d+>>, #31 + /// CHECK-DAG: asr{{s?|\.w}} {{r\d+}}, <<high>>, #31 /// CHECK-START-ARM: long Main.shr63(long) disassembly (after) - /// CHECK-NOT: asr{{s?|.w}} {{r\d+}}, {{r\d+}}, {{r\d+}} + /// CHECK-NOT: asr{{s?|\.w}} {{r\d+}}, {{r\d+}}, {{r\d+}} public static long shr63(long arg) { return arg >> 63; } /// CHECK-START-ARM: long Main.ushr1(long) disassembly (after) - /// CHECK: lsr{{s?|.w}} <<ol:r\d+>>, {{r\d+}}, #1 - /// CHECK: orr.w <<ol>>, <<ol>>, <<high:r\d+>>, lsl #31 - /// CHECK-DAG: lsr{{s?|.w}} {{r\d+}}, <<high>>, #1 + /// CHECK: lsrs{{|.w}} {{r\d+}}, {{r\d+}}, #1 + /// CHECK: mov.w {{r\d+}}, {{r\d+}}, rrx /// CHECK-START-ARM: long Main.ushr1(long) disassembly (after) - /// CHECK-NOT: lsr{{s?|.w}} {{r\d+}}, {{r\d+}}, {{r\d+}} + /// CHECK-NOT: lsr{{s?|\.w}} {{r\d+}}, {{r\d+}}, {{r\d+}} public static long ushr1(long arg) { return arg >>> 1; } + /// CHECK-START-ARM: long Main.ushr2(long) disassembly (after) + /// CHECK: lsr{{s?|\.w}} <<ol:r\d+>>, {{r\d+}}, #2 + /// CHECK: orr.w <<ol>>, <<ol>>, <<high:r\d+>>, lsl #30 + /// CHECK-DAG: lsr{{s?|\.w}} {{r\d+}}, <<high>>, #2 + + /// CHECK-START-ARM: long Main.ushr2(long) disassembly (after) + /// CHECK-NOT: lsr{{s?|\.w}} {{r\d+}}, {{r\d+}}, {{r\d+}} + + public static long ushr2(long arg) { + return arg >>> 2; + } + /// CHECK-START-ARM: long Main.ushr31(long) disassembly (after) - /// CHECK: lsr{{s?|.w}} <<ol:r\d+>>, {{r\d+}}, #31 + /// CHECK: lsr{{s?|\.w}} <<ol:r\d+>>, {{r\d+}}, #31 /// CHECK: orr.w <<ol>>, <<ol>>, <<high:r\d+>>, lsl #1 - /// CHECK: lsr{{s?|.w}} {{r\d+}}, <<high>>, #31 + /// CHECK: lsr{{s?|\.w}} {{r\d+}}, <<high>>, #31 /// CHECK-START-ARM: long Main.ushr31(long) disassembly (after) - /// CHECK-NOT: lsr{{s?|.w}} {{r\d+}}, {{r\d+}}, {{r\d+}} + /// CHECK-NOT: lsr{{s?|\.w}} {{r\d+}}, {{r\d+}}, {{r\d+}} public static long ushr31(long arg) { return arg >>> 31; @@ -402,32 +441,32 @@ public class Main { /// CHECK-START-ARM: long Main.ushr32(long) disassembly (after) /// CHECK-DAG: mov {{r\d+}}, {{r\d+}} - /// CHECK-DAG: mov{{s?|.w}} {{r\d+}}, #0 + /// CHECK-DAG: mov{{s?|\.w}} {{r\d+}}, #0 /// CHECK-START-ARM: long Main.ushr32(long) disassembly (after) - /// CHECK-NOT: lsr{{s?|.w}} + /// CHECK-NOT: lsr{{s?|\.w}} public static long ushr32(long arg) { return arg >>> 32; } /// 
CHECK-START-ARM: long Main.ushr33(long) disassembly (after) - /// CHECK-DAG: lsr{{s?|.w}} {{r\d+}}, {{r\d+}}, #1 - /// CHECK-DAG: mov{{s?|.w}} {{r\d+}}, #0 + /// CHECK-DAG: lsr{{s?|\.w}} {{r\d+}}, {{r\d+}}, #1 + /// CHECK-DAG: mov{{s?|\.w}} {{r\d+}}, #0 /// CHECK-START-ARM: long Main.ushr33(long) disassembly (after) - /// CHECK-NOT: lsr{{s?|.w}} {{r\d+}}, {{r\d+}}, {{r\d+}} + /// CHECK-NOT: lsr{{s?|\.w}} {{r\d+}}, {{r\d+}}, {{r\d+}} public static long ushr33(long arg) { return arg >>> 33; } /// CHECK-START-ARM: long Main.ushr63(long) disassembly (after) - /// CHECK-DAG: lsr{{s?|.w}} {{r\d+}}, {{r\d+}}, #31 - /// CHECK-DAG: mov{{s?|.w}} {{r\d+}}, #0 + /// CHECK-DAG: lsr{{s?|\.w}} {{r\d+}}, {{r\d+}}, #31 + /// CHECK-DAG: mov{{s?|\.w}} {{r\d+}}, #0 /// CHECK-START-ARM: long Main.ushr63(long) disassembly (after) - /// CHECK-NOT: lsr{{s?|.w}} {{r\d+}}, {{r\d+}}, {{r\d+}} + /// CHECK-NOT: lsr{{s?|\.w}} {{r\d+}}, {{r\d+}}, {{r\d+}} public static long ushr63(long arg) { return arg >>> 63; @@ -485,11 +524,13 @@ public class Main { assertLongEquals(14, addM1(7)); + assertLongEquals(shl1(longArg), 0x2468acf10eca8642L); assertLongEquals(shl2(longArg), 0x48d159e21d950c84L); assertLongEquals(shl31(longArg), 0x43b2a19080000000L); assertLongEquals(shl32(longArg), 0x8765432100000000L); assertLongEquals(shl33(longArg), 0x0eca864200000000L); assertLongEquals(shl63(longArg), 0x8000000000000000L); + assertLongEquals(shl1(~longArg), 0xdb97530ef13579bcL); assertLongEquals(shl2(~longArg), 0xb72ea61de26af378L); assertLongEquals(shl31(~longArg), 0xbc4d5e6f00000000L); assertLongEquals(shl32(~longArg), 0x789abcde00000000L); @@ -497,22 +538,26 @@ public class Main { assertLongEquals(shl63(~longArg), 0x0000000000000000L); assertLongEquals(shr1(longArg), 0x091a2b3c43b2a190L); + assertLongEquals(shr2(longArg), 0x048d159e21d950c8L); assertLongEquals(shr31(longArg), 0x000000002468acf1L); assertLongEquals(shr32(longArg), 0x0000000012345678L); assertLongEquals(shr33(longArg), 0x00000000091a2b3cL); assertLongEquals(shr63(longArg), 0x0000000000000000L); assertLongEquals(shr1(~longArg), 0xf6e5d4c3bc4d5e6fL); + assertLongEquals(shr2(~longArg), 0xfb72ea61de26af37L); assertLongEquals(shr31(~longArg), 0xffffffffdb97530eL); assertLongEquals(shr32(~longArg), 0xffffffffedcba987L); assertLongEquals(shr33(~longArg), 0xfffffffff6e5d4c3L); assertLongEquals(shr63(~longArg), 0xffffffffffffffffL); assertLongEquals(ushr1(longArg), 0x091a2b3c43b2a190L); + assertLongEquals(ushr2(longArg), 0x048d159e21d950c8L); assertLongEquals(ushr31(longArg), 0x000000002468acf1L); assertLongEquals(ushr32(longArg), 0x0000000012345678L); assertLongEquals(ushr33(longArg), 0x00000000091a2b3cL); assertLongEquals(ushr63(longArg), 0x0000000000000000L); assertLongEquals(ushr1(~longArg), 0x76e5d4c3bc4d5e6fL); + assertLongEquals(ushr2(~longArg), 0x3b72ea61de26af37L); assertLongEquals(ushr31(~longArg), 0x00000001db97530eL); assertLongEquals(ushr32(~longArg), 0x00000000edcba987L); assertLongEquals(ushr33(~longArg), 0x0000000076e5d4c3L); diff --git a/test/543-env-long-ref/env_long_ref.cc b/test/543-env-long-ref/env_long_ref.cc new file mode 100644 index 0000000000..41083235d9 --- /dev/null +++ b/test/543-env-long-ref/env_long_ref.cc @@ -0,0 +1,66 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "arch/context.h" +#include "art_method-inl.h" +#include "jni.h" +#include "scoped_thread_state_change.h" +#include "stack.h" +#include "thread.h" + +namespace art { + +namespace { + +class TestVisitor : public StackVisitor { + public: + TestVisitor(const ScopedObjectAccess& soa, Context* context, jobject expected_value) + SHARED_REQUIRES(Locks::mutator_lock_) + : StackVisitor(soa.Self(), context, StackVisitor::StackWalkKind::kIncludeInlinedFrames), + expected_value_(expected_value), + found_(false), + soa_(soa) {} + + bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) { + ArtMethod* m = GetMethod(); + std::string m_name(m->GetName()); + + if (m_name == "testCase") { + found_ = true; + uint32_t value = 0; + CHECK(GetVReg(m, 1, kReferenceVReg, &value)); + CHECK_EQ(reinterpret_cast<mirror::Object*>(value), + soa_.Decode<mirror::Object*>(expected_value_)); + } + return true; + } + + jobject expected_value_; + bool found_; + const ScopedObjectAccess& soa_; +}; + +} // namespace + +extern "C" JNIEXPORT void JNICALL Java_Main_lookForMyRegisters(JNIEnv*, jclass, jobject value) { + ScopedObjectAccess soa(Thread::Current()); + std::unique_ptr<Context> context(Context::Create()); + TestVisitor visitor(soa, context.get(), value); + visitor.WalkStack(); + CHECK(visitor.found_); +} + +} // namespace art diff --git a/test/543-env-long-ref/expected.txt b/test/543-env-long-ref/expected.txt new file mode 100644 index 0000000000..89f155b8c9 --- /dev/null +++ b/test/543-env-long-ref/expected.txt @@ -0,0 +1,2 @@ +JNI_OnLoad called +42 diff --git a/test/543-env-long-ref/info.txt b/test/543-env-long-ref/info.txt new file mode 100644 index 0000000000..6a4253364e --- /dev/null +++ b/test/543-env-long-ref/info.txt @@ -0,0 +1,3 @@ +Regression test for optimizing that used to not return +the right dex register in debuggable when a new value +was overwriting the high dex register of a wide value. diff --git a/test/543-env-long-ref/smali/TestCase.smali b/test/543-env-long-ref/smali/TestCase.smali new file mode 100644 index 0000000000..608d6eb96a --- /dev/null +++ b/test/543-env-long-ref/smali/TestCase.smali @@ -0,0 +1,26 @@ +# Copyright (C) 2015 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
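+# testCase() below keeps the wide constant 0x1 in the (v0, v1) register pair
+# and then overwrites v1 with an object reference. The native
+# lookForMyRegisters helper walks the stack and checks that the environment
+# reports v1 as that reference rather than as the stale high half of the wide
+# value.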
+ +.class public LTestCase; +.super Ljava/lang/Object; + +.method public static testCase()I + .registers 5 + const-wide/16 v0, 0x1 + invoke-static {v0, v1}, LMain;->$noinline$allocate(J)LMain; + move-result-object v1 + invoke-static {v1}, LMain;->lookForMyRegisters(LMain;)V + iget v2, v1, LMain;->field:I + return v2 +.end method diff --git a/runtime/lambda/box_class_table-inl.h b/test/543-env-long-ref/src/Main.java index 2fc34a7b2c..e723789ce2 100644 --- a/runtime/lambda/box_class_table-inl.h +++ b/test/543-env-long-ref/src/Main.java @@ -14,25 +14,29 @@ * limitations under the License. */ -#ifndef ART_RUNTIME_LAMBDA_BOX_CLASS_TABLE_INL_H_ -#define ART_RUNTIME_LAMBDA_BOX_CLASS_TABLE_INL_H_ +import java.lang.reflect.Method; -#include "lambda/box_class_table.h" -#include "thread.h" +public class Main { + // Workaround for b/18051191. + class InnerClass {} -namespace art { -namespace lambda { + public static void main(String[] args) throws Throwable { + System.loadLibrary(args[0]); + Class<?> c = Class.forName("TestCase"); + Method m = c.getMethod("testCase"); + Integer a = (Integer)m.invoke(null, (Object[]) null); + System.out.println(a); + } -template <typename Visitor> -inline void BoxClassTable::VisitRoots(const Visitor& visitor) { - MutexLock mu(Thread::Current(), *Locks::lambda_class_table_lock_); - for (std::pair<UnorderedMapKeyType, ValueType>& key_value : map_) { - ValueType& gc_root = key_value.second; - visitor.VisitRoot(gc_root.AddressWithoutBarrier()); + public static Main $noinline$allocate(long a) { + try { + return new Main(); + } catch (Exception e) { + throw new Error(e); + } } -} -} // namespace lambda -} // namespace art + public static native void lookForMyRegisters(Main m); -#endif // ART_RUNTIME_LAMBDA_BOX_CLASS_TABLE_INL_H_ + int field = 42; +} diff --git a/test/550-checker-multiply-accumulate/expected.txt b/test/550-checker-multiply-accumulate/expected.txt new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/test/550-checker-multiply-accumulate/expected.txt diff --git a/test/550-checker-multiply-accumulate/info.txt b/test/550-checker-multiply-accumulate/info.txt new file mode 100644 index 0000000000..10e998cb18 --- /dev/null +++ b/test/550-checker-multiply-accumulate/info.txt @@ -0,0 +1 @@ +Test the merging of instructions into the shifter operand on arm64. diff --git a/test/550-checker-multiply-accumulate/src/Main.java b/test/550-checker-multiply-accumulate/src/Main.java new file mode 100644 index 0000000000..2d0688d57e --- /dev/null +++ b/test/550-checker-multiply-accumulate/src/Main.java @@ -0,0 +1,234 @@ +/* +* Copyright (C) 2015 The Android Open Source Project +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +public class Main { + + // A dummy value to defeat inlining of these routines. 
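+  // Each $opt$noinline$ method below starts with `if (doThrow) throw new Error();`
+  // so that the compiler cannot inline it and constant-fold the expression the
+  // checker assertions are matching against.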
+ static boolean doThrow = false; + + public static void assertIntEquals(int expected, int result) { + if (expected != result) { + throw new Error("Expected: " + expected + ", found: " + result); + } + } + + public static void assertLongEquals(long expected, long result) { + if (expected != result) { + throw new Error("Expected: " + expected + ", found: " + result); + } + } + + /** + * Test basic merging of `MUL+ADD` into `MULADD`. + */ + + /// CHECK-START-ARM64: int Main.$opt$noinline$mulAdd(int, int, int) instruction_simplifier_arm64 (before) + /// CHECK: <<Acc:i\d+>> ParameterValue + /// CHECK: <<Left:i\d+>> ParameterValue + /// CHECK: <<Right:i\d+>> ParameterValue + /// CHECK: <<Mul:i\d+>> Mul [<<Left>>,<<Right>>] + /// CHECK: <<Add:i\d+>> Add [<<Acc>>,<<Mul>>] + /// CHECK: Return [<<Add>>] + + /// CHECK-START-ARM64: int Main.$opt$noinline$mulAdd(int, int, int) instruction_simplifier_arm64 (after) + /// CHECK: <<Acc:i\d+>> ParameterValue + /// CHECK: <<Left:i\d+>> ParameterValue + /// CHECK: <<Right:i\d+>> ParameterValue + /// CHECK: <<MulAdd:i\d+>> Arm64MultiplyAccumulate [<<Acc>>,<<Left>>,<<Right>>] kind:Add + /// CHECK: Return [<<MulAdd>>] + + /// CHECK-START-ARM64: int Main.$opt$noinline$mulAdd(int, int, int) instruction_simplifier_arm64 (after) + /// CHECK-NOT: Mul + /// CHECK-NOT: Add + + /// CHECK-START-ARM64: int Main.$opt$noinline$mulAdd(int, int, int) disassembly (after) + /// CHECK: madd w{{\d+}}, w{{\d+}}, w{{\d+}}, w{{\d+}} + + public static int $opt$noinline$mulAdd(int acc, int left, int right) { + if (doThrow) throw new Error(); + return acc + left * right; + } + + /** + * Test basic merging of `MUL+SUB` into `MULSUB`. + */ + + /// CHECK-START-ARM64: long Main.$opt$noinline$mulSub(long, long, long) instruction_simplifier_arm64 (before) + /// CHECK: <<Acc:j\d+>> ParameterValue + /// CHECK: <<Left:j\d+>> ParameterValue + /// CHECK: <<Right:j\d+>> ParameterValue + /// CHECK: <<Mul:j\d+>> Mul [<<Left>>,<<Right>>] + /// CHECK: <<Sub:j\d+>> Sub [<<Acc>>,<<Mul>>] + /// CHECK: Return [<<Sub>>] + + /// CHECK-START-ARM64: long Main.$opt$noinline$mulSub(long, long, long) instruction_simplifier_arm64 (after) + /// CHECK: <<Acc:j\d+>> ParameterValue + /// CHECK: <<Left:j\d+>> ParameterValue + /// CHECK: <<Right:j\d+>> ParameterValue + /// CHECK: <<MulSub:j\d+>> Arm64MultiplyAccumulate [<<Acc>>,<<Left>>,<<Right>>] kind:Sub + /// CHECK: Return [<<MulSub>>] + + /// CHECK-START-ARM64: long Main.$opt$noinline$mulSub(long, long, long) instruction_simplifier_arm64 (after) + /// CHECK-NOT: Mul + /// CHECK-NOT: Sub + + /// CHECK-START-ARM64: long Main.$opt$noinline$mulSub(long, long, long) disassembly (after) + /// CHECK: msub x{{\d+}}, x{{\d+}}, x{{\d+}}, x{{\d+}} + + public static long $opt$noinline$mulSub(long acc, long left, long right) { + if (doThrow) throw new Error(); + return acc - left * right; + } + + /** + * Test that we do not create a multiply-accumulate instruction when there + * are other uses of the multiplication that cannot merge it. 
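+   * For example, in `temp | (acc + temp)` below the Or also consumes the Mul,
+   * so merging the Add into a multiply-accumulate would not make the Mul dead.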
+ */ + + /// CHECK-START-ARM64: int Main.$opt$noinline$multipleUses1(int, int, int) instruction_simplifier_arm64 (before) + /// CHECK: <<Acc:i\d+>> ParameterValue + /// CHECK: <<Left:i\d+>> ParameterValue + /// CHECK: <<Right:i\d+>> ParameterValue + /// CHECK: <<Mul:i\d+>> Mul [<<Left>>,<<Right>>] + /// CHECK: <<Add:i\d+>> Add [<<Acc>>,<<Mul>>] + /// CHECK: <<Or:i\d+>> Or [<<Mul>>,<<Add>>] + /// CHECK: Return [<<Or>>] + + /// CHECK-START-ARM64: int Main.$opt$noinline$multipleUses1(int, int, int) instruction_simplifier_arm64 (after) + /// CHECK: <<Acc:i\d+>> ParameterValue + /// CHECK: <<Left:i\d+>> ParameterValue + /// CHECK: <<Right:i\d+>> ParameterValue + /// CHECK: <<Mul:i\d+>> Mul [<<Left>>,<<Right>>] + /// CHECK: <<Add:i\d+>> Add [<<Acc>>,<<Mul>>] + /// CHECK: <<Or:i\d+>> Or [<<Mul>>,<<Add>>] + /// CHECK: Return [<<Or>>] + + /// CHECK-START-ARM64: int Main.$opt$noinline$multipleUses1(int, int, int) instruction_simplifier_arm64 (after) + /// CHECK-NOT: Arm64MultiplyAccumulate + + public static int $opt$noinline$multipleUses1(int acc, int left, int right) { + if (doThrow) throw new Error(); + int temp = left * right; + return temp | (acc + temp); + } + + /** + * Test that we do not create a multiply-accumulate instruction even when all + * uses of the multiplication can merge it. + */ + + /// CHECK-START-ARM64: long Main.$opt$noinline$multipleUses2(long, long, long) instruction_simplifier_arm64 (before) + /// CHECK: <<Acc:j\d+>> ParameterValue + /// CHECK: <<Left:j\d+>> ParameterValue + /// CHECK: <<Right:j\d+>> ParameterValue + /// CHECK: <<Mul:j\d+>> Mul [<<Left>>,<<Right>>] + /// CHECK: <<Add:j\d+>> Add [<<Acc>>,<<Mul>>] + /// CHECK: <<Sub:j\d+>> Sub [<<Acc>>,<<Mul>>] + /// CHECK: <<Res:j\d+>> Add [<<Add>>,<<Sub>>] + /// CHECK: Return [<<Res>>] + + /// CHECK-START-ARM64: long Main.$opt$noinline$multipleUses2(long, long, long) instruction_simplifier_arm64 (after) + /// CHECK: <<Acc:j\d+>> ParameterValue + /// CHECK: <<Left:j\d+>> ParameterValue + /// CHECK: <<Right:j\d+>> ParameterValue + /// CHECK: <<Mul:j\d+>> Mul [<<Left>>,<<Right>>] + /// CHECK: <<Add:j\d+>> Add [<<Acc>>,<<Mul>>] + /// CHECK: <<Sub:j\d+>> Sub [<<Acc>>,<<Mul>>] + /// CHECK: <<Res:j\d+>> Add [<<Add>>,<<Sub>>] + /// CHECK: Return [<<Res>>] + + /// CHECK-START-ARM64: long Main.$opt$noinline$multipleUses2(long, long, long) instruction_simplifier_arm64 (after) + /// CHECK-NOT: Arm64MultiplyAccumulate + + + public static long $opt$noinline$multipleUses2(long acc, long left, long right) { + if (doThrow) throw new Error(); + long temp = left * right; + return (acc + temp) + (acc - temp); + } + + + /** + * Test the interpretation of `a * (b + 1)` as `a + (a * b)`. 
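+   * Here `acc * (var + 1)` distributes to `acc + (acc * var)`, which is why the
+   * expected Arm64MultiplyAccumulate takes <<Acc>> both as the accumulator and
+   * as one of the multiplier inputs.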
+ */ + + /// CHECK-START-ARM64: int Main.$opt$noinline$mulPlusOne(int, int) instruction_simplifier_arm64 (before) + /// CHECK: <<Acc:i\d+>> ParameterValue + /// CHECK: <<Var:i\d+>> ParameterValue + /// CHECK: <<Const1:i\d+>> IntConstant 1 + /// CHECK: <<Add:i\d+>> Add [<<Var>>,<<Const1>>] + /// CHECK: <<Mul:i\d+>> Mul [<<Acc>>,<<Add>>] + /// CHECK: Return [<<Mul>>] + + /// CHECK-START-ARM64: int Main.$opt$noinline$mulPlusOne(int, int) instruction_simplifier_arm64 (after) + /// CHECK: <<Acc:i\d+>> ParameterValue + /// CHECK: <<Var:i\d+>> ParameterValue + /// CHECK: <<MulAdd:i\d+>> Arm64MultiplyAccumulate [<<Acc>>,<<Acc>>,<<Var>>] kind:Add + /// CHECK: Return [<<MulAdd>>] + + /// CHECK-START-ARM64: int Main.$opt$noinline$mulPlusOne(int, int) instruction_simplifier_arm64 (after) + /// CHECK-NOT: Mul + /// CHECK-NOT: Add + + /// CHECK-START-ARM64: int Main.$opt$noinline$mulPlusOne(int, int) disassembly (after) + /// CHECK: madd w{{\d+}}, w{{\d+}}, w{{\d+}}, w{{\d+}} + + public static int $opt$noinline$mulPlusOne(int acc, int var) { + if (doThrow) throw new Error(); + return acc * (var + 1); + } + + + /** + * Test the interpretation of `a * (1 - b)` as `a - (a * b)`. + */ + + /// CHECK-START-ARM64: long Main.$opt$noinline$mulMinusOne(long, long) instruction_simplifier_arm64 (before) + /// CHECK: <<Acc:j\d+>> ParameterValue + /// CHECK: <<Var:j\d+>> ParameterValue + /// CHECK: <<Const1:j\d+>> LongConstant 1 + /// CHECK: <<Sub:j\d+>> Sub [<<Const1>>,<<Var>>] + /// CHECK: <<Mul:j\d+>> Mul [<<Acc>>,<<Sub>>] + /// CHECK: Return [<<Mul>>] + + /// CHECK-START-ARM64: long Main.$opt$noinline$mulMinusOne(long, long) instruction_simplifier_arm64 (after) + /// CHECK: <<Acc:j\d+>> ParameterValue + /// CHECK: <<Var:j\d+>> ParameterValue + /// CHECK: <<MulSub:j\d+>> Arm64MultiplyAccumulate [<<Acc>>,<<Acc>>,<<Var>>] kind:Sub + /// CHECK: Return [<<MulSub>>] + + /// CHECK-START-ARM64: long Main.$opt$noinline$mulMinusOne(long, long) instruction_simplifier_arm64 (after) + /// CHECK-NOT: Mul + /// CHECK-NOT: Sub + + /// CHECK-START-ARM64: long Main.$opt$noinline$mulMinusOne(long, long) disassembly (after) + /// CHECK: msub x{{\d+}}, x{{\d+}}, x{{\d+}}, x{{\d+}} + + public static long $opt$noinline$mulMinusOne(long acc, long var) { + if (doThrow) throw new Error(); + return acc * (1 - var); + } + + + public static void main(String[] args) { + assertIntEquals(7, $opt$noinline$mulAdd(1, 2, 3)); + assertLongEquals(-26, $opt$noinline$mulSub(4, 5, 6)); + assertIntEquals(79, $opt$noinline$multipleUses1(7, 8, 9)); + assertLongEquals(20, $opt$noinline$multipleUses2(10, 11, 12)); + assertIntEquals(195, $opt$noinline$mulPlusOne(13, 14)); + assertLongEquals(-225, $opt$noinline$mulMinusOne(15, 16)); + } +} diff --git a/test/550-checker-regression-wide-store/expected.txt b/test/550-checker-regression-wide-store/expected.txt new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/test/550-checker-regression-wide-store/expected.txt diff --git a/test/550-checker-regression-wide-store/info.txt b/test/550-checker-regression-wide-store/info.txt new file mode 100644 index 0000000000..6cf04bc35a --- /dev/null +++ b/test/550-checker-regression-wide-store/info.txt @@ -0,0 +1,3 @@ +Test an SsaBuilder regression where storing into the high vreg of a pair +would not invalidate the low vreg. The resulting environment would generate +an incorrect stack map, causing deopt and try/catch to use a wrong location.
\ No newline at end of file diff --git a/test/550-checker-regression-wide-store/smali/TestCase.smali b/test/550-checker-regression-wide-store/smali/TestCase.smali new file mode 100644 index 0000000000..7974d56a8f --- /dev/null +++ b/test/550-checker-regression-wide-store/smali/TestCase.smali @@ -0,0 +1,82 @@ +# Copyright (C) 2015 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +.class public LTestCase; +.super Ljava/lang/Object; + +.method public static $noinline$throw()V + .registers 1 + new-instance v0, Ljava/lang/Exception; + invoke-direct {v0}, Ljava/lang/Exception;-><init>()V + throw v0 +.end method + +# Test storing into the high vreg of a wide pair. This scenario has runtime +# behaviour implications so we run it from Main.main. + +## CHECK-START: int TestCase.invalidateLow(long) ssa_builder (after) +## CHECK-DAG: <<Cst0:i\d+>> IntConstant 0 +## CHECK-DAG: <<Arg:j\d+>> ParameterValue +## CHECK-DAG: <<Cast:i\d+>> TypeConversion [<<Arg>>] +## CHECK-DAG: InvokeStaticOrDirect method_name:java.lang.System.nanoTime env:[[_,<<Cst0>>,<<Arg>>,_]] +## CHECK-DAG: InvokeStaticOrDirect method_name:TestCase.$noinline$throw env:[[_,<<Cast>>,<<Arg>>,_]] + +.method public static invalidateLow(J)I + .registers 4 + + const/4 v1, 0x0 + + :try_start + invoke-static {}, Ljava/lang/System;->nanoTime()J + move-wide v0, p0 + long-to-int v1, v0 + invoke-static {}, LTestCase;->$noinline$throw()V + :try_end + .catchall {:try_start .. :try_end} :catchall + + :catchall + return v1 + +.end method + +# Test that storing a wide invalidates the value in the high vreg. This +# cannot be detected from runtime so we only test the environment with Checker. + +## CHECK-START: void TestCase.invalidateHigh1(long) ssa_builder (after) +## CHECK-DAG: <<Arg:j\d+>> ParameterValue +## CHECK-DAG: InvokeStaticOrDirect method_name:java.lang.System.nanoTime env:[[<<Arg>>,_,<<Arg>>,_]] + +.method public static invalidateHigh1(J)V + .registers 4 + + const/4 v1, 0x0 + move-wide v0, p0 + invoke-static {}, Ljava/lang/System;->nanoTime()J + return-void + +.end method + +## CHECK-START: void TestCase.invalidateHigh2(long) ssa_builder (after) +## CHECK-DAG: <<Arg:j\d+>> ParameterValue +## CHECK-DAG: InvokeStaticOrDirect method_name:java.lang.System.nanoTime env:[[<<Arg>>,_,_,<<Arg>>,_]] + +.method public static invalidateHigh2(J)V + .registers 5 + + move-wide v1, p0 + move-wide v0, p0 + invoke-static {}, Ljava/lang/System;->nanoTime()J + return-void + +.end method diff --git a/test/550-checker-regression-wide-store/src/Main.java b/test/550-checker-regression-wide-store/src/Main.java new file mode 100644 index 0000000000..9b502df632 --- /dev/null +++ b/test/550-checker-regression-wide-store/src/Main.java @@ -0,0 +1,40 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.lang.reflect.Method; + +public class Main { + + // Workaround for b/18051191. + class InnerClass {} + + private static int runTestCase(String name, long arg) throws Exception { + Class<?> c = Class.forName("TestCase"); + Method m = c.getMethod(name, long.class); + int result = (Integer) m.invoke(null, arg); + return result; + } + + private static void assertEquals(int expected, int actual) { + if (expected != actual) { + throw new Error("Wrong result: " + expected + " != " + actual); + } + } + + public static void main(String[] args) throws Exception { + assertEquals(42, runTestCase("invalidateLow", 42L)); + } +} diff --git a/test/550-new-instance-clinit/expected.txt b/test/550-new-instance-clinit/expected.txt new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/test/550-new-instance-clinit/expected.txt diff --git a/test/550-new-instance-clinit/info.txt b/test/550-new-instance-clinit/info.txt new file mode 100644 index 0000000000..c5fa3c7cc9 --- /dev/null +++ b/test/550-new-instance-clinit/info.txt @@ -0,0 +1,3 @@ +Regression test for optimizing which used to treat +HNewInstance as not having side effects even though it +could invoke a clinit method. diff --git a/test/LambdaInterfaces/LambdaInterfaces.java b/test/550-new-instance-clinit/src/Main.java index 261163d268..45e259ef2c 100644 --- a/test/LambdaInterfaces/LambdaInterfaces.java +++ b/test/550-new-instance-clinit/src/Main.java @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011 The Android Open Source Project + * Copyright (C) 2015 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,20 +14,20 @@ * limitations under the License. */ -class LambdaInterfaces { - interface I { - public int i(); - } - interface J { - public String foo = "foo"; - public void j1(); - } - interface K extends J { - } - interface L { - public int sum(int a, int b); - } - interface C { - public String concat(String a, String b); +public class Main { + public static void main(String[] args) { + int foo = Main.a; + new Bar(); + foo = Main.a; + if (foo != 43) { + throw new Error("Expected 43, got " + foo); } + } + static int a = 42; +} + +class Bar { + static { + Main.a++; + } } diff --git a/test/955-lambda-smali/expected.txt b/test/955-lambda-smali/expected.txt index 8afe4bcca7..16381e4b46 100644 --- a/test/955-lambda-smali/expected.txt +++ b/test/955-lambda-smali/expected.txt @@ -26,5 +26,3 @@ Caught NPE (CaptureVariables) (0-args, 1 captured variable 'D'): value is -Infinity (CaptureVariables) (0-args, 8 captured variable 'ZBCSIJFD'): value is true,R,∂,1000,12345678,3287471278325742,Infinity,-Infinity (CaptureVariables) Caught NPE -(BoxInvoke) Hello boxing world! 
(0-args, no closure) void -(BoxInvoke) Hello boxing world!(1-args, no closure) returned: 12345678 diff --git a/test/955-lambda-smali/smali/BoxInvoke.smali b/test/955-lambda-smali/smali/BoxInvoke.smali deleted file mode 100644 index 8b53333396..0000000000 --- a/test/955-lambda-smali/smali/BoxInvoke.smali +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright (C) 2015 The Android Open Source Project -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -.class public LBoxInvoke; -.super Ljava/lang/Object; - -.method public constructor <init>()V -.registers 1 - invoke-direct {p0}, Ljava/lang/Object;-><init>()V - return-void -.end method - -.method public static run()V - .registers 0 - - invoke-static {}, LBoxInvoke;->testBoxInvoke()V - invoke-static {}, LBoxInvoke;->forceGC()V - - return-void -.end method - -# Test that invoke-virtual works on boxed innate lambdas. -.method public static testBoxInvoke()V - .registers 100 - - # Try invoking 0-arg void return lambda - create-lambda v0, LBoxInvoke;->doHelloWorld0(J)V - const-string v2, "Ljava/lang/Runnable;" - box-lambda v2, v0 # Ljava/lang/Runnable; - invoke-interface {v2}, Ljava/lang/Runnable;->run()V - - # Try invoking 1-arg int return lambda - create-lambda v3, LBoxInvoke;->doHelloWorld1(JLjava/lang/Object;)I - const-string v5, "Ljava/lang/Comparable;" - box-lambda v5, v3 # Ljava/lang/Comparable; - const-string v6, "Hello boxing world!" - invoke-interface {v5, v6}, Ljava/lang/Comparable;->compareTo(Ljava/lang/Object;)I - move-result v7 - sget-object v8, Ljava/lang/System;->out:Ljava/io/PrintStream; - invoke-virtual {v8, v7}, Ljava/io/PrintStream;->println(I)V - - return-void - - # TODO: more tests once box-lambda can take a type descriptor. - -.end method - -#TODO: should use a closure type instead of a long. -.method public static doHelloWorld0(J)V - .registers 4 # 1 wide parameters, 2 locals - - const-string v0, "(BoxInvoke) Hello boxing world! (0-args, no closure) void" - - sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream; - invoke-virtual {v1, v0}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V - - return-void -.end method - -#TODO: should use a closure type instead of a long. -.method public static doHelloWorld1(JLjava/lang/Object;)I - # J = closure, L = obj, I = return type - .registers 6 # 1 wide parameters, 1 narrow parameter, 3 locals - - # Prints "<before> $parameter1(Object) <after>:" without the line terminator. - - const-string v0, "(BoxInvoke) " - - sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream; - # System.out.print("<before>"); - invoke-virtual {v1, v0}, Ljava/io/PrintStream;->print(Ljava/lang/String;)V - - # System.out.print(obj); - invoke-virtual {v1, p2}, Ljava/io/PrintStream;->print(Ljava/lang/Object;)V - - # System.out.print("<after>: "); - const-string v0, "(1-args, no closure) returned: " - invoke-virtual {v1, v0}, Ljava/io/PrintStream;->print(Ljava/lang/String;)V - - const v2, 12345678 - return v2 -.end method - -# Force a GC. Used to ensure our weak reference table of boxed lambdas is getting swept. 
diff --git a/test/955-lambda-smali/smali/BoxUnbox.smali b/test/955-lambda-smali/smali/BoxUnbox.smali
index 157adb359d..915de2d55d 100644
--- a/test/955-lambda-smali/smali/BoxUnbox.smali
+++ b/test/955-lambda-smali/smali/BoxUnbox.smali
@@ -51,7 +51,6 @@
     .registers 3
 
     create-lambda v0, LBoxUnbox;->doHelloWorld(J)V
-    const-string v2, "Ljava/lang/Runnable;"
     box-lambda v2, v0 # v2 = box(v0)
     unbox-lambda v0, v2, J # v0 = unbox(v2)
     invoke-lambda v0, {}
@@ -64,9 +63,7 @@
     .registers 6 # 0 parameters, 6 locals
 
     create-lambda v0, LBoxUnbox;->doHelloWorld(J)V
-    const-string v2, "Ljava/lang/Runnable;"
     box-lambda v2, v0 # v2 = box(v0)
-    const-string v3, "Ljava/lang/Runnable;"
     box-lambda v3, v0 # v3 = box(v0)
 
     # The objects should be not-null, and they should have the same reference
@@ -119,7 +116,6 @@
     const v0, 0 # v0 = null
     const v1, 0 # v1 = null
 :start
-    const-string v2, "Ljava/lang/Runnable;"
     box-lambda v2, v0 # attempting to box a null lambda will throw NPE
 :end
     return-void
diff --git a/test/955-lambda-smali/smali/CaptureVariables.smali b/test/955-lambda-smali/smali/CaptureVariables.smali
index 531c2593f7..f18b7ff741 100644
--- a/test/955-lambda-smali/smali/CaptureVariables.smali
+++ b/test/955-lambda-smali/smali/CaptureVariables.smali
@@ -243,8 +243,6 @@
     # TODO: create-lambda should not write to both v0 and v1
     invoke-lambda v0, {}
 
-    return-void
-
 .end method
 
 #TODO: should use a closure type instead of a long
diff --git a/test/955-lambda-smali/smali/Main.smali b/test/955-lambda-smali/smali/Main.smali
index e8ab84c87c..9892d6124e 100644
--- a/test/955-lambda-smali/smali/Main.smali
+++ b/test/955-lambda-smali/smali/Main.smali
@@ -25,7 +25,6 @@
     invoke-static {}, LBoxUnbox;->run()V
     invoke-static {}, LMoveResult;->run()V
     invoke-static {}, LCaptureVariables;->run()V
-    invoke-static {}, LBoxInvoke;->run()V
 
 # TODO: add tests when verification fails
 
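The BoxUnbox.smali hunks above keep the test's core assertions: boxing the same closure twice must yield the same (non-null) object, and, per the deleted forceGC helper in BoxInvoke.smali, the runtime's table of boxed lambdas is weak, so entries can be swept once unreachable. A rough plain-Java sketch of such a cache, assuming a simple WeakHashMap-backed table (names and structure are illustrative, not ART's internal API):

    import java.util.Map;
    import java.util.WeakHashMap;

    public class BoxCacheSketch {
      // Weak table: once a closure key becomes unreachable, a GC may sweep
      // its entry -- the behaviour the deleted forceGC helper provoked.
      private static final Map<Object, Runnable> boxed = new WeakHashMap<>();

      static synchronized Runnable box(Object closure, Runnable impl) {
        // Repeated boxing of the same closure returns the cached wrapper.
        return boxed.computeIfAbsent(closure, k -> impl);
      }

      public static void main(String[] args) {
        Object closure = new Object();
        Runnable r1 = box(closure, () -> System.out.println("run"));
        Runnable r2 = box(closure, () -> System.out.println("run"));
        System.out.println(r1 == r2);  // true: same boxed object, as BoxUnbox asserts
      }
    }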
diff --git a/test/Android.libarttest.mk b/test/Android.libarttest.mk
index 7a22e1b74a..f74a516486 100644
--- a/test/Android.libarttest.mk
+++ b/test/Android.libarttest.mk
@@ -37,7 +37,8 @@ LIBARTTEST_COMMON_SRC_FILES := \
   457-regs/regs_jni.cc \
   461-get-reference-vreg/get_reference_vreg_jni.cc \
   466-get-live-vreg/get_live_vreg_jni.cc \
-  497-inlining-and-class-loader/clear_dex_cache.cc
+  497-inlining-and-class-loader/clear_dex_cache.cc \
+  543-env-long-ref/env_long_ref.cc
 
 ART_TARGET_LIBARTTEST_$(ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttest.so
 ART_TARGET_LIBARTTEST_$(ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttestd.so
diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh
index 0747712e7f..47fc50fbd2 100755
--- a/tools/run-jdwp-tests.sh
+++ b/tools/run-jdwp-tests.sh
@@ -28,6 +28,18 @@ if [ ! -f $test_jar ]; then
   exit 1
 fi
 
+if [ "x$ART_USE_READ_BARRIER" = xtrue ]; then
+  # For the moment, skip JDWP tests when read barriers are enabled, as
+  # they sometimes exhibit a deadlock issue with the concurrent
+  # copying collector in the read barrier configuration, between the
+  # HeapTaskDaemon and the JDWP thread (b/25800335).
+  #
+  # TODO: Re-enable the JDWP tests when this deadlock issue is fixed.
+  echo "JDWP tests are temporarily disabled in the read barrier configuration because of"
+  echo "a deadlock issue (b/25800335)."
+  exit 0
+fi
+
 art="/data/local/tmp/system/bin/art"
 art_debugee="sh /data/local/tmp/system/bin/art"
 args=$@