161 files changed, 3313 insertions, 778 deletions
diff --git a/build/Android.oat.mk b/build/Android.oat.mk index f53740e6e0..c733febd06 100644 --- a/build/Android.oat.mk +++ b/build/Android.oat.mk @@ -109,7 +109,7 @@ $$(core_image_name): $$(HOST_CORE_DEX_LOCATIONS) $$(core_dex2oat_dependency) --oat-location=$$(PRIVATE_CORE_OAT_NAME) --image=$$(PRIVATE_CORE_IMG_NAME) \ --base=$$(LIBART_IMG_HOST_BASE_ADDRESS) --instruction-set=$$($(2)ART_HOST_ARCH) \ $$(LOCAL_$(2)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION) \ - --host --android-root=$$(HOST_OUT) --include-patch-information \ + --host --android-root=$$(HOST_OUT) \ --generate-debug-info --generate-build-id --compile-pic \ $$(PRIVATE_CORE_MULTI_PARAM) $$(PRIVATE_CORE_COMPILE_OPTIONS) @@ -212,7 +212,7 @@ $$(core_image_name): $$(TARGET_CORE_DEX_FILES) $$(core_dex2oat_dependency) --base=$$(LIBART_IMG_TARGET_BASE_ADDRESS) --instruction-set=$$($(2)TARGET_ARCH) \ --instruction-set-variant=$$($(2)DEX2OAT_TARGET_CPU_VARIANT) \ --instruction-set-features=$$($(2)DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES) \ - --android-root=$$(PRODUCT_OUT)/system --include-patch-information \ + --android-root=$$(PRODUCT_OUT)/system \ --generate-debug-info --generate-build-id --compile-pic \ $$(PRIVATE_CORE_COMPILE_OPTIONS) || (rm $$(PRIVATE_CORE_OAT_NAME); exit 1) diff --git a/compiler/intrinsics_list.h b/compiler/intrinsics_list.h index 9bd25d8484..63c23cb074 100644 --- a/compiler/intrinsics_list.h +++ b/compiler/intrinsics_list.h @@ -24,6 +24,10 @@ // Note: adding a new intrinsic requires an art image version change, // as the modifiers flag for some ArtMethods will need to be changed. +// Note: j.l.Integer.valueOf says kNoThrow even though it could throw an OOME. +// The kNoThrow should be renamed to kNoVisibleThrow, as it is ok to GVN Integer.valueOf +// (kNoSideEffects), and it is also OK to remove it if it's unused. + #define INTRINSICS_LIST(V) \ V(DoubleDoubleToRawLongBits, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Double;", "doubleToRawLongBits", "(D)J") \ V(DoubleDoubleToLongBits, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Double;", "doubleToLongBits", "(D)J") \ @@ -149,7 +153,8 @@ V(UnsafeLoadFence, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "loadFence", "()V") \ V(UnsafeStoreFence, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "storeFence", "()V") \ V(UnsafeFullFence, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "fullFence", "()V") \ - V(ReferenceGetReferent, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/ref/Reference;", "getReferent", "()Ljava/lang/Object;") + V(ReferenceGetReferent, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/ref/Reference;", "getReferent", "()Ljava/lang/Object;") \ + V(IntegerValueOf, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Integer;", "valueOf", "(I)Ljava/lang/Integer;") #endif // ART_COMPILER_INTRINSICS_LIST_H_ #undef ART_COMPILER_INTRINSICS_LIST_H_ // #define is only for lint. diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc index 0ea11255a8..c5ec859d1f 100644 --- a/compiler/oat_writer.cc +++ b/compiler/oat_writer.cc @@ -104,6 +104,13 @@ inline uint32_t CodeAlignmentSize(uint32_t header_offset, const CompiledMethod& // Defines the location of the raw dex file to write. 
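// Aside (illustration, not part of the patch): the DexFileSource hunks below move the
// Type enum from the private section to public and add a GetType() accessor, so failure
// paths such as the CHECK in LayoutAndWriteDexFile can report which kind of source was
// actually seen. A minimal standalone sketch of the same pattern:

#include <cassert>
#include <cstdio>

class Source {
 public:
  enum Type { kNone, kZipEntry, kRawFile, kRawData };

  explicit Source(Type type) : type_(type) {}
  Type GetType() const { return type_; }
  bool IsRawData() const { return type_ == kRawData; }

 private:
  Type type_;
};

int main() {
  Source source(Source::kRawFile);
  if (!source.IsRawData()) {
    // With the enum public, the failing check can log the numeric type (prints 2).
    std::fprintf(stderr, "unexpected source type %d\n", static_cast<int>(source.GetType()));
  }
  assert(source.GetType() == Source::kRawFile);
  return 0;
}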
class OatWriter::DexFileSource { public: + enum Type { + kNone, + kZipEntry, + kRawFile, + kRawData, + }; + explicit DexFileSource(ZipEntry* zip_entry) : type_(kZipEntry), source_(zip_entry) { DCHECK(source_ != nullptr); @@ -119,6 +126,7 @@ class OatWriter::DexFileSource { DCHECK(source_ != nullptr); } + Type GetType() const { return type_; } bool IsZipEntry() const { return type_ == kZipEntry; } bool IsRawFile() const { return type_ == kRawFile; } bool IsRawData() const { return type_ == kRawData; } @@ -147,13 +155,6 @@ class OatWriter::DexFileSource { } private: - enum Type { - kNone, - kZipEntry, - kRawFile, - kRawData, - }; - Type type_; const void* source_; }; @@ -2265,10 +2266,28 @@ bool OatWriter::LayoutAndWriteDexFile(OutputStream* out, OatDexFile* oat_dex_fil /* verify */ true, /* verify_checksum */ true, &error_msg); - } else { - DCHECK(oat_dex_file->source_.IsRawFile()); + } else if (oat_dex_file->source_.IsRawFile()) { File* raw_file = oat_dex_file->source_.GetRawFile(); dex_file = DexFile::OpenDex(raw_file->Fd(), location, /* verify_checksum */ true, &error_msg); + } else { + // The source data is a vdex file. + CHECK(oat_dex_file->source_.IsRawData()) + << static_cast<size_t>(oat_dex_file->source_.GetType()); + const uint8_t* raw_dex_file = oat_dex_file->source_.GetRawData(); + // Note: The raw data has already been checked to contain the header + // and all the data that the header specifies as the file size. + DCHECK(raw_dex_file != nullptr); + DCHECK(ValidateDexFileHeader(raw_dex_file, oat_dex_file->GetLocation())); + const UnalignedDexFileHeader* header = AsUnalignedDexFileHeader(raw_dex_file); + // Since the source may have had its layout changed, don't verify the checksum. + dex_file = DexFile::Open(raw_dex_file, + header->file_size_, + location, + oat_dex_file->dex_file_location_checksum_, + nullptr, + /* verify */ true, + /* verify_checksum */ false, + &error_msg); } if (dex_file == nullptr) { LOG(ERROR) << "Failed to open dex file for layout: " << error_msg; diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc index edccbd4904..18c95b3c41 100644 --- a/compiler/optimizing/code_generator_arm64.cc +++ b/compiler/optimizing/code_generator_arm64.cc @@ -4094,7 +4094,7 @@ void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invok } void LocationsBuilderARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) { - IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena()); + IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena(), codegen_); if (intrinsic.TryDispatch(invoke)) { return; } @@ -4107,7 +4107,7 @@ void LocationsBuilderARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* inv // art::PrepareForRegisterAllocation. 
DCHECK(!invoke->IsStaticWithExplicitClinitCheck()); - IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena()); + IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena(), codegen_); if (intrinsic.TryDispatch(invoke)) { return; } diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc index c9dde7cc55..791e63265e 100644 --- a/compiler/optimizing/code_generator_mips.cc +++ b/compiler/optimizing/code_generator_mips.cc @@ -2073,6 +2073,11 @@ void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) { LOG(FATAL) << "Unreachable type " << instruction->GetType(); UNREACHABLE(); } + + if (type == Primitive::kPrimNot) { + Register out = locations->Out().AsRegister<Register>(); + __ MaybeUnpoisonHeapReference(out); + } } void LocationsBuilderMIPS::VisitArrayLength(HArrayLength* instruction) { @@ -2200,7 +2205,31 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) { DCHECK(!needs_write_barrier); } else { Register value = value_location.AsRegister<Register>(); - __ StoreToOffset(kStoreWord, value, base_reg, data_offset, null_checker); + if (kPoisonHeapReferences && needs_write_barrier) { + // Note that in the case where `value` is a null reference, + // we do not enter this block, as a null reference does not + // need poisoning. + DCHECK_EQ(value_type, Primitive::kPrimNot); + // Use Sw() instead of StoreToOffset() in order to be able to + // hold the poisoned reference in AT and thus avoid allocating + // yet another temporary register. + if (index.IsConstant()) { + if (!IsInt<16>(static_cast<int32_t>(data_offset))) { + int16_t low = Low16Bits(data_offset); + uint32_t high = data_offset - low; + __ Addiu32(TMP, obj, high); + base_reg = TMP; + data_offset = low; + } + } else { + DCHECK(IsInt<16>(static_cast<int32_t>(data_offset))); + } + __ PoisonHeapReference(AT, value); + __ Sw(AT, base_reg, data_offset); + null_checker(); + } else { + __ StoreToOffset(kStoreWord, value, base_reg, data_offset, null_checker); + } if (needs_write_barrier) { DCHECK_EQ(value_type, Primitive::kPrimNot); codegen_->MarkGCCard(obj, value, instruction->GetValueCanBeNull()); @@ -2208,6 +2237,8 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) { } } else { DCHECK_EQ(value_type, Primitive::kPrimNot); + // Note: if heap poisoning is enabled, pAputObject takes care + // of poisoning the reference. codegen_->InvokeRuntime(kQuickAputObject, instruction, instruction->GetDexPc()); CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>(); } @@ -2322,6 +2353,7 @@ void InstructionCodeGeneratorMIPS::VisitCheckCast(HCheckCast* instruction) { __ Beqz(obj, slow_path->GetExitLabel()); // Compare the class of `obj` with `cls`. 
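// Aside (illustration, not part of the patch): the Sw() paths in this change split an
// offset that does not fit a sign-extended 16-bit store immediate into pieces added to
// the base register first. The sketch below checks that arithmetic standalone, for both
// the 32-bit split in the ArraySet hunk above and the daui/dahi split in the MIPS64
// hunk further below; the helpers mirror ART's utils.h but are redefined here so the
// sketch compiles on its own (narrowing casts assume two's complement, as on all
// supported targets):

#include <cassert>
#include <cstdint>

int16_t Low16Bits(uint32_t v) { return static_cast<int16_t>(v); }
int16_t High16Bits(uint32_t v) { return static_cast<int16_t>(v >> 16); }
uint32_t High32Bits(uint64_t v) { return static_cast<uint32_t>(v >> 32); }

int main() {
  for (uint32_t data_offset : {0u, 12u, 0x8000u, 0x18000u, 0x7fff8000u, 0x7fffffffu}) {
    // 32-bit split: Addiu32(TMP, obj, high), then the store uses sign_extend(low).
    int16_t low = Low16Bits(data_offset);
    uint32_t high = data_offset - static_cast<uint32_t>(static_cast<int32_t>(low));
    assert(high + static_cast<uint32_t>(static_cast<int32_t>(low)) == data_offset);

    // 64-bit split: daui adds upper16 << 16 (sign-extended), dahi adds higher16 << 32.
    // The (upper16 < 0) term compensates daui's sign extension, which is what allows
    // the full [-2GB, +2GB) range mentioned in the comment.
    int16_t low16 = Low16Bits(data_offset);
    uint64_t high48 = static_cast<uint64_t>(static_cast<int32_t>(data_offset)) - low16;
    int16_t upper16 = High16Bits(static_cast<uint32_t>(high48));
    int16_t higher16 = static_cast<int16_t>(High32Bits(high48)) + ((upper16 < 0) ? 1 : 0);
    int64_t sum = (static_cast<int64_t>(higher16) << 32) +
                  (static_cast<int64_t>(upper16) << 16) + low16;
    assert(sum == static_cast<int32_t>(data_offset));
  }
  return 0;
}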
__ LoadFromOffset(kLoadWord, obj_cls, obj, mirror::Object::ClassOffset().Int32Value()); + __ MaybeUnpoisonHeapReference(obj_cls); __ Bne(obj_cls, cls, slow_path->GetEntryLabel()); __ Bind(slow_path->GetExitLabel()); } @@ -4958,6 +4990,9 @@ void InstructionCodeGeneratorMIPS::HandleFieldGet(HInstruction* instruction, dst = locations->Out().AsRegister<Register>(); } __ LoadFromOffset(load_type, dst, obj, offset, null_checker); + if (type == Primitive::kPrimNot) { + __ MaybeUnpoisonHeapReference(dst); + } } else { DCHECK(locations->Out().IsFpuRegister()); FRegister dst = locations->Out().AsFpuRegister<FRegister>(); @@ -5016,6 +5051,7 @@ void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction, StoreOperandType store_type = kStoreByte; bool is_volatile = field_info.IsVolatile(); uint32_t offset = field_info.GetFieldOffset().Uint32Value(); + bool needs_write_barrier = CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1)); auto null_checker = GetImplicitNullChecker(instruction); switch (type) { @@ -5089,7 +5125,16 @@ void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction, } else { src = value_location.AsRegister<Register>(); } - __ StoreToOffset(store_type, src, obj, offset, null_checker); + if (kPoisonHeapReferences && needs_write_barrier) { + // Note that in the case where `value` is a null reference, + // we do not enter this block, as a null reference does not + // need poisoning. + DCHECK_EQ(type, Primitive::kPrimNot); + __ PoisonHeapReference(TMP, src); + __ StoreToOffset(store_type, TMP, obj, offset, null_checker); + } else { + __ StoreToOffset(store_type, src, obj, offset, null_checker); + } } else { FRegister src = value_location.AsFpuRegister<FRegister>(); if (type == Primitive::kPrimFloat) { @@ -5101,7 +5146,7 @@ void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction, } // TODO: memory barriers? - if (CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1))) { + if (needs_write_barrier) { Register src = value_location.AsRegister<Register>(); codegen_->MarkGCCard(obj, src, value_can_be_null); } @@ -5173,6 +5218,7 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) { // Compare the class of `obj` with `cls`. __ LoadFromOffset(kLoadWord, out, obj, mirror::Object::ClassOffset().Int32Value()); + __ MaybeUnpoisonHeapReference(out); if (instruction->IsExactCheck()) { // Classes must be equal for the instanceof to succeed. __ Xor(out, out, cls); @@ -5239,6 +5285,14 @@ void InstructionCodeGeneratorMIPS::VisitInvokeInterface(HInvokeInterface* invoke __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset); } codegen_->MaybeRecordImplicitNullCheck(invoke); + // Instead of simply (possibly) unpoisoning `temp` here, we should + // emit a read barrier for the previous class reference load. + // However this is not required in practice, as this is an + // intermediate/temporary reference and because the current + // concurrent copying collector keeps the from-space memory + // intact/accessible until the end of the marking phase (the + // concurrent copying collector may not in the future). 
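// Aside (illustration, not part of the patch): with kPoisonHeapReferences enabled, every
// reference stored in the heap is kept in poisoned form, so the MaybeUnpoisonHeapReference
// calls in these hunks decode freshly loaded references and PoisonHeapReference encodes
// them before stores. A standalone model, assuming ART's usual scheme of negating the
// 32-bit reference; note that null is a fixed point of negation, which is why the hunks
// above can skip poisoning a known-null value:

#include <cassert>
#include <cstdint>

uint32_t PoisonRef(uint32_t ref) { return 0u - ref; }          // Two's-complement negation.
uint32_t UnpoisonRef(uint32_t ref) { return PoisonRef(ref); }  // Negation is self-inverse.

int main() {
  assert(UnpoisonRef(PoisonRef(0x12345678u)) == 0x12345678u);  // Round-trips.
  assert(PoisonRef(0u) == 0u);                                 // Null needs no poisoning.
  return 0;
}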
+ __ MaybeUnpoisonHeapReference(temp); __ LoadFromOffset(kLoadWord, temp, temp, mirror::Class::ImtPtrOffset(kMipsPointerSize).Uint32Value()); uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement( @@ -5562,6 +5616,14 @@ void CodeGeneratorMIPS::GenerateVirtualCall(HInvokeVirtual* invoke, Location tem // temp = object->GetClass(); __ LoadFromOffset(kLoadWord, temp, receiver, class_offset); MaybeRecordImplicitNullCheck(invoke); + // Instead of simply (possibly) unpoisoning `temp` here, we should + // emit a read barrier for the previous class reference load. + // However this is not required in practice, as this is an + // intermediate/temporary reference and because the current + // concurrent copying collector keeps the from-space memory + // intact/accessible until the end of the marking phase (the + // concurrent copying collector may not in the future). + __ MaybeUnpoisonHeapReference(temp); // temp = temp->GetMethodAt(method_offset); __ LoadFromOffset(kLoadWord, temp, temp, method_offset); // T9 = temp->GetEntryPoint(); @@ -5692,7 +5754,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex()); bool reordering = __ SetReorder(false); codegen_->EmitPcRelativeAddressPlaceholderHigh(info, out, base_or_current_method_reg); - __ LoadFromOffset(kLoadWord, out, out, /* placeholder */ 0x5678); + GenerateGcRootFieldLoad(cls, out_loc, out, /* placeholder */ 0x5678); __ SetReorder(reordering); generate_null_check = true; break; @@ -5837,7 +5899,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_ codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex()); bool reordering = __ SetReorder(false); codegen_->EmitPcRelativeAddressPlaceholderHigh(info, out, base_or_current_method_reg); - __ LoadFromOffset(kLoadWord, out, out, /* placeholder */ 0x5678); + GenerateGcRootFieldLoad(load, out_loc, out, /* placeholder */ 0x5678); __ SetReorder(reordering); SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS(load); codegen_->AddSlowPath(slow_path); @@ -6059,6 +6121,8 @@ void LocationsBuilderMIPS::VisitNewArray(HNewArray* instruction) { } void InstructionCodeGeneratorMIPS::VisitNewArray(HNewArray* instruction) { + // Note: if heap poisoning is enabled, the entry point takes care + // of poisoning the reference. codegen_->InvokeRuntime(kQuickAllocArrayResolved, instruction, instruction->GetDexPc()); CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>(); } @@ -6076,6 +6140,8 @@ void LocationsBuilderMIPS::VisitNewInstance(HNewInstance* instruction) { } void InstructionCodeGeneratorMIPS::VisitNewInstance(HNewInstance* instruction) { + // Note: if heap poisoning is enabled, the entry point takes care + // of poisoning the reference. if (instruction->IsStringAlloc()) { // String is allocated through StringFactory. Call NewEmptyString entry point. 
Register temp = instruction->GetLocations()->GetTemp(0).AsRegister<Register>(); diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc index 5be0da4011..817854b507 100644 --- a/compiler/optimizing/code_generator_mips64.cc +++ b/compiler/optimizing/code_generator_mips64.cc @@ -1653,6 +1653,11 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) { if (!maybe_compressed_char_at) { codegen_->MaybeRecordImplicitNullCheck(instruction); } + + if (type == Primitive::kPrimNot) { + GpuRegister out = locations->Out().AsRegister<GpuRegister>(); + __ MaybeUnpoisonHeapReference(out); + } } void LocationsBuilderMIPS64::VisitArrayLength(HArrayLength* instruction) { @@ -1740,16 +1745,49 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) { case Primitive::kPrimNot: { if (!needs_runtime_call) { uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value(); + GpuRegister base_reg; GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>(); if (index.IsConstant()) { - size_t offset = - (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset; - __ StoreToOffset(kStoreWord, value, obj, offset); + data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4; + base_reg = obj; } else { DCHECK(index.IsRegister()) << index; __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4); __ Daddu(TMP, obj, TMP); - __ StoreToOffset(kStoreWord, value, TMP, data_offset); + base_reg = TMP; + } + if (kPoisonHeapReferences && needs_write_barrier) { + // Note that in the case where `value` is a null reference, + // we do not enter this block, as a null reference does not + // need poisoning. + DCHECK_EQ(value_type, Primitive::kPrimNot); + // Use Sw() instead of StoreToOffset() in order to be able to + // hold the poisoned reference in AT and thus avoid allocating + // yet another temporary register. + if (index.IsConstant()) { + if (!IsInt<16>(static_cast<int32_t>(data_offset))) { + int16_t low16 = Low16Bits(data_offset); + // For consistency with StoreToOffset() and such treat data_offset as int32_t. + uint64_t high48 = static_cast<uint64_t>(static_cast<int32_t>(data_offset)) - low16; + int16_t upper16 = High16Bits(high48); + // Allow the full [-2GB,+2GB) range in case `low16` is negative and needs a + // compensatory 64KB added, which may push `high48` above 2GB and require + // the dahi instruction. + int16_t higher16 = High32Bits(high48) + ((upper16 < 0) ? 1 : 0); + __ Daui(TMP, obj, upper16); + if (higher16 != 0) { + __ Dahi(TMP, higher16); + } + base_reg = TMP; + data_offset = low16; + } + } else { + DCHECK(IsInt<16>(static_cast<int32_t>(data_offset))); + } + __ PoisonHeapReference(AT, value); + __ Sw(AT, base_reg, data_offset); + } else { + __ StoreToOffset(kStoreWord, value, base_reg, data_offset); } codegen_->MaybeRecordImplicitNullCheck(instruction); if (needs_write_barrier) { @@ -1758,6 +1796,8 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) { } } else { DCHECK_EQ(value_type, Primitive::kPrimNot); + // Note: if heap poisoning is enabled, pAputObject takes care + // of poisoning the reference. 
codegen_->InvokeRuntime(kQuickAputObject, instruction, instruction->GetDexPc()); CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>(); } @@ -1871,6 +1911,7 @@ void InstructionCodeGeneratorMIPS64::VisitCheckCast(HCheckCast* instruction) { __ Beqzc(obj, slow_path->GetExitLabel()); // Compare the class of `obj` with `cls`. __ LoadFromOffset(kLoadUnsignedWord, obj_cls, obj, mirror::Object::ClassOffset().Int32Value()); + __ MaybeUnpoisonHeapReference(obj_cls); __ Bnec(obj_cls, cls, slow_path->GetEntryLabel()); __ Bind(slow_path->GetExitLabel()); } @@ -3086,6 +3127,7 @@ void InstructionCodeGeneratorMIPS64::HandleFieldGet(HInstruction* instruction, LocationSummary* locations = instruction->GetLocations(); GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>(); LoadOperandType load_type = kLoadUnsignedByte; + uint32_t offset = field_info.GetFieldOffset().Uint32Value(); switch (type) { case Primitive::kPrimBoolean: load_type = kLoadUnsignedByte; @@ -3117,15 +3159,20 @@ void InstructionCodeGeneratorMIPS64::HandleFieldGet(HInstruction* instruction, if (!Primitive::IsFloatingPointType(type)) { DCHECK(locations->Out().IsRegister()); GpuRegister dst = locations->Out().AsRegister<GpuRegister>(); - __ LoadFromOffset(load_type, dst, obj, field_info.GetFieldOffset().Uint32Value()); + __ LoadFromOffset(load_type, dst, obj, offset); } else { DCHECK(locations->Out().IsFpuRegister()); FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>(); - __ LoadFpuFromOffset(load_type, dst, obj, field_info.GetFieldOffset().Uint32Value()); + __ LoadFpuFromOffset(load_type, dst, obj, offset); } codegen_->MaybeRecordImplicitNullCheck(instruction); // TODO: memory barrier? + + if (type == Primitive::kPrimNot) { + GpuRegister dst = locations->Out().AsRegister<GpuRegister>(); + __ MaybeUnpoisonHeapReference(dst); + } } void LocationsBuilderMIPS64::HandleFieldSet(HInstruction* instruction, @@ -3147,6 +3194,8 @@ void InstructionCodeGeneratorMIPS64::HandleFieldSet(HInstruction* instruction, LocationSummary* locations = instruction->GetLocations(); GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>(); StoreOperandType store_type = kStoreByte; + uint32_t offset = field_info.GetFieldOffset().Uint32Value(); + bool needs_write_barrier = CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1)); switch (type) { case Primitive::kPrimBoolean: case Primitive::kPrimByte: @@ -3172,16 +3221,25 @@ void InstructionCodeGeneratorMIPS64::HandleFieldSet(HInstruction* instruction, if (!Primitive::IsFloatingPointType(type)) { DCHECK(locations->InAt(1).IsRegister()); GpuRegister src = locations->InAt(1).AsRegister<GpuRegister>(); - __ StoreToOffset(store_type, src, obj, field_info.GetFieldOffset().Uint32Value()); + if (kPoisonHeapReferences && needs_write_barrier) { + // Note that in the case where `value` is a null reference, + // we do not enter this block, as a null reference does not + // need poisoning. + DCHECK_EQ(type, Primitive::kPrimNot); + __ PoisonHeapReference(TMP, src); + __ StoreToOffset(store_type, TMP, obj, offset); + } else { + __ StoreToOffset(store_type, src, obj, offset); + } } else { DCHECK(locations->InAt(1).IsFpuRegister()); FpuRegister src = locations->InAt(1).AsFpuRegister<FpuRegister>(); - __ StoreFpuToOffset(store_type, src, obj, field_info.GetFieldOffset().Uint32Value()); + __ StoreFpuToOffset(store_type, src, obj, offset); } codegen_->MaybeRecordImplicitNullCheck(instruction); // TODO: memory barriers? 
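// Aside (illustration, not part of the patch): MarkGCCard, invoked by the write-barrier
// paths in these hunks, implements the card-marking barrier: after a reference is stored
// into an object, the card covering that object is dirtied so the GC revisits it. A
// standalone model; the shift and dirty value below are assumptions for illustration,
// the real constants live in gc::accounting::CardTable (the value_can_be_null flag
// merely adds a null check around the mark):

#include <cassert>
#include <cstdint>
#include <vector>

constexpr unsigned kCardShift = 10;   // Assumed: 1 KiB of heap per card byte.
constexpr uint8_t kCardDirty = 0x70;  // Assumed dirty marker.

void MarkCard(std::vector<uint8_t>& cards, uintptr_t heap_begin, uintptr_t obj) {
  cards[(obj - heap_begin) >> kCardShift] = kCardDirty;  // One byte store, no branches.
}

int main() {
  const uintptr_t heap_begin = 0x10000000;
  std::vector<uint8_t> cards(1024, 0);                // Covers 1 MiB of heap.
  MarkCard(cards, heap_begin, heap_begin + 0x1234);   // A store at heap offset 0x1234...
  assert(cards[0x1234 >> kCardShift] == kCardDirty);  // ...dirties card 4.
  return 0;
}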
- if (CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1))) { + if (needs_write_barrier) { DCHECK(locations->InAt(1).IsRegister()); GpuRegister src = locations->InAt(1).AsRegister<GpuRegister>(); codegen_->MarkGCCard(obj, src, value_can_be_null); @@ -3247,6 +3305,7 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) { // Compare the class of `obj` with `cls`. __ LoadFromOffset(kLoadUnsignedWord, out, obj, mirror::Object::ClassOffset().Int32Value()); + __ MaybeUnpoisonHeapReference(out); if (instruction->IsExactCheck()) { // Classes must be equal for the instanceof to succeed. __ Xor(out, out, cls); @@ -3325,6 +3384,14 @@ void InstructionCodeGeneratorMIPS64::VisitInvokeInterface(HInvokeInterface* invo __ LoadFromOffset(kLoadUnsignedWord, temp, receiver.AsRegister<GpuRegister>(), class_offset); } codegen_->MaybeRecordImplicitNullCheck(invoke); + // Instead of simply (possibly) unpoisoning `temp` here, we should + // emit a read barrier for the previous class reference load. + // However this is not required in practice, as this is an + // intermediate/temporary reference and because the current + // concurrent copying collector keeps the from-space memory + // intact/accessible until the end of the marking phase (the + // concurrent copying collector may not in the future). + __ MaybeUnpoisonHeapReference(temp); __ LoadFromOffset(kLoadDoubleword, temp, temp, mirror::Class::ImtPtrOffset(kMips64PointerSize).Uint32Value()); uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement( @@ -3567,6 +3634,14 @@ void CodeGeneratorMIPS64::GenerateVirtualCall(HInvokeVirtual* invoke, Location t // temp = object->GetClass(); __ LoadFromOffset(kLoadUnsignedWord, temp, receiver, class_offset); MaybeRecordImplicitNullCheck(invoke); + // Instead of simply (possibly) unpoisoning `temp` here, we should + // emit a read barrier for the previous class reference load. + // However this is not required in practice, as this is an + // intermediate/temporary reference and because the current + // concurrent copying collector keeps the from-space memory + // intact/accessible until the end of the marking phase (the + // concurrent copying collector may not in the future). 
+ __ MaybeUnpoisonHeapReference(temp); // temp = temp->GetMethodAt(method_offset); __ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset); // T9 = temp->GetEntryPoint(); @@ -3666,8 +3741,8 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S case HLoadClass::LoadKind::kBssEntry: { CodeGeneratorMIPS64::PcRelativePatchInfo* info = codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex()); - codegen_->EmitPcRelativeAddressPlaceholderHigh(info, AT); - __ Lwu(out, AT, /* placeholder */ 0x5678); + codegen_->EmitPcRelativeAddressPlaceholderHigh(info, out); + GenerateGcRootFieldLoad(cls, out_loc, out, /* placeholder */ 0x5678); generate_null_check = true; break; } @@ -3773,8 +3848,8 @@ void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) NO_THREA DCHECK(!codegen_->GetCompilerOptions().IsBootImage()); CodeGeneratorMIPS64::PcRelativePatchInfo* info = codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex()); - codegen_->EmitPcRelativeAddressPlaceholderHigh(info, AT); - __ Lwu(out, AT, /* placeholder */ 0x5678); + codegen_->EmitPcRelativeAddressPlaceholderHigh(info, out); + GenerateGcRootFieldLoad(load, out_loc, out, /* placeholder */ 0x5678); SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS64(load); codegen_->AddSlowPath(slow_path); __ Beqzc(out, slow_path->GetEntryLabel()); @@ -3944,6 +4019,8 @@ void LocationsBuilderMIPS64::VisitNewArray(HNewArray* instruction) { } void InstructionCodeGeneratorMIPS64::VisitNewArray(HNewArray* instruction) { + // Note: if heap poisoning is enabled, the entry point takes care + // of poisoning the reference. codegen_->InvokeRuntime(kQuickAllocArrayResolved, instruction, instruction->GetDexPc()); CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>(); } @@ -3961,6 +4038,8 @@ void LocationsBuilderMIPS64::VisitNewInstance(HNewInstance* instruction) { } void InstructionCodeGeneratorMIPS64::VisitNewInstance(HNewInstance* instruction) { + // Note: if heap poisoning is enabled, the entry point takes care + // of poisoning the reference. if (instruction->IsStringAlloc()) { // String is allocated through StringFactory. Call NewEmptyString entry point. 
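// Aside (illustration, not part of the patch): stepping back to the kBssEntry hunks
// above, replacing the plain Lwu of the .bss slot with GenerateGcRootFieldLoad makes the
// load participate in the GC-root/read-barrier protocol instead of bypassing it. The
// control flow being generated, modeled standalone (all names below are hypothetical):

#include <cassert>

struct Class { int id; };
static Class gResolved{42};

// Stand-in for the type-resolution entrypoint invoked by the slow path.
Class* ResolveTypeSlowPath(Class** bss_slot) {
  *bss_slot = &gResolved;  // The runtime publishes the class into the .bss slot.
  return &gResolved;
}

Class* LoadClassBssEntry(Class** bss_slot) {
  Class* cls = *bss_slot;  // GenerateGcRootFieldLoad: root load, plus read barrier if enabled.
  if (cls == nullptr) {    // The generate_null_check = true path.
    cls = ResolveTypeSlowPath(bss_slot);
  }
  return cls;
}

int main() {
  Class* slot = nullptr;
  assert(LoadClassBssEntry(&slot)->id == 42);  // First use takes the slow path.
  assert(slot == &gResolved);                  // Later loads hit the fast path.
  return 0;
}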
GpuRegister temp = instruction->GetLocations()->GetTemp(0).AsRegister<GpuRegister>(); diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc index 5539413aad..1cd65c1c66 100644 --- a/compiler/optimizing/induction_var_range.cc +++ b/compiler/optimizing/induction_var_range.cc @@ -377,6 +377,53 @@ bool InductionVarRange::IsFinite(HLoopInformation* loop, /*out*/ int64_t* tc) co return false; } +bool InductionVarRange::IsUnitStride(HInstruction* instruction, + /*out*/ HInstruction** offset) const { + HLoopInformation* loop = nullptr; + HInductionVarAnalysis::InductionInfo* info = nullptr; + HInductionVarAnalysis::InductionInfo* trip = nullptr; + if (HasInductionInfo(instruction, instruction, &loop, &info, &trip)) { + if (info->induction_class == HInductionVarAnalysis::kLinear && + info->op_b->operation == HInductionVarAnalysis::kFetch) { + int64_t stride_value = 0; + if (IsConstant(info->op_a, kExact, &stride_value) && stride_value == 1) { + int64_t off_value = 0; + if (IsConstant(info->op_b, kExact, &off_value) && off_value == 0) { + *offset = nullptr; + } else { + *offset = info->op_b->fetch; + } + return true; + } + } + } + return false; +} + +HInstruction* InductionVarRange::GenerateTripCount(HLoopInformation* loop, + HGraph* graph, + HBasicBlock* block) { + HInductionVarAnalysis::InductionInfo *trip = + induction_analysis_->LookupInfo(loop, GetLoopControl(loop)); + if (trip != nullptr && !IsUnsafeTripCount(trip)) { + HInstruction* taken_test = nullptr; + HInstruction* trip_expr = nullptr; + if (IsBodyTripCount(trip)) { + if (!GenerateCode(trip->op_b, nullptr, graph, block, &taken_test, false, false)) { + return nullptr; + } + } + if (GenerateCode(trip->op_a, nullptr, graph, block, &trip_expr, false, false)) { + if (taken_test != nullptr) { + HInstruction* zero = graph->GetConstant(trip->type, 0); + trip_expr = Insert(block, new (graph->GetArena()) HSelect(taken_test, trip_expr, zero, kNoDexPc)); + } + return trip_expr; + } + } + return nullptr; +} + // // Private class methods. // @@ -1157,12 +1204,15 @@ bool InductionVarRange::GenerateCode(HInductionVarAnalysis::InductionInfo* info, HInstruction* opb = nullptr; switch (info->induction_class) { case HInductionVarAnalysis::kInvariant: - // Invariants (note that even though is_min does not impact code generation for - // invariants, some effort is made to keep this parameter consistent). + // Invariants (note that since invariants only have other invariants as + // sub expressions, viz. no induction, there is no need to adjust is_min). 
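// Aside (illustration, not part of the patch): GenerateTripCount in the hunk above
// guards a body trip count with its taken-test via HSelect, so a loop whose guard fails
// contributes zero iterations. The generated expression, as plain C++:

#include <cassert>

int TripCount(bool taken_test, int trip_expr) {
  return taken_test ? trip_expr : 0;  // HSelect(taken_test, trip_expr, zero)
}

int main() {
  assert(TripCount(true, 1000) == 1000);  // Guard passes: full trip count.
  assert(TripCount(false, 1000) == 0);    // Guard fails: body never runs.
  return 0;
}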
switch (info->operation) { case HInductionVarAnalysis::kAdd: - case HInductionVarAnalysis::kRem: // no proper is_min for second arg - case HInductionVarAnalysis::kXor: // no proper is_min for second arg + case HInductionVarAnalysis::kSub: + case HInductionVarAnalysis::kMul: + case HInductionVarAnalysis::kDiv: + case HInductionVarAnalysis::kRem: + case HInductionVarAnalysis::kXor: case HInductionVarAnalysis::kLT: case HInductionVarAnalysis::kLE: case HInductionVarAnalysis::kGT: @@ -1174,6 +1224,12 @@ bool InductionVarRange::GenerateCode(HInductionVarAnalysis::InductionInfo* info, switch (info->operation) { case HInductionVarAnalysis::kAdd: operation = new (graph->GetArena()) HAdd(type, opa, opb); break; + case HInductionVarAnalysis::kSub: + operation = new (graph->GetArena()) HSub(type, opa, opb); break; + case HInductionVarAnalysis::kMul: + operation = new (graph->GetArena()) HMul(type, opa, opb, kNoDexPc); break; + case HInductionVarAnalysis::kDiv: + operation = new (graph->GetArena()) HDiv(type, opa, opb, kNoDexPc); break; case HInductionVarAnalysis::kRem: operation = new (graph->GetArena()) HRem(type, opa, opb, kNoDexPc); break; case HInductionVarAnalysis::kXor: @@ -1194,16 +1250,7 @@ bool InductionVarRange::GenerateCode(HInductionVarAnalysis::InductionInfo* info, return true; } break; - case HInductionVarAnalysis::kSub: // second reversed! - if (GenerateCode(info->op_a, trip, graph, block, &opa, in_body, is_min) && - GenerateCode(info->op_b, trip, graph, block, &opb, in_body, !is_min)) { - if (graph != nullptr) { - *result = Insert(block, new (graph->GetArena()) HSub(type, opa, opb)); - } - return true; - } - break; - case HInductionVarAnalysis::kNeg: // reversed! + case HInductionVarAnalysis::kNeg: if (GenerateCode(info->op_b, trip, graph, block, &opb, in_body, !is_min)) { if (graph != nullptr) { *result = Insert(block, new (graph->GetArena()) HNeg(type, opb)); @@ -1240,9 +1287,9 @@ bool InductionVarRange::GenerateCode(HInductionVarAnalysis::InductionInfo* info, } } break; - default: - break; - } + case HInductionVarAnalysis::kNop: + LOG(FATAL) << "unexpected invariant nop"; + } // switch invariant operation break; case HInductionVarAnalysis::kLinear: { // Linear induction a * i + b, for normalized 0 <= i < TC. For ranges, this should @@ -1293,7 +1340,7 @@ bool InductionVarRange::GenerateCode(HInductionVarAnalysis::InductionInfo* info, } break; } - } + } // switch induction class } return false; } diff --git a/compiler/optimizing/induction_var_range.h b/compiler/optimizing/induction_var_range.h index 6c424b78b9..0858d73982 100644 --- a/compiler/optimizing/induction_var_range.h +++ b/compiler/optimizing/induction_var_range.h @@ -24,7 +24,8 @@ namespace art { /** * This class implements range analysis on expressions within loops. It takes the results * of induction variable analysis in the constructor and provides a public API to obtain - * a conservative lower and upper bound value on each instruction in the HIR. + * a conservative lower and upper bound value or last value on each instruction in the HIR. + * The public API also provides a few general-purpose utility methods related to induction. * * The range analysis is done with a combination of symbolic and partial integral evaluation * of expressions. 
The analysis avoids complications with wrap-around arithmetic on the integral @@ -154,6 +155,19 @@ class InductionVarRange { */ bool IsFinite(HLoopInformation* loop, /*out*/ int64_t* tc) const; + /** + * Checks if instruction is a unit stride induction inside the closest enveloping loop. + * Returns invariant offset on success. + */ + bool IsUnitStride(HInstruction* instruction, /*out*/ HInstruction** offset) const; + + /** + * Generates the trip count expression for the given loop. Code is generated in given block + * and graph. The expression is guarded by a taken test if needed. Returns the trip count + * expression on success or null otherwise. + */ + HInstruction* GenerateTripCount(HLoopInformation* loop, HGraph* graph, HBasicBlock* block); + private: /* * Enum used in IsConstant() request. diff --git a/compiler/optimizing/induction_var_range_test.cc b/compiler/optimizing/induction_var_range_test.cc index d81817fb09..fcdf8eb7dc 100644 --- a/compiler/optimizing/induction_var_range_test.cc +++ b/compiler/optimizing/induction_var_range_test.cc @@ -48,6 +48,11 @@ class InductionVarRangeTest : public CommonCompilerTest { EXPECT_EQ(v1.is_known, v2.is_known); } + void ExpectInt(int32_t value, HInstruction* i) { + ASSERT_TRUE(i->IsIntConstant()); + EXPECT_EQ(value, i->AsIntConstant()->GetValue()); + } + // // Construction methods. // @@ -757,10 +762,20 @@ TEST_F(InductionVarRangeTest, ConstantTripCountUp) { // Last value (unsimplified). HInstruction* last = range_.GenerateLastValue(phi, graph_, loop_preheader_); ASSERT_TRUE(last->IsAdd()); - ASSERT_TRUE(last->InputAt(0)->IsIntConstant()); - EXPECT_EQ(1000, last->InputAt(0)->AsIntConstant()->GetValue()); - ASSERT_TRUE(last->InputAt(1)->IsIntConstant()); - EXPECT_EQ(0, last->InputAt(1)->AsIntConstant()->GetValue()); + ExpectInt(1000, last->InputAt(0)); + ExpectInt(0, last->InputAt(1)); + + // Loop logic. + int64_t tc = 0; + EXPECT_TRUE(range_.IsFinite(loop_header_->GetLoopInformation(), &tc)); + EXPECT_EQ(1000, tc); + HInstruction* offset = nullptr; + EXPECT_TRUE(range_.IsUnitStride(phi, &offset)); + EXPECT_TRUE(offset == nullptr); + HInstruction* tce = range_.GenerateTripCount( + loop_header_->GetLoopInformation(), graph_, loop_preheader_); + ASSERT_TRUE(tce != nullptr); + ExpectInt(1000, tce); } TEST_F(InductionVarRangeTest, ConstantTripCountDown) { @@ -799,15 +814,27 @@ TEST_F(InductionVarRangeTest, ConstantTripCountDown) { // Last value (unsimplified). HInstruction* last = range_.GenerateLastValue(phi, graph_, loop_preheader_); ASSERT_TRUE(last->IsSub()); - ASSERT_TRUE(last->InputAt(0)->IsIntConstant()); - EXPECT_EQ(1000, last->InputAt(0)->AsIntConstant()->GetValue()); + ExpectInt(1000, last->InputAt(0)); ASSERT_TRUE(last->InputAt(1)->IsNeg()); last = last->InputAt(1)->InputAt(0); ASSERT_TRUE(last->IsSub()); - ASSERT_TRUE(last->InputAt(0)->IsIntConstant()); - EXPECT_EQ(0, last->InputAt(0)->AsIntConstant()->GetValue()); - ASSERT_TRUE(last->InputAt(1)->IsIntConstant()); - EXPECT_EQ(1000, last->InputAt(1)->AsIntConstant()->GetValue()); + ExpectInt(0, last->InputAt(0)); + ExpectInt(1000, last->InputAt(1)); + + // Loop logic. 
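// Aside (illustration, not part of the patch): the loop-logic checks below exercise
// IsUnitStride, which answers "is this induction i = offset + 1 * iter?" and reports the
// invariant offset (null when the offset is the constant 0). A standalone miniature of
// the descriptor walk, with simplified stand-ins for InductionInfo:

#include <cassert>

struct Info {
  enum Class { kInvariant, kLinear } induction_class;
  long stride;               // Models IsConstant(info->op_a, kExact, &stride_value).
  bool offset_is_zero;       // Models IsConstant(info->op_b, kExact, &off_value) == 0.
  const char* offset_fetch;  // Models info->op_b->fetch (an HInstruction* in ART).
};

bool IsUnitStride(const Info& info, const char** offset) {
  if (info.induction_class != Info::kLinear || info.stride != 1) return false;
  *offset = info.offset_is_zero ? nullptr : info.offset_fetch;
  return true;
}

int main() {
  const char* offset = "";
  Info i0{Info::kLinear, 1, true, nullptr};   // for (i = 0; ...; i++)
  assert(IsUnitStride(i0, &offset) && offset == nullptr);
  Info i1{Info::kLinear, 1, false, "x"};      // for (i = x; ...; i++)
  assert(IsUnitStride(i1, &offset) && offset != nullptr);
  Info i2{Info::kLinear, -1, true, nullptr};  // for (i = n; ...; i--): not unit stride
  assert(!IsUnitStride(i2, &offset));
  return 0;
}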
+ int64_t tc = 0; + EXPECT_TRUE(range_.IsFinite(loop_header_->GetLoopInformation(), &tc)); + EXPECT_EQ(1000, tc); + HInstruction* offset = nullptr; + EXPECT_FALSE(range_.IsUnitStride(phi, &offset)); + HInstruction* tce = range_.GenerateTripCount( + loop_header_->GetLoopInformation(), graph_, loop_preheader_); + ASSERT_TRUE(tce != nullptr); + ASSERT_TRUE(tce->IsNeg()); + last = tce->InputAt(0); + EXPECT_TRUE(last->IsSub()); + ExpectInt(0, last->InputAt(0)); + ExpectInt(1000, last->InputAt(1)); } TEST_F(InductionVarRangeTest, SymbolicTripCountUp) { @@ -851,27 +878,22 @@ TEST_F(InductionVarRangeTest, SymbolicTripCountUp) { // Verify lower is 0+0. ASSERT_TRUE(lower != nullptr); ASSERT_TRUE(lower->IsAdd()); - ASSERT_TRUE(lower->InputAt(0)->IsIntConstant()); - EXPECT_EQ(0, lower->InputAt(0)->AsIntConstant()->GetValue()); - ASSERT_TRUE(lower->InputAt(1)->IsIntConstant()); - EXPECT_EQ(0, lower->InputAt(1)->AsIntConstant()->GetValue()); + ExpectInt(0, lower->InputAt(0)); + ExpectInt(0, lower->InputAt(1)); // Verify upper is (V-1)+0. ASSERT_TRUE(upper != nullptr); ASSERT_TRUE(upper->IsAdd()); ASSERT_TRUE(upper->InputAt(0)->IsSub()); EXPECT_TRUE(upper->InputAt(0)->InputAt(0)->IsParameterValue()); - ASSERT_TRUE(upper->InputAt(0)->InputAt(1)->IsIntConstant()); - EXPECT_EQ(1, upper->InputAt(0)->InputAt(1)->AsIntConstant()->GetValue()); - ASSERT_TRUE(upper->InputAt(1)->IsIntConstant()); - EXPECT_EQ(0, upper->InputAt(1)->AsIntConstant()->GetValue()); + ExpectInt(1, upper->InputAt(0)->InputAt(1)); + ExpectInt(0, upper->InputAt(1)); // Verify taken-test is 0<V. HInstruction* taken = range_.GenerateTakenTest(increment_, graph_, loop_preheader_); ASSERT_TRUE(taken != nullptr); ASSERT_TRUE(taken->IsLessThan()); - ASSERT_TRUE(taken->InputAt(0)->IsIntConstant()); - EXPECT_EQ(0, taken->InputAt(0)->AsIntConstant()->GetValue()); + ExpectInt(0, taken->InputAt(0)); EXPECT_TRUE(taken->InputAt(1)->IsParameterValue()); // Replacement. @@ -880,6 +902,21 @@ TEST_F(InductionVarRangeTest, SymbolicTripCountUp) { EXPECT_FALSE(needs_finite_test); ExpectEqual(Value(1), v1); ExpectEqual(Value(y_, 1, 0), v2); + + // Loop logic. + int64_t tc = 0; + EXPECT_TRUE(range_.IsFinite(loop_header_->GetLoopInformation(), &tc)); + EXPECT_EQ(0, tc); // unknown + HInstruction* offset = nullptr; + EXPECT_TRUE(range_.IsUnitStride(phi, &offset)); + EXPECT_TRUE(offset == nullptr); + HInstruction* tce = range_.GenerateTripCount( + loop_header_->GetLoopInformation(), graph_, loop_preheader_); + ASSERT_TRUE(tce != nullptr); + EXPECT_TRUE(tce->IsSelect()); // guarded by taken-test + ExpectInt(0, tce->InputAt(0)); + EXPECT_TRUE(tce->InputAt(1)->IsParameterValue()); + EXPECT_TRUE(tce->InputAt(2)->IsLessThan()); } TEST_F(InductionVarRangeTest, SymbolicTripCountDown) { @@ -923,32 +960,26 @@ TEST_F(InductionVarRangeTest, SymbolicTripCountDown) { // Verify lower is 1000-((1000-V)-1). 
ASSERT_TRUE(lower != nullptr); ASSERT_TRUE(lower->IsSub()); - ASSERT_TRUE(lower->InputAt(0)->IsIntConstant()); - EXPECT_EQ(1000, lower->InputAt(0)->AsIntConstant()->GetValue()); + ExpectInt(1000, lower->InputAt(0)); lower = lower->InputAt(1); ASSERT_TRUE(lower->IsSub()); - ASSERT_TRUE(lower->InputAt(1)->IsIntConstant()); - EXPECT_EQ(1, lower->InputAt(1)->AsIntConstant()->GetValue()); + ExpectInt(1, lower->InputAt(1)); lower = lower->InputAt(0); ASSERT_TRUE(lower->IsSub()); - ASSERT_TRUE(lower->InputAt(0)->IsIntConstant()); - EXPECT_EQ(1000, lower->InputAt(0)->AsIntConstant()->GetValue()); + ExpectInt(1000, lower->InputAt(0)); EXPECT_TRUE(lower->InputAt(1)->IsParameterValue()); // Verify upper is 1000-0. ASSERT_TRUE(upper != nullptr); ASSERT_TRUE(upper->IsSub()); - ASSERT_TRUE(upper->InputAt(0)->IsIntConstant()); - EXPECT_EQ(1000, upper->InputAt(0)->AsIntConstant()->GetValue()); - ASSERT_TRUE(upper->InputAt(1)->IsIntConstant()); - EXPECT_EQ(0, upper->InputAt(1)->AsIntConstant()->GetValue()); + ExpectInt(1000, upper->InputAt(0)); + ExpectInt(0, upper->InputAt(1)); // Verify taken-test is 1000>V. HInstruction* taken = range_.GenerateTakenTest(increment_, graph_, loop_preheader_); ASSERT_TRUE(taken != nullptr); ASSERT_TRUE(taken->IsGreaterThan()); - ASSERT_TRUE(taken->InputAt(0)->IsIntConstant()); - EXPECT_EQ(1000, taken->InputAt(0)->AsIntConstant()->GetValue()); + ExpectInt(1000, taken->InputAt(0)); EXPECT_TRUE(taken->InputAt(1)->IsParameterValue()); // Replacement. @@ -957,6 +988,23 @@ EXPECT_FALSE(needs_finite_test); ExpectEqual(Value(y_, 1, 0), v1); ExpectEqual(Value(999), v2); + + // Loop logic. + int64_t tc = 0; + EXPECT_TRUE(range_.IsFinite(loop_header_->GetLoopInformation(), &tc)); + EXPECT_EQ(0, tc); // unknown + HInstruction* offset = nullptr; + EXPECT_FALSE(range_.IsUnitStride(phi, &offset)); + HInstruction* tce = range_.GenerateTripCount( + loop_header_->GetLoopInformation(), graph_, loop_preheader_); + ASSERT_TRUE(tce != nullptr); + EXPECT_TRUE(tce->IsSelect()); // guarded by taken-test + ExpectInt(0, tce->InputAt(0)); + EXPECT_TRUE(tce->InputAt(1)->IsSub()); + EXPECT_TRUE(tce->InputAt(2)->IsGreaterThan()); + tce = tce->InputAt(1); + ExpectInt(1000, tce->InputAt(0)); + EXPECT_TRUE(tce->InputAt(1)->IsParameterValue()); } } // namespace art diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc index 17d683f357..8df80adc9f 100644 --- a/compiler/optimizing/intrinsics.cc +++ b/compiler/optimizing/intrinsics.cc @@ -19,6 +19,7 @@ #include "art_method.h" #include "class_linker.h" #include "driver/compiler_driver.h" +#include "driver/compiler_options.h" #include "invoke_type.h" #include "mirror/dex_cache-inl.h" #include "nodes.h" @@ -178,4 +179,112 @@ INTRINSICS_LIST(OPTIMIZING_INTRINSICS) return os; } +void IntrinsicVisitor::ComputeIntegerValueOfLocations(HInvoke* invoke, + CodeGenerator* codegen, + Location return_location, + Location first_argument_location) { + if (Runtime::Current()->IsAotCompiler()) { + if (codegen->GetCompilerOptions().IsBootImage() || + codegen->GetCompilerOptions().GetCompilePic()) { + // TODO(ngeoffray): Support boot image compilation. + return; + } + } + + IntegerValueOfInfo info = ComputeIntegerValueOfInfo(); + + // Most common case is that we have found all we needed (classes are initialized + // and in the boot image). Bail if not.
+ if (info.integer_cache == nullptr || + info.integer == nullptr || + info.cache == nullptr || + info.value_offset == 0 || + // low and high cannot be 0, per the spec. + info.low == 0 || + info.high == 0) { + LOG(INFO) << "Integer.valueOf will not be optimized"; + return; + } + + // The intrinsic will call into the runtime if it needs to allocate a j.l.Integer. + LocationSummary* locations = new (invoke->GetBlock()->GetGraph()->GetArena()) LocationSummary( + invoke, LocationSummary::kCallOnMainOnly, kIntrinsified); + if (!invoke->InputAt(0)->IsConstant()) { + locations->SetInAt(0, Location::RequiresRegister()); + } + locations->AddTemp(first_argument_location); + locations->SetOut(return_location); +} + +IntrinsicVisitor::IntegerValueOfInfo IntrinsicVisitor::ComputeIntegerValueOfInfo() { + // Note that we could cache all of the data looked up here, but there's no good + // location for it. We don't want to add it to WellKnownClasses, to avoid creating global + // jni values. Adding it as state to the compiler singleton seems like the wrong + // separation of concerns. + // The need for this data should be pretty rare though. + + // The most common case is that the classes are in the boot image and initialized, + // which is easy to generate code for. We bail if not. + Thread* self = Thread::Current(); + ScopedObjectAccess soa(self); + Runtime* runtime = Runtime::Current(); + ClassLinker* class_linker = runtime->GetClassLinker(); + gc::Heap* heap = runtime->GetHeap(); + IntegerValueOfInfo info; + info.integer_cache = class_linker->FindSystemClass(self, "Ljava/lang/Integer$IntegerCache;"); + if (info.integer_cache == nullptr) { + self->ClearException(); + return info; + } + if (!heap->ObjectIsInBootImageSpace(info.integer_cache) || !info.integer_cache->IsInitialized()) { + // Optimization only works if the class is initialized and in the boot image. + return info; + } + info.integer = class_linker->FindSystemClass(self, "Ljava/lang/Integer;"); + if (info.integer == nullptr) { + self->ClearException(); + return info; + } + if (!heap->ObjectIsInBootImageSpace(info.integer) || !info.integer->IsInitialized()) { + // Optimization only works if the class is initialized and in the boot image. + return info; + } + + ArtField* field = info.integer_cache->FindDeclaredStaticField("cache", "[Ljava/lang/Integer;"); + if (field == nullptr) { + return info; + } + info.cache = static_cast<mirror::ObjectArray<mirror::Object>*>( + field->GetObject(info.integer_cache).Ptr()); + if (info.cache == nullptr) { + return info; + } + + if (!heap->ObjectIsInBootImageSpace(info.cache)) { + // Optimization only works if the object is in the boot image.
+ return info; + } + + field = info.integer->FindDeclaredInstanceField("value", "I"); + if (field == nullptr) { + return info; + } + info.value_offset = field->GetOffset().Int32Value(); + + field = info.integer_cache->FindDeclaredStaticField("low", "I"); + if (field == nullptr) { + return info; + } + info.low = field->GetInt(info.integer_cache); + + field = info.integer_cache->FindDeclaredStaticField("high", "I"); + if (field == nullptr) { + return info; + } + info.high = field->GetInt(info.integer_cache); + + DCHECK_EQ(info.cache->GetLength(), info.high - info.low + 1); + return info; +} + } // namespace art diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h index 6425e1313f..9da5a7fa3b 100644 --- a/compiler/optimizing/intrinsics.h +++ b/compiler/optimizing/intrinsics.h @@ -113,6 +113,39 @@ INTRINSICS_LIST(OPTIMIZING_INTRINSICS) codegen->GetMoveResolver()->EmitNativeCode(¶llel_move); } + static void ComputeIntegerValueOfLocations(HInvoke* invoke, + CodeGenerator* codegen, + Location return_location, + Location first_argument_location); + + // Temporary data structure for holding Integer.valueOf useful data. We only + // use it if the mirror::Class* are in the boot image, so it is fine to keep raw + // mirror::Class pointers in this structure. + struct IntegerValueOfInfo { + IntegerValueOfInfo() + : integer_cache(nullptr), + integer(nullptr), + cache(nullptr), + low(0), + high(0), + value_offset(0) {} + + // The java.lang.IntegerCache class. + mirror::Class* integer_cache; + // The java.lang.Integer class. + mirror::Class* integer; + // Value of java.lang.IntegerCache#cache. + mirror::ObjectArray<mirror::Object>* cache; + // Value of java.lang.IntegerCache#low. + int32_t low; + // Value of java.lang.IntegerCache#high. + int32_t high; + // The offset of java.lang.Integer.value. 
+ int32_t value_offset; + }; + + static IntegerValueOfInfo ComputeIntegerValueOfInfo(); + protected: IntrinsicVisitor() {} diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc index c262cf983d..86000e9356 100644 --- a/compiler/optimizing/intrinsics_arm.cc +++ b/compiler/optimizing/intrinsics_arm.cc @@ -129,6 +129,7 @@ class ReadBarrierSystemArrayCopySlowPathARM : public SlowPathCode { IntrinsicLocationsBuilderARM::IntrinsicLocationsBuilderARM(CodeGeneratorARM* codegen) : arena_(codegen->GetGraph()->GetArena()), + codegen_(codegen), assembler_(codegen->GetAssembler()), features_(codegen->GetInstructionSetFeatures()) {} @@ -2644,6 +2645,75 @@ void IntrinsicCodeGeneratorARM::VisitReferenceGetReferent(HInvoke* invoke) { __ Bind(slow_path->GetExitLabel()); } +void IntrinsicLocationsBuilderARM::VisitIntegerValueOf(HInvoke* invoke) { + InvokeRuntimeCallingConvention calling_convention; + IntrinsicVisitor::ComputeIntegerValueOfLocations( + invoke, + codegen_, + Location::RegisterLocation(R0), + Location::RegisterLocation(calling_convention.GetRegisterAt(0))); +} + +void IntrinsicCodeGeneratorARM::VisitIntegerValueOf(HInvoke* invoke) { + IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo(); + LocationSummary* locations = invoke->GetLocations(); + ArmAssembler* const assembler = GetAssembler(); + + Register out = locations->Out().AsRegister<Register>(); + InvokeRuntimeCallingConvention calling_convention; + Register argument = calling_convention.GetRegisterAt(0); + if (invoke->InputAt(0)->IsConstant()) { + int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue(); + if (value >= info.low && value <= info.high) { + // Just embed the j.l.Integer in the code. + ScopedObjectAccess soa(Thread::Current()); + mirror::Object* boxed = info.cache->Get(value + (-info.low)); + DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed)); + uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed)); + __ LoadLiteral(out, codegen_->DeduplicateBootImageAddressLiteral(address)); + } else { + // Allocate and initialize a new j.l.Integer. + // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the + // JIT object table. + uint32_t address = + dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer)); + __ LoadLiteral(argument, codegen_->DeduplicateBootImageAddressLiteral(address)); + codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc()); + CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>(); + __ LoadImmediate(IP, value); + __ StoreToOffset(kStoreWord, IP, out, info.value_offset); + // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation + // one. + codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore); + } + } else { + Register in = locations->InAt(0).AsRegister<Register>(); + // Check bounds of our cache. + __ AddConstant(out, in, -info.low); + __ CmpConstant(out, info.high - info.low + 1); + Label allocate, done; + __ b(&allocate, HS); + // If the value is within the bounds, load the j.l.Integer directly from the array. 
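// Aside (illustration, not part of the patch): the bounds check emitted in these
// code generators (add out, in, -low, then an unsigned compare against high - low + 1)
// folds the signed range test low <= in && in <= high into a single comparison. The
// whole fast path, modeled standalone with the boot-image cache faked by plain ints
// and the spec's default bounds (high may be raised at runtime):

#include <cassert>
#include <cstdint>

constexpr int32_t kLow = -128;
constexpr int32_t kHigh = 127;
static int32_t cache[kHigh - kLow + 1];  // Stands in for IntegerCache.cache.

const int32_t* ValueOf(int32_t in) {
  // Unsigned wraparound sends any in < kLow or in > kHigh above the array length.
  uint32_t index = static_cast<uint32_t>(in) - static_cast<uint32_t>(kLow);
  if (index < static_cast<uint32_t>(kHigh - kLow + 1)) {
    return &cache[index];    // In bounds: the shared, cached boxed value.
  }
  static int32_t allocated;  // Models the kQuickAllocObjectInitialized call.
  allocated = in;
  return &allocated;
}

int main() {
  assert(ValueOf(7) == ValueOf(7));     // In range: same object each time.
  assert(ValueOf(-129) != ValueOf(7));  // Below low: freshly allocated.
  assert(*ValueOf(1000) == 1000);       // Above high: freshly allocated.
  return 0;
}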
+ uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value(); + uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache)); + __ LoadLiteral(IP, codegen_->DeduplicateBootImageAddressLiteral(data_offset + address)); + codegen_->LoadFromShiftedRegOffset(Primitive::kPrimNot, locations->Out(), IP, out); + __ MaybeUnpoisonHeapReference(out); + __ b(&done); + __ Bind(&allocate); + // Otherwise allocate and initialize a new j.l.Integer. + address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer)); + __ LoadLiteral(argument, codegen_->DeduplicateBootImageAddressLiteral(address)); + codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc()); + CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>(); + __ StoreToOffset(kStoreWord, in, out, info.value_offset); + // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation + // one. + codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore); + __ Bind(&done); + } +} + UNIMPLEMENTED_INTRINSIC(ARM, MathMinDoubleDouble) UNIMPLEMENTED_INTRINSIC(ARM, MathMinFloatFloat) UNIMPLEMENTED_INTRINSIC(ARM, MathMaxDoubleDouble) diff --git a/compiler/optimizing/intrinsics_arm.h b/compiler/optimizing/intrinsics_arm.h index 7f20ea4b1f..2840863632 100644 --- a/compiler/optimizing/intrinsics_arm.h +++ b/compiler/optimizing/intrinsics_arm.h @@ -51,6 +51,7 @@ INTRINSICS_LIST(OPTIMIZING_INTRINSICS) private: ArenaAllocator* arena_; + CodeGenerator* codegen_; ArmAssembler* assembler_; const ArmInstructionSetFeatures& features_; diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc index 86e54294ae..6c3938c1a9 100644 --- a/compiler/optimizing/intrinsics_arm64.cc +++ b/compiler/optimizing/intrinsics_arm64.cc @@ -2924,6 +2924,79 @@ void IntrinsicCodeGeneratorARM64::VisitReferenceGetReferent(HInvoke* invoke) { __ Bind(slow_path->GetExitLabel()); } +void IntrinsicLocationsBuilderARM64::VisitIntegerValueOf(HInvoke* invoke) { + InvokeRuntimeCallingConvention calling_convention; + IntrinsicVisitor::ComputeIntegerValueOfLocations( + invoke, + codegen_, + calling_convention.GetReturnLocation(Primitive::kPrimNot), + Location::RegisterLocation(calling_convention.GetRegisterAt(0).GetCode())); +} + +void IntrinsicCodeGeneratorARM64::VisitIntegerValueOf(HInvoke* invoke) { + IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo(); + LocationSummary* locations = invoke->GetLocations(); + MacroAssembler* masm = GetVIXLAssembler(); + + Register out = RegisterFrom(locations->Out(), Primitive::kPrimNot); + UseScratchRegisterScope temps(masm); + Register temp = temps.AcquireW(); + InvokeRuntimeCallingConvention calling_convention; + Register argument = calling_convention.GetRegisterAt(0); + if (invoke->InputAt(0)->IsConstant()) { + int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue(); + if (value >= info.low && value <= info.high) { + // Just embed the j.l.Integer in the code. + ScopedObjectAccess soa(Thread::Current()); + mirror::Object* boxed = info.cache->Get(value + (-info.low)); + DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed)); + uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed)); + __ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(address)); + } else { + // Allocate and initialize a new j.l.Integer. 
+ // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the + // JIT object table. + uint32_t address = + dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer)); + __ Ldr(argument.W(), codegen_->DeduplicateBootImageAddressLiteral(address)); + codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc()); + CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>(); + __ Mov(temp.W(), value); + __ Str(temp.W(), HeapOperand(out.W(), info.value_offset)); + // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation + // one. + codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore); + } + } else { + Register in = RegisterFrom(locations->InAt(0), Primitive::kPrimInt); + // Check bounds of our cache. + __ Add(out.W(), in.W(), -info.low); + __ Cmp(out.W(), info.high - info.low + 1); + vixl::aarch64::Label allocate, done; + __ B(&allocate, hs); + // If the value is within the bounds, load the j.l.Integer directly from the array. + uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value(); + uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache)); + __ Ldr(temp.W(), codegen_->DeduplicateBootImageAddressLiteral(data_offset + address)); + MemOperand source = HeapOperand( + temp, out.X(), LSL, Primitive::ComponentSizeShift(Primitive::kPrimNot)); + codegen_->Load(Primitive::kPrimNot, out, source); + codegen_->GetAssembler()->MaybeUnpoisonHeapReference(out); + __ B(&done); + __ Bind(&allocate); + // Otherwise allocate and initialize a new j.l.Integer. + address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer)); + __ Ldr(argument.W(), codegen_->DeduplicateBootImageAddressLiteral(address)); + codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc()); + CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>(); + __ Str(in.W(), HeapOperand(out.W(), info.value_offset)); + // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation + // one. + codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore); + __ Bind(&done); + } +} + UNIMPLEMENTED_INTRINSIC(ARM64, IntegerHighestOneBit) UNIMPLEMENTED_INTRINSIC(ARM64, LongHighestOneBit) UNIMPLEMENTED_INTRINSIC(ARM64, IntegerLowestOneBit) diff --git a/compiler/optimizing/intrinsics_arm64.h b/compiler/optimizing/intrinsics_arm64.h index 28e41cb086..3c53517b28 100644 --- a/compiler/optimizing/intrinsics_arm64.h +++ b/compiler/optimizing/intrinsics_arm64.h @@ -38,7 +38,8 @@ class CodeGeneratorARM64; class IntrinsicLocationsBuilderARM64 FINAL : public IntrinsicVisitor { public: - explicit IntrinsicLocationsBuilderARM64(ArenaAllocator* arena) : arena_(arena) {} + explicit IntrinsicLocationsBuilderARM64(ArenaAllocator* arena, CodeGeneratorARM64* codegen) + : arena_(arena), codegen_(codegen) {} // Define visitor methods. 
@@ -56,6 +57,7 @@ INTRINSICS_LIST(OPTIMIZING_INTRINSICS) private: ArenaAllocator* arena_; + CodeGeneratorARM64* codegen_; DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderARM64); }; diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc index 70a3d38c13..aa89deae34 100644 --- a/compiler/optimizing/intrinsics_arm_vixl.cc +++ b/compiler/optimizing/intrinsics_arm_vixl.cc @@ -203,6 +203,7 @@ class ReadBarrierSystemArrayCopySlowPathARMVIXL : public SlowPathCodeARMVIXL { IntrinsicLocationsBuilderARMVIXL::IntrinsicLocationsBuilderARMVIXL(CodeGeneratorARMVIXL* codegen) : arena_(codegen->GetGraph()->GetArena()), + codegen_(codegen), assembler_(codegen->GetAssembler()), features_(codegen->GetInstructionSetFeatures()) {} @@ -2988,6 +2989,77 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathFloor(HInvoke* invoke) { __ Vrintm(F64, F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0)); } +void IntrinsicLocationsBuilderARMVIXL::VisitIntegerValueOf(HInvoke* invoke) { + InvokeRuntimeCallingConventionARMVIXL calling_convention; + IntrinsicVisitor::ComputeIntegerValueOfLocations( + invoke, + codegen_, + LocationFrom(r0), + LocationFrom(calling_convention.GetRegisterAt(0))); +} + +void IntrinsicCodeGeneratorARMVIXL::VisitIntegerValueOf(HInvoke* invoke) { + IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo(); + LocationSummary* locations = invoke->GetLocations(); + ArmVIXLAssembler* const assembler = GetAssembler(); + + vixl32::Register out = RegisterFrom(locations->Out()); + UseScratchRegisterScope temps(assembler->GetVIXLAssembler()); + vixl32::Register temp = temps.Acquire(); + InvokeRuntimeCallingConventionARMVIXL calling_convention; + vixl32::Register argument = calling_convention.GetRegisterAt(0); + if (invoke->InputAt(0)->IsConstant()) { + int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue(); + if (value >= info.low && value <= info.high) { + // Just embed the j.l.Integer in the code. + ScopedObjectAccess soa(Thread::Current()); + mirror::Object* boxed = info.cache->Get(value + (-info.low)); + DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed)); + uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed)); + __ Ldr(out, codegen_->DeduplicateBootImageAddressLiteral(address)); + } else { + // Allocate and initialize a new j.l.Integer. + // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the + // JIT object table. + uint32_t address = + dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer)); + __ Ldr(argument, codegen_->DeduplicateBootImageAddressLiteral(address)); + codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc()); + CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>(); + __ Mov(temp, value); + assembler->StoreToOffset(kStoreWord, temp, out, info.value_offset); + // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation + // one. + codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore); + } + } else { + vixl32::Register in = RegisterFrom(locations->InAt(0)); + // Check bounds of our cache. + __ Add(out, in, -info.low); + __ Cmp(out, info.high - info.low + 1); + vixl32::Label allocate, done; + __ B(hs, &allocate); + // If the value is within the bounds, load the j.l.Integer directly from the array. 
+ uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value(); + uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache)); + __ Ldr(temp, codegen_->DeduplicateBootImageAddressLiteral(data_offset + address)); + codegen_->LoadFromShiftedRegOffset(Primitive::kPrimNot, locations->Out(), temp, out); + assembler->MaybeUnpoisonHeapReference(out); + __ B(&done); + __ Bind(&allocate); + // Otherwise allocate and initialize a new j.l.Integer. + address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer)); + __ Ldr(argument, codegen_->DeduplicateBootImageAddressLiteral(address)); + codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc()); + CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>(); + assembler->StoreToOffset(kStoreWord, in, out, info.value_offset); + // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation + // one. + codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore); + __ Bind(&done); + } +} + UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathRoundDouble) // Could be done by changing rounding mode, maybe? UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathRoundFloat) // Could be done by changing rounding mode, maybe? UNIMPLEMENTED_INTRINSIC(ARMVIXL, UnsafeCASLong) // High register pressure. diff --git a/compiler/optimizing/intrinsics_arm_vixl.h b/compiler/optimizing/intrinsics_arm_vixl.h index 6e79cb76a1..023cba1349 100644 --- a/compiler/optimizing/intrinsics_arm_vixl.h +++ b/compiler/optimizing/intrinsics_arm_vixl.h @@ -47,6 +47,7 @@ INTRINSICS_LIST(OPTIMIZING_INTRINSICS) private: ArenaAllocator* arena_; + CodeGenerator* codegen_; ArmVIXLAssembler* assembler_; const ArmInstructionSetFeatures& features_; diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc index 64a68403e9..ba006edfa2 100644 --- a/compiler/optimizing/intrinsics_mips.cc +++ b/compiler/optimizing/intrinsics_mips.cc @@ -1572,6 +1572,10 @@ static void GenUnsafeGet(HInvoke* invoke, __ Lwr(trg, TMP, 0); __ Lwl(trg, TMP, 3); } + + if (type == Primitive::kPrimNot) { + __ MaybeUnpoisonHeapReference(trg); + } } } @@ -1663,6 +1667,11 @@ static void GenUnsafePut(LocationSummary* locations, if ((type == Primitive::kPrimInt) || (type == Primitive::kPrimNot)) { Register value = locations->InAt(3).AsRegister<Register>(); + if (kPoisonHeapReferences && type == Primitive::kPrimNot) { + __ PoisonHeapReference(AT, value); + value = AT; + } + if (is_R6) { __ Sw(value, TMP, 0); } else { @@ -1852,13 +1861,23 @@ static void GenCas(LocationSummary* locations, Primitive::Type type, CodeGenerat codegen->MarkGCCard(base, value, value_can_be_null); } + MipsLabel loop_head, exit_loop; + __ Addu(TMP, base, offset_lo); + + if (kPoisonHeapReferences && type == Primitive::kPrimNot) { + __ PoisonHeapReference(expected); + // Do not poison `value`, if it is the same register as + // `expected`, which has just been poisoned. 
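The `value != expected` guard that follows exists because poisoning is plain negation, an involution: poisoning a register shared by `expected` and `value` twice would silently restore the unpoisoned reference before the LL/SC loop compares against poisoned memory. A one-line model:

#include <cstdint>

uint32_t Poison(uint32_t ref) { return 0u - ref; }
// Poison(Poison(x)) == x for every x, so a shared register is poisoned once.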
+ if (value != expected) { + __ PoisonHeapReference(value); + } + } + // do { // tmp_value = [tmp_ptr] - expected; // } while (tmp_value == 0 && failure([tmp_ptr] <- r_new_value)); // result = tmp_value != 0; - MipsLabel loop_head, exit_loop; - __ Addu(TMP, base, offset_lo); __ Sync(0); __ Bind(&loop_head); if ((type == Primitive::kPrimInt) || (type == Primitive::kPrimNot)) { @@ -1868,8 +1887,8 @@ static void GenCas(LocationSummary* locations, Primitive::Type type, CodeGenerat __ LlR2(out, TMP); } } else { - LOG(FATAL) << "Unsupported op size " << type; - UNREACHABLE(); + LOG(FATAL) << "Unsupported op size " << type; + UNREACHABLE(); } __ Subu(out, out, expected); // If we didn't get the 'expected' __ Sltiu(out, out, 1); // value, set 'out' to false, and @@ -1894,6 +1913,15 @@ static void GenCas(LocationSummary* locations, Primitive::Type type, CodeGenerat // cycle atomically then retry. __ Bind(&exit_loop); __ Sync(0); + + if (kPoisonHeapReferences && type == Primitive::kPrimNot) { + __ UnpoisonHeapReference(expected); + // Do not unpoison `value`, if it is the same register as + // `expected`, which has just been unpoisoned. + if (value != expected) { + __ UnpoisonHeapReference(value); + } + } } // boolean sun.misc.Unsafe.compareAndSwapInt(Object o, long offset, int expected, int x) @@ -1989,20 +2017,24 @@ void IntrinsicCodeGeneratorMIPS::VisitStringEquals(HInvoke* invoke) { __ LoadConst32(out, 1); return; } - - // Check if input is null, return false if it is. - __ Beqz(arg, &return_false); + StringEqualsOptimizations optimizations(invoke); + if (!optimizations.GetArgumentNotNull()) { + // Check if input is null, return false if it is. + __ Beqz(arg, &return_false); + } // Reference equality check, return true if same reference. __ Beq(str, arg, &return_true); - // Instanceof check for the argument by comparing class fields. - // All string objects must have the same type since String cannot be subclassed. - // Receiver must be a string object, so its class field is equal to all strings' class fields. - // If the argument is a string object, its class field must be equal to receiver's class field. - __ Lw(temp1, str, class_offset); - __ Lw(temp2, arg, class_offset); - __ Bne(temp1, temp2, &return_false); + if (!optimizations.GetArgumentIsString()) { + // Instanceof check for the argument by comparing class fields. + // All string objects must have the same type since String cannot be subclassed. + // Receiver must be a string object, so its class field is equal to all strings' class fields. + // If the argument is a string object, its class field must be equal to receiver's class field. + __ Lw(temp1, str, class_offset); + __ Lw(temp2, arg, class_offset); + __ Bne(temp1, temp2, &return_false); + } // Load `count` fields of this and argument strings. 
__ Lw(temp1, str, count_offset); @@ -2682,6 +2714,8 @@ UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndSetInt) UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndSetLong) UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndSetObject) +UNIMPLEMENTED_INTRINSIC(MIPS, IntegerValueOf) + UNREACHABLE_INTRINSICS(MIPS) #undef __ diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc index 3888828722..21c5074a1c 100644 --- a/compiler/optimizing/intrinsics_mips64.cc +++ b/compiler/optimizing/intrinsics_mips64.cc @@ -1187,6 +1187,7 @@ static void GenUnsafeGet(HInvoke* invoke, case Primitive::kPrimNot: __ Lwu(trg, TMP, 0); + __ MaybeUnpoisonHeapReference(trg); break; case Primitive::kPrimLong: @@ -1285,7 +1286,12 @@ static void GenUnsafePut(LocationSummary* locations, switch (type) { case Primitive::kPrimInt: case Primitive::kPrimNot: - __ Sw(value, TMP, 0); + if (kPoisonHeapReferences && type == Primitive::kPrimNot) { + __ PoisonHeapReference(AT, value); + __ Sw(AT, TMP, 0); + } else { + __ Sw(value, TMP, 0); + } break; case Primitive::kPrimLong: @@ -1454,13 +1460,23 @@ static void GenCas(LocationSummary* locations, Primitive::Type type, CodeGenerat codegen->MarkGCCard(base, value, value_can_be_null); } + Mips64Label loop_head, exit_loop; + __ Daddu(TMP, base, offset); + + if (kPoisonHeapReferences && type == Primitive::kPrimNot) { + __ PoisonHeapReference(expected); + // Do not poison `value`, if it is the same register as + // `expected`, which has just been poisoned. + if (value != expected) { + __ PoisonHeapReference(value); + } + } + // do { // tmp_value = [tmp_ptr] - expected; // } while (tmp_value == 0 && failure([tmp_ptr] <- r_new_value)); // result = tmp_value != 0; - Mips64Label loop_head, exit_loop; - __ Daddu(TMP, base, offset); __ Sync(0); __ Bind(&loop_head); if (type == Primitive::kPrimLong) { @@ -1469,6 +1485,11 @@ static void GenCas(LocationSummary* locations, Primitive::Type type, CodeGenerat // Note: We will need a read barrier here, when read barrier // support is added to the MIPS64 back end. __ Ll(out, TMP); + if (type == Primitive::kPrimNot) { + // The LL instruction sign-extends the 32-bit value, but + // 32-bit references must be zero-extended. Zero-extend `out`. + __ Dext(out, out, 0, 32); + } } __ Dsubu(out, out, expected); // If we didn't get the 'expected' __ Sltiu(out, out, 1); // value, set 'out' to false, and @@ -1487,6 +1508,15 @@ static void GenCas(LocationSummary* locations, Primitive::Type type, CodeGenerat // cycle atomically then retry. __ Bind(&exit_loop); __ Sync(0); + + if (kPoisonHeapReferences && type == Primitive::kPrimNot) { + __ UnpoisonHeapReference(expected); + // Do not unpoison `value`, if it is the same register as + // `expected`, which has just been unpoisoned. + if (value != expected) { + __ UnpoisonHeapReference(value); + } + } } // boolean sun.misc.Unsafe.compareAndSwapInt(Object o, long offset, int expected, int x) @@ -1593,19 +1623,24 @@ void IntrinsicCodeGeneratorMIPS64::VisitStringEquals(HInvoke* invoke) { return; } - // Check if input is null, return false if it is. - __ Beqzc(arg, &return_false); + StringEqualsOptimizations optimizations(invoke); + if (!optimizations.GetArgumentNotNull()) { + // Check if input is null, return false if it is. + __ Beqzc(arg, &return_false); + } // Reference equality check, return true if same reference. __ Beqc(str, arg, &return_true); - // Instanceof check for the argument by comparing class fields. - // All string objects must have the same type since String cannot be subclassed. 
- // Receiver must be a string object, so its class field is equal to all strings' class fields. - // If the argument is a string object, its class field must be equal to receiver's class field. - __ Lw(temp1, str, class_offset); - __ Lw(temp2, arg, class_offset); - __ Bnec(temp1, temp2, &return_false); + if (!optimizations.GetArgumentIsString()) { + // Instanceof check for the argument by comparing class fields. + // All string objects must have the same type since String cannot be subclassed. + // Receiver must be a string object, so its class field is equal to all strings' class fields. + // If the argument is a string object, its class field must be equal to receiver's class field. + __ Lw(temp1, str, class_offset); + __ Lw(temp2, arg, class_offset); + __ Bnec(temp1, temp2, &return_false); + } // Load `count` fields of this and argument strings. __ Lw(temp1, str, count_offset); @@ -2075,6 +2110,8 @@ UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetInt) UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetLong) UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetObject) +UNIMPLEMENTED_INTRINSIC(MIPS64, IntegerValueOf) + UNREACHABLE_INTRINSICS(MIPS64) #undef __ diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc index e1b7ea53b4..a671788ff5 100644 --- a/compiler/optimizing/intrinsics_x86.cc +++ b/compiler/optimizing/intrinsics_x86.cc @@ -3335,6 +3335,65 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) { __ Bind(intrinsic_slow_path->GetExitLabel()); } +void IntrinsicLocationsBuilderX86::VisitIntegerValueOf(HInvoke* invoke) { + InvokeRuntimeCallingConvention calling_convention; + IntrinsicVisitor::ComputeIntegerValueOfLocations( + invoke, + codegen_, + Location::RegisterLocation(EAX), + Location::RegisterLocation(calling_convention.GetRegisterAt(0))); +} + +void IntrinsicCodeGeneratorX86::VisitIntegerValueOf(HInvoke* invoke) { + IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo(); + LocationSummary* locations = invoke->GetLocations(); + X86Assembler* assembler = GetAssembler(); + + Register out = locations->Out().AsRegister<Register>(); + InvokeRuntimeCallingConvention calling_convention; + if (invoke->InputAt(0)->IsConstant()) { + int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue(); + if (value >= info.low && value <= info.high) { + // Just embed the j.l.Integer in the code. + ScopedObjectAccess soa(Thread::Current()); + mirror::Object* boxed = info.cache->Get(value + (-info.low)); + DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed)); + uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed)); + __ movl(out, Immediate(address)); + } else { + // Allocate and initialize a new j.l.Integer. + // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the + // JIT object table. + uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer)); + __ movl(calling_convention.GetRegisterAt(0), Immediate(address)); + codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc()); + CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>(); + __ movl(Address(out, info.value_offset), Immediate(value)); + } + } else { + Register in = locations->InAt(0).AsRegister<Register>(); + // Check bounds of our cache. 
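The leal/cmpl pair that follows performs the two-sided test low <= in <= high with one unsigned comparison; values below `low` wrap around to large unsigned numbers after the subtraction. In C++ terms:

#include <cstdint>

bool InCacheRange(int32_t in, int32_t low, int32_t high) {
  uint32_t biased = static_cast<uint32_t>(in - low);      // leal out, [in - low]
  return biased < static_cast<uint32_t>(high - low + 1);  // cmpl; jae if out of range
}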
+ __ leal(out, Address(in, -info.low)); + __ cmpl(out, Immediate(info.high - info.low + 1)); + NearLabel allocate, done; + __ j(kAboveEqual, &allocate); + // If the value is within the bounds, load the j.l.Integer directly from the array. + uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value(); + uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache)); + __ movl(out, Address(out, TIMES_4, data_offset + address)); + __ MaybeUnpoisonHeapReference(out); + __ jmp(&done); + __ Bind(&allocate); + // Otherwise allocate and initialize a new j.l.Integer. + address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer)); + __ movl(calling_convention.GetRegisterAt(0), Immediate(address)); + codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc()); + CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>(); + __ movl(Address(out, info.value_offset), in); + __ Bind(&done); + } +} + UNIMPLEMENTED_INTRINSIC(X86, MathRoundDouble) UNIMPLEMENTED_INTRINSIC(X86, FloatIsInfinite) UNIMPLEMENTED_INTRINSIC(X86, DoubleIsInfinite) diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc index 05d270a4e6..9a6dd985a4 100644 --- a/compiler/optimizing/intrinsics_x86_64.cc +++ b/compiler/optimizing/intrinsics_x86_64.cc @@ -39,7 +39,6 @@ IntrinsicLocationsBuilderX86_64::IntrinsicLocationsBuilderX86_64(CodeGeneratorX8 : arena_(codegen->GetGraph()->GetArena()), codegen_(codegen) { } - X86_64Assembler* IntrinsicCodeGeneratorX86_64::GetAssembler() { return down_cast<X86_64Assembler*>(codegen_->GetAssembler()); } @@ -2995,6 +2994,65 @@ void IntrinsicCodeGeneratorX86_64::VisitReferenceGetReferent(HInvoke* invoke) { __ Bind(slow_path->GetExitLabel()); } +void IntrinsicLocationsBuilderX86_64::VisitIntegerValueOf(HInvoke* invoke) { + InvokeRuntimeCallingConvention calling_convention; + IntrinsicVisitor::ComputeIntegerValueOfLocations( + invoke, + codegen_, + Location::RegisterLocation(RAX), + Location::RegisterLocation(calling_convention.GetRegisterAt(0))); +} + +void IntrinsicCodeGeneratorX86_64::VisitIntegerValueOf(HInvoke* invoke) { + IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo(); + LocationSummary* locations = invoke->GetLocations(); + X86_64Assembler* assembler = GetAssembler(); + + CpuRegister out = locations->Out().AsRegister<CpuRegister>(); + InvokeRuntimeCallingConvention calling_convention; + if (invoke->InputAt(0)->IsConstant()) { + int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue(); + if (value >= info.low && value <= info.high) { + // Just embed the j.l.Integer in the code. + ScopedObjectAccess soa(Thread::Current()); + mirror::Object* boxed = info.cache->Get(value + (-info.low)); + DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed)); + uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed)); + __ movl(out, Immediate(address)); + } else { + // Allocate and initialize a new j.l.Integer. + // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the + // JIT object table. 
+ uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer)); + __ movl(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(address)); + codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc()); + CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>(); + __ movl(Address(out, info.value_offset), Immediate(value)); + } + } else { + CpuRegister in = locations->InAt(0).AsRegister<CpuRegister>(); + // Check bounds of our cache. + __ leal(out, Address(in, -info.low)); + __ cmpl(out, Immediate(info.high - info.low + 1)); + NearLabel allocate, done; + __ j(kAboveEqual, &allocate); + // If the value is within the bounds, load the j.l.Integer directly from the array. + uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value(); + uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache)); + __ movl(out, Address(out, TIMES_4, data_offset + address)); + __ MaybeUnpoisonHeapReference(out); + __ jmp(&done); + __ Bind(&allocate); + // Otherwise allocate and initialize a new j.l.Integer. + address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer)); + __ movl(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(address)); + codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc()); + CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>(); + __ movl(Address(out, info.value_offset), in); + __ Bind(&done); + } +} + UNIMPLEMENTED_INTRINSIC(X86_64, FloatIsInfinite) UNIMPLEMENTED_INTRINSIC(X86_64, DoubleIsInfinite) diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h index 8a9e61875a..c39aed2c6a 100644 --- a/compiler/optimizing/nodes.h +++ b/compiler/optimizing/nodes.h @@ -1914,6 +1914,9 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> { virtual bool IsControlFlow() const { return false; } + // Can the instruction throw? + // TODO: We should rename to CanVisiblyThrow, as some instructions (like HNewInstance), + // could throw OOME, but it is still OK to remove them if they are unused. virtual bool CanThrow() const { return false; } bool CanThrowIntoCatchBlock() const { return CanThrow() && block_->IsTryBlock(); } diff --git a/compiler/utils/arm/assembler_arm_vixl.h b/compiler/utils/arm/assembler_arm_vixl.h index 322f6c4d70..e81e767575 100644 --- a/compiler/utils/arm/assembler_arm_vixl.h +++ b/compiler/utils/arm/assembler_arm_vixl.h @@ -135,6 +135,16 @@ class ArmVIXLMacroAssembler FINAL : public vixl32::MacroAssembler { // jumping within 2KB range. For B(cond, label), because the supported branch range is 256 // bytes; we use the far_target hint to try to use 16-bit T1 encoding for short range jumps. void B(vixl32::Condition cond, vixl32::Label* label, bool is_far_target = true); + + // Use literal for generating double constant if it doesn't fit VMOV encoding. 
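VMOV's immediate form can only encode doubles of the shape (-1)^s * (16..31)/16 * 2^e with e in [-3, 4]. A rough, self-contained model of the bit test vixl::VFP::IsImmFP64 performs (a sketch, not the vixl source):

#include <cstdint>
#include <cstring>

bool FitsVmovImmediate(double imm) {
  uint64_t bits;
  std::memcpy(&bits, &imm, sizeof(bits));
  if ((bits & 0x0000ffffffffffffULL) != 0) {
    return false;                          // low 48 mantissa bits must be zero
  }
  uint64_t rep = (bits >> 54) & 0xff;      // exponent bits 61..54
  bool bit62 = ((bits >> 62) & 1) != 0;
  return rep == (bit62 ? 0x00u : 0xffu);   // all eight must equal ~bit62
}

Anything that fails this test falls back to a VLDR from a pool literal, which the wrapper below arranges.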
+ void Vmov(vixl32::DRegister rd, double imm) { + if (vixl::VFP::IsImmFP64(imm)) { + MacroAssembler::Vmov(rd, imm); + } else { + MacroAssembler::Vldr(rd, imm); + } + } + using MacroAssembler::Vmov; }; class ArmVIXLAssembler FINAL : public Assembler { diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc index 5e83e825ed..2e2231b07d 100644 --- a/compiler/utils/mips/assembler_mips.cc +++ b/compiler/utils/mips/assembler_mips.cc @@ -3475,8 +3475,8 @@ void MipsAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberO CHECK(dest.IsCoreRegister() && base.AsMips().IsCoreRegister()); LoadFromOffset(kLoadWord, dest.AsCoreRegister(), base.AsMips().AsCoreRegister(), offs.Int32Value()); - if (kPoisonHeapReferences && unpoison_reference) { - Subu(dest.AsCoreRegister(), ZERO, dest.AsCoreRegister()); + if (unpoison_reference) { + MaybeUnpoisonHeapReference(dest.AsCoreRegister()); } } diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h index 2fca185ec3..47ddf2547a 100644 --- a/compiler/utils/mips/assembler_mips.h +++ b/compiler/utils/mips/assembler_mips.h @@ -727,6 +727,38 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi void Pop(Register rd); void PopAndReturn(Register rd, Register rt); + // + // Heap poisoning. + // + + // Poison a heap reference contained in `src` and store it in `dst`. + void PoisonHeapReference(Register dst, Register src) { + // dst = -src. + Subu(dst, ZERO, src); + } + // Poison a heap reference contained in `reg`. + void PoisonHeapReference(Register reg) { + // reg = -reg. + PoisonHeapReference(reg, reg); + } + // Unpoison a heap reference contained in `reg`. + void UnpoisonHeapReference(Register reg) { + // reg = -reg. + Subu(reg, ZERO, reg); + } + // Poison a heap reference contained in `reg` if heap poisoning is enabled. + void MaybePoisonHeapReference(Register reg) { + if (kPoisonHeapReferences) { + PoisonHeapReference(reg); + } + } + // Unpoison a heap reference contained in `reg` if heap poisoning is enabled. 
+ void MaybeUnpoisonHeapReference(Register reg) { + if (kPoisonHeapReferences) { + UnpoisonHeapReference(reg); + } + } + void Bind(Label* label) OVERRIDE { Bind(down_cast<MipsLabel*>(label)); } diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc index 998f2c709b..0f86f8843d 100644 --- a/compiler/utils/mips64/assembler_mips64.cc +++ b/compiler/utils/mips64/assembler_mips64.cc @@ -488,6 +488,11 @@ void Mips64Assembler::Aui(GpuRegister rt, GpuRegister rs, uint16_t imm16) { EmitI(0xf, rs, rt, imm16); } +void Mips64Assembler::Daui(GpuRegister rt, GpuRegister rs, uint16_t imm16) { + CHECK_NE(rs, ZERO); + EmitI(0x1d, rs, rt, imm16); +} + void Mips64Assembler::Dahi(GpuRegister rs, uint16_t imm16) { EmitI(1, rs, static_cast<GpuRegister>(6), imm16); } @@ -2367,12 +2372,8 @@ void Mips64Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base, Membe CHECK(dest.IsGpuRegister() && base.AsMips64().IsGpuRegister()); LoadFromOffset(kLoadUnsignedWord, dest.AsGpuRegister(), base.AsMips64().AsGpuRegister(), offs.Int32Value()); - if (kPoisonHeapReferences && unpoison_reference) { - // TODO: review - // Negate the 32-bit ref - Dsubu(dest.AsGpuRegister(), ZERO, dest.AsGpuRegister()); - // And constrain it to 32 bits (zero-extend into bits 32 through 63) as on Arm64 and x86/64 - Dext(dest.AsGpuRegister(), dest.AsGpuRegister(), 0, 32); + if (unpoison_reference) { + MaybeUnpoisonHeapReference(dest.AsGpuRegister()); } } diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h index a0a1db634d..ee15c6da80 100644 --- a/compiler/utils/mips64/assembler_mips64.h +++ b/compiler/utils/mips64/assembler_mips64.h @@ -512,6 +512,7 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer void Ldpc(GpuRegister rs, uint32_t imm18); // MIPS64 void Lui(GpuRegister rt, uint16_t imm16); void Aui(GpuRegister rt, GpuRegister rs, uint16_t imm16); + void Daui(GpuRegister rt, GpuRegister rs, uint16_t imm16); // MIPS64 void Dahi(GpuRegister rs, uint16_t imm16); // MIPS64 void Dati(GpuRegister rs, uint16_t imm16); // MIPS64 void Sync(uint32_t stype); @@ -654,6 +655,44 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer void Addiu32(GpuRegister rt, GpuRegister rs, int32_t value); void Daddiu64(GpuRegister rt, GpuRegister rs, int64_t value, GpuRegister rtmp = AT); // MIPS64 + // + // Heap poisoning. + // + + // Poison a heap reference contained in `src` and store it in `dst`. + void PoisonHeapReference(GpuRegister dst, GpuRegister src) { + // dst = -src. + // Negate the 32-bit ref. + Dsubu(dst, ZERO, src); + // And constrain it to 32 bits (zero-extend into bits 32 through 63) as on Arm64 and x86/64. + Dext(dst, dst, 0, 32); + } + // Poison a heap reference contained in `reg`. + void PoisonHeapReference(GpuRegister reg) { + // reg = -reg. + PoisonHeapReference(reg, reg); + } + // Unpoison a heap reference contained in `reg`. + void UnpoisonHeapReference(GpuRegister reg) { + // reg = -reg. + // Negate the 32-bit ref. + Dsubu(reg, ZERO, reg); + // And constrain it to 32 bits (zero-extend into bits 32 through 63) as on Arm64 and x86/64. + Dext(reg, reg, 0, 32); + } + // Poison a heap reference contained in `reg` if heap poisoning is enabled. + void MaybePoisonHeapReference(GpuRegister reg) { + if (kPoisonHeapReferences) { + PoisonHeapReference(reg); + } + } + // Unpoison a heap reference contained in `reg` if heap poisoning is enabled. 
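On MIPS64 the poison arithmetic has to keep the reference zero-extended in the low 32 bits, hence the Dext after the negation. A model of the two-instruction sequence; it is an involution, which is why poison and unpoison share the same arithmetic:

#include <cstdint>

uint64_t PoisonRef64(uint64_t ref) {
  uint64_t negated = 0 - ref;        // Dsubu reg, ZERO, reg
  return negated & 0xffffffffULL;    // Dext reg, reg, 0, 32 (zero-extend)
}
// PoisonRef64(PoisonRef64(x)) == x for any x < 2^32.

The guarded Maybe* helpers collapse to no-ops when kPoisonHeapReferences is off.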
+ void MaybeUnpoisonHeapReference(GpuRegister reg) { + if (kPoisonHeapReferences) { + UnpoisonHeapReference(reg); + } + } + void Bind(Label* label) OVERRIDE { Bind(down_cast<Mips64Label*>(label)); } diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc index 74b8f068c1..96a02c46d7 100644 --- a/compiler/utils/mips64/assembler_mips64_test.cc +++ b/compiler/utils/mips64/assembler_mips64_test.cc @@ -1269,6 +1269,24 @@ TEST_F(AssemblerMIPS64Test, Lui) { DriverStr(RepeatRIb(&mips64::Mips64Assembler::Lui, 16, "lui ${reg}, {imm}"), "lui"); } +TEST_F(AssemblerMIPS64Test, Daui) { + std::vector<mips64::GpuRegister*> reg1_registers = GetRegisters(); + std::vector<mips64::GpuRegister*> reg2_registers = GetRegisters(); + reg2_registers.erase(reg2_registers.begin()); // reg2 can't be ZERO, remove it. + std::vector<int64_t> imms = CreateImmediateValuesBits(/* imm_bits */ 16, /* as_uint */ true); + WarnOnCombinations(reg1_registers.size() * reg2_registers.size() * imms.size()); + std::ostringstream expected; + for (mips64::GpuRegister* reg1 : reg1_registers) { + for (mips64::GpuRegister* reg2 : reg2_registers) { + for (int64_t imm : imms) { + __ Daui(*reg1, *reg2, imm); + expected << "daui $" << *reg1 << ", $" << *reg2 << ", " << imm << "\n"; + } + } + } + DriverStr(expected.str(), "daui"); +} + TEST_F(AssemblerMIPS64Test, Dahi) { DriverStr(RepeatRIb(&mips64::Mips64Assembler::Dahi, 16, "dahi ${reg}, ${reg}, {imm}"), "dahi"); } diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc index be756286fc..dbae70ec14 100644 --- a/dex2oat/dex2oat.cc +++ b/dex2oat/dex2oat.cc @@ -2821,6 +2821,9 @@ static int CompileImage(Dex2Oat& dex2oat) { // When given --host, finish early without stripping. if (dex2oat.IsHost()) { + if (!dex2oat.FlushCloseOutputFiles()) { + return EXIT_FAILURE; + } dex2oat.DumpTiming(); return EXIT_SUCCESS; } diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc index 2c0b125fb7..b79050e9d0 100644 --- a/dex2oat/dex2oat_test.cc +++ b/dex2oat/dex2oat_test.cc @@ -37,6 +37,8 @@ namespace art { +using android::base::StringPrintf; + class Dex2oatTest : public Dex2oatEnvironmentTest { public: virtual void TearDown() OVERRIDE { @@ -52,10 +54,18 @@ class Dex2oatTest : public Dex2oatEnvironmentTest { const std::string& odex_location, CompilerFilter::Filter filter, const std::vector<std::string>& extra_args = {}, - bool expect_success = true) { + bool expect_success = true, + bool use_fd = false) { + std::unique_ptr<File> oat_file; std::vector<std::string> args; args.push_back("--dex-file=" + dex_location); - args.push_back("--oat-file=" + odex_location); + if (use_fd) { + oat_file.reset(OS::CreateEmptyFile(odex_location.c_str())); + CHECK(oat_file != nullptr) << odex_location; + args.push_back("--oat-fd=" + std::to_string(oat_file->Fd())); + } else { + args.push_back("--oat-file=" + odex_location); + } args.push_back("--compiler-filter=" + CompilerFilter::NameOfFilter(filter)); args.push_back("--runtime-arg"); args.push_back("-Xnorelocate"); @@ -64,6 +74,9 @@ class Dex2oatTest : public Dex2oatEnvironmentTest { std::string error_msg; bool success = Dex2Oat(args, &error_msg); + if (oat_file != nullptr) { + ASSERT_EQ(oat_file->FlushClose(), 0) << "Could not flush and close oat file"; + } if (expect_success) { ASSERT_TRUE(success) << error_msg << std::endl << output_; @@ -582,12 +595,11 @@ class Dex2oatLayoutTest : public Dex2oatTest { ASSERT_TRUE(result); } - void RunTest() { - std::string dex_location = GetScratchDir() + "/DexNoOat.jar"; - 
std::string profile_location = GetScratchDir() + "/primary.prof"; - std::string odex_location = GetOdexDir() + "/DexOdexNoOat.odex"; - - Copy(GetDexSrc2(), dex_location); + void CompileProfileOdex(const std::string& dex_location, + const std::string& odex_location, + bool use_fd, + const std::vector<std::string>& extra_args = {}) { + const std::string profile_location = GetScratchDir() + "/primary.prof"; const char* location = dex_location.c_str(); std::string error_msg; std::vector<std::unique_ptr<const DexFile>> dex_files; @@ -595,14 +607,61 @@ class Dex2oatLayoutTest : public Dex2oatTest { EXPECT_EQ(dex_files.size(), 1U); std::unique_ptr<const DexFile>& dex_file = dex_files[0]; GenerateProfile(profile_location, dex_location, dex_file->GetLocationChecksum()); + std::vector<std::string> copy(extra_args); + copy.push_back("--profile-file=" + profile_location); + GenerateOdexForTest(dex_location, + odex_location, + CompilerFilter::kSpeedProfile, + copy, + /* expect_success */ true, + use_fd); + } - const std::vector<std::string>& extra_args = { "--profile-file=" + profile_location }; - GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kSpeedProfile, extra_args); + void RunTest() { + std::string dex_location = GetScratchDir() + "/DexNoOat.jar"; + std::string odex_location = GetOdexDir() + "/DexOdexNoOat.odex"; + Copy(GetDexSrc2(), dex_location); + + CompileProfileOdex(dex_location, odex_location, /* use_fd */ false); CheckValidity(); ASSERT_TRUE(success_); CheckResult(dex_location, odex_location); } + + void RunTestVDex() { + std::string dex_location = GetScratchDir() + "/DexNoOat.jar"; + std::string odex_location = GetOdexDir() + "/DexOdexNoOat.odex"; + std::string vdex_location = GetOdexDir() + "/DexOdexNoOat.vdex"; + Copy(GetDexSrc2(), dex_location); + + std::unique_ptr<File> vdex_file1(OS::CreateEmptyFile(vdex_location.c_str())); + CHECK(vdex_file1 != nullptr) << vdex_location; + ScratchFile vdex_file2; + { + std::string input_vdex = "--input-vdex-fd=-1"; + std::string output_vdex = StringPrintf("--output-vdex-fd=%d", vdex_file1->Fd()); + CompileProfileOdex(dex_location, + odex_location, + /* use_fd */ true, + { input_vdex, output_vdex }); + EXPECT_GT(vdex_file1->GetLength(), 0u); + } + { + std::string input_vdex = StringPrintf("--input-vdex-fd=%d", vdex_file1->Fd()); + std::string output_vdex = StringPrintf("--output-vdex-fd=%d", vdex_file2.GetFd()); + CompileProfileOdex(dex_location, + odex_location, + /* use_fd */ true, + { input_vdex, output_vdex }); + EXPECT_GT(vdex_file2.GetFile()->GetLength(), 0u); + } + ASSERT_EQ(vdex_file1->FlushCloseOrErase(), 0) << "Could not flush and close vdex file"; + CheckValidity(); + ASSERT_TRUE(success_); + CheckResult(dex_location, odex_location); + } + void CheckResult(const std::string& dex_location, const std::string& odex_location) { // Host/target independent checks. std::string error_msg; @@ -641,29 +700,33 @@ class Dex2oatLayoutTest : public Dex2oatTest { EXPECT_EQ(odex_file->GetCompilerFilter(), CompilerFilter::kSpeedProfile); } - // Check whether the dex2oat run was really successful. - void CheckValidity() { - if (kIsTargetBuild) { - CheckTargetValidity(); - } else { - CheckHostValidity(); - } + // Check whether the dex2oat run was really successful. + void CheckValidity() { + if (kIsTargetBuild) { + CheckTargetValidity(); + } else { + CheckHostValidity(); } + } - void CheckTargetValidity() { - // TODO: Ignore for now. - } + void CheckTargetValidity() { + // TODO: Ignore for now. 
+ } - // On the host, we can get the dex2oat output. Here, look for "dex2oat took." - void CheckHostValidity() { - EXPECT_NE(output_.find("dex2oat took"), std::string::npos) << output_; - } - }; + // On the host, we can get the dex2oat output. Here, look for "dex2oat took." + void CheckHostValidity() { + EXPECT_NE(output_.find("dex2oat took"), std::string::npos) << output_; + } +}; TEST_F(Dex2oatLayoutTest, TestLayout) { RunTest(); } +TEST_F(Dex2oatLayoutTest, TestVdexLayout) { + RunTestVDex(); +} + class Dex2oatWatchdogTest : public Dex2oatTest { protected: void RunTest(bool expect_success, const std::vector<std::string>& extra_args = {}) { diff --git a/dexlayout/dex_ir.cc b/dexlayout/dex_ir.cc index 131f4b9f63..a69409947e 100644 --- a/dexlayout/dex_ir.cc +++ b/dexlayout/dex_ir.cc @@ -649,7 +649,7 @@ CodeItem* Collections::CreateCodeItem(const DexFile& dex_file, } } int32_t size = DecodeSignedLeb128(&handlers_data); - bool has_catch_all = size < 0; + bool has_catch_all = size <= 0; if (has_catch_all) { size = -size; } diff --git a/dexlayout/dexlayout.cc b/dexlayout/dexlayout.cc index 22619b9e8d..4aa8b82ec7 100644 --- a/dexlayout/dexlayout.cc +++ b/dexlayout/dexlayout.cc @@ -1529,10 +1529,18 @@ std::vector<dex_ir::ClassData*> DexLayout::LayoutClassDefsAndClassData(const Dex // NOTE: If the section following the code items is byte aligned, the last code item is left in // place to preserve alignment. Layout needs an overhaul to handle movement of other sections. int32_t DexLayout::LayoutCodeItems(std::vector<dex_ir::ClassData*> new_class_data_order) { + // Do not move code items if class data section precedes code item section. + // ULEB encoding is variable length, causing problems determining the offset of the code items. + // TODO: We should swap the order of these sections in the future to avoid this issue. + uint32_t class_data_offset = header_->GetCollections().ClassDatasOffset(); + uint32_t code_item_offset = header_->GetCollections().CodeItemsOffset(); + if (class_data_offset < code_item_offset) { + return 0; + } + // Find the last code item so we can leave it in place if the next section is not 4 byte aligned. 
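The reason class_data placement matters is that code-item offsets are stored in class_data as ULEB128, whose encoded size depends on the value: moving a code item across a 2^(7k) boundary grows or shrinks the encoding and would shift the class_data section itself. The `diff` bookkeeping below relies on the standard size computation, equivalent to:

#include <cstddef>
#include <cstdint>

size_t UnsignedLeb128SizeModel(uint32_t value) {
  size_t size = 1;
  while (value >= 0x80) {  // 7 payload bits per byte; high bit = continuation
    value >>= 7;
    ++size;
  }
  return size;
}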
std::unordered_set<dex_ir::CodeItem*> visited_code_items; - uint32_t offset = header_->GetCollections().CodeItemsOffset(); - bool is_code_item_aligned = IsNextSectionCodeItemAligned(offset); + bool is_code_item_aligned = IsNextSectionCodeItemAligned(code_item_offset); if (!is_code_item_aligned) { dex_ir::CodeItem* last_code_item = nullptr; for (auto& code_item_pair : header_->GetCollections().CodeItems()) { @@ -1552,18 +1560,18 @@ int32_t DexLayout::LayoutCodeItems(std::vector<dex_ir::ClassData*> new_class_dat dex_ir::CodeItem* code_item = method->GetCodeItem(); if (code_item != nullptr && visited_code_items.find(code_item) == visited_code_items.end()) { visited_code_items.insert(code_item); - diff += UnsignedLeb128Size(offset) - UnsignedLeb128Size(code_item->GetOffset()); - code_item->SetOffset(offset); - offset += RoundUp(code_item->GetSize(), kDexCodeItemAlignment); + diff += UnsignedLeb128Size(code_item_offset) - UnsignedLeb128Size(code_item->GetOffset()); + code_item->SetOffset(code_item_offset); + code_item_offset += RoundUp(code_item->GetSize(), kDexCodeItemAlignment); } } for (auto& method : *class_data->VirtualMethods()) { dex_ir::CodeItem* code_item = method->GetCodeItem(); if (code_item != nullptr && visited_code_items.find(code_item) == visited_code_items.end()) { visited_code_items.insert(code_item); - diff += UnsignedLeb128Size(offset) - UnsignedLeb128Size(code_item->GetOffset()); - code_item->SetOffset(offset); - offset += RoundUp(code_item->GetSize(), kDexCodeItemAlignment); + diff += UnsignedLeb128Size(code_item_offset) - UnsignedLeb128Size(code_item->GetOffset()); + code_item->SetOffset(code_item_offset); + code_item_offset += RoundUp(code_item->GetSize(), kDexCodeItemAlignment); } } } diff --git a/dexlayout/dexlayout_test.cc b/dexlayout/dexlayout_test.cc index 9f0593a5cd..2d084c1990 100644 --- a/dexlayout/dexlayout_test.cc +++ b/dexlayout/dexlayout_test.cc @@ -75,6 +75,26 @@ static const char kUnreferencedCatchHandlerInputDex[] = "AAAEAQAABgAAAAEAAAAkAQAAASAAAAIAAABEAQAAARAAAAIAAADIAQAAAiAAABIAAADWAQAAAyAA" "AAIAAAC1AgAAACAAAAEAAADIAgAAABAAAAEAAADYAgAA"; +// Dex file with 0-size (catch all only) catch handler unreferenced by try blocks. +// Constructed by building a dex file with try/catch blocks and hex editing. 
+static const char kUnreferenced0SizeCatchHandlerInputDex[] = + "ZGV4CjAzNQCEbEEvMstSNpQpjPdfMEfUBS48cis2QRJoAwAAcAAAAHhWNBIAAAAAAAAAAMgCAAAR" + "AAAAcAAAAAcAAAC0AAAAAwAAANAAAAABAAAA9AAAAAQAAAD8AAAAAQAAABwBAAAsAgAAPAEAAOoB" + "AADyAQAABAIAABMCAAAqAgAAPgIAAFICAABmAgAAaQIAAG0CAACCAgAAhgIAAIoCAACQAgAAlQIA" + "AJ4CAACiAgAAAgAAAAMAAAAEAAAABQAAAAYAAAAHAAAACQAAAAcAAAAFAAAAAAAAAAgAAAAFAAAA" + "3AEAAAgAAAAFAAAA5AEAAAQAAQANAAAAAAAAAAAAAAAAAAIADAAAAAEAAQAOAAAAAgAAAAAAAAAA" + "AAAAAQAAAAIAAAAAAAAAAQAAAAAAAAC5AgAAAAAAAAEAAQABAAAApgIAAAQAAABwEAMAAAAOAAQA" + "AQACAAIAqwIAAC8AAABiAAAAGgEPAG4gAgAQAGIAAAAaAQoAbiACABAAYgAAABoBEABuIAIAEABi" + "AAAAGgELAG4gAgAQAA4ADQBiAQAAGgIKAG4gAgAhACcADQBiAQAAGgILAG4gAgAhACcAAAAAAAAA" + "BwABAA4AAAAHAAEAAgAdACYAAAABAAAAAwAAAAEAAAAGAAY8aW5pdD4AEEhhbmRsZXJUZXN0Lmph" + "dmEADUxIYW5kbGVyVGVzdDsAFUxqYXZhL2lvL1ByaW50U3RyZWFtOwASTGphdmEvbGFuZy9PYmpl" + "Y3Q7ABJMamF2YS9sYW5nL1N0cmluZzsAEkxqYXZhL2xhbmcvU3lzdGVtOwABVgACVkwAE1tMamF2" + "YS9sYW5nL1N0cmluZzsAAmYxAAJmMgAEbWFpbgADb3V0AAdwcmludGxuAAJ0MQACdDIAAQAHDgAE" + "AQAHDnl7eXkCeB2bAAAAAgAAgYAEvAIBCdQCAA0AAAAAAAAAAQAAAAAAAAABAAAAEQAAAHAAAAAC" + "AAAABwAAALQAAAADAAAAAwAAANAAAAAEAAAAAQAAAPQAAAAFAAAABAAAAPwAAAAGAAAAAQAAABwB" + "AAABIAAAAgAAADwBAAABEAAAAgAAANwBAAACIAAAEQAAAOoBAAADIAAAAgAAAKYCAAAAIAAAAQAA" + "ALkCAAAAEAAAAQAAAMgCAAA="; + // Dex file with multiple code items that have the same debug_info_off_. Constructed by a modified // dexlayout on XandY. static const char kDexFileDuplicateOffset[] = @@ -145,6 +165,21 @@ static const char kUnalignedCodeInfoInputDex[] = "AAEAAAC4AAAAASAAAAIAAADYAAAAAiAAAAYAAAACAQAAAyAAAAIAAAAxAQAAACAAAAEAAAA7AQAA" "ABAAAAEAAABMAQAA"; +// Dex file with class data section preceding code items. +// Constructed by passing dex file through dexmerger tool and hex editing. +static const char kClassDataBeforeCodeInputDex[] = + "ZGV4CjAzNQCZKmCu3XXn4zvxCh5VH0gZNNobEAcsc49EAgAAcAAAAHhWNBIAAAAAAAAAAAQBAAAJ" + "AAAAcAAAAAQAAACUAAAAAgAAAKQAAAAAAAAAAAAAAAUAAAC8AAAAAQAAAOQAAABAAQAABAEAAPgB" + "AAAAAgAACAIAAAsCAAAQAgAAJAIAACcCAAAqAgAALQIAAAIAAAADAAAABAAAAAUAAAACAAAAAAAA" + "AAAAAAAFAAAAAwAAAAAAAAABAAEAAAAAAAEAAAAGAAAAAQAAAAcAAAABAAAACAAAAAIAAQAAAAAA" + "AQAAAAEAAAACAAAAAAAAAAEAAAAAAAAAjAEAAAAAAAALAAAAAAAAAAEAAAAAAAAAAQAAAAkAAABw" + "AAAAAgAAAAQAAACUAAAAAwAAAAIAAACkAAAABQAAAAUAAAC8AAAABgAAAAEAAADkAAAAABAAAAEA" + "AAAEAQAAACAAAAEAAACMAQAAASAAAAQAAACkAQAAAiAAAAkAAAD4AQAAAyAAAAQAAAAwAgAAAAAB" + "AwCBgASkAwEBvAMBAdADAQHkAwAAAQABAAEAAAAwAgAABAAAAHAQBAAAAA4AAgABAAAAAAA1AgAA" + "AgAAABIQDwACAAEAAAAAADoCAAACAAAAEiAPAAIAAQAAAAAAPwIAAAIAAAASMA8ABjxpbml0PgAG" + "QS5qYXZhAAFJAANMQTsAEkxqYXZhL2xhbmcvT2JqZWN0OwABVgABYQABYgABYwABAAcOAAMABw4A" + "BgAHDgAJAAcOAA=="; + static void WriteBase64ToFile(const char* base64, File* file) { // Decode base64. CHECK(base64 != nullptr); @@ -282,8 +317,8 @@ class DexLayoutTest : public CommonRuntimeTest { return true; } - // Runs UnreferencedCatchHandlerTest. - bool UnreferencedCatchHandlerExec(std::string* error_msg) { + // Runs UnreferencedCatchHandlerTest & Unreferenced0SizeCatchHandlerTest. + bool UnreferencedCatchHandlerExec(std::string* error_msg, const char* filename) { ScratchFile tmp_file; std::string tmp_name = tmp_file.GetFilename(); size_t tmp_last_slash = tmp_name.rfind("/"); @@ -291,7 +326,7 @@ class DexLayoutTest : public CommonRuntimeTest { // Write inputs and expected outputs. 
std::string input_dex = tmp_dir + "classes.dex"; - WriteFileBase64(kUnreferencedCatchHandlerInputDex, input_dex.c_str()); + WriteFileBase64(filename, input_dex.c_str()); std::string output_dex = tmp_dir + "classes.dex.new"; std::string dexlayout = GetTestAndroidRoot() + "/bin/dexlayout"; @@ -343,8 +378,18 @@ TEST_F(DexLayoutTest, UnreferencedCatchHandler) { // Disable test on target. TEST_DISABLED_FOR_TARGET(); std::string error_msg; - ASSERT_TRUE(UnreferencedCatchHandlerExec(&error_msg)) << error_msg; + ASSERT_TRUE(UnreferencedCatchHandlerExec(&error_msg, + kUnreferencedCatchHandlerInputDex)) << error_msg; } + +TEST_F(DexLayoutTest, Unreferenced0SizeCatchHandler) { + // Disable test on target. + TEST_DISABLED_FOR_TARGET(); + std::string error_msg; + ASSERT_TRUE(UnreferencedCatchHandlerExec(&error_msg, + kUnreferenced0SizeCatchHandlerInputDex)) << error_msg; +} + TEST_F(DexLayoutTest, DuplicateOffset) { ScratchFile temp; WriteBase64ToFile(kDexFileDuplicateOffset, temp.GetFile()); @@ -418,4 +463,22 @@ TEST_F(DexLayoutTest, UnalignedCodeInfo) { } } +TEST_F(DexLayoutTest, ClassDataBeforeCode) { + ScratchFile temp; + WriteBase64ToFile(kClassDataBeforeCodeInputDex, temp.GetFile()); + ScratchFile temp2; + WriteBase64ToFile(kDexFileLayoutInputProfile, temp2.GetFile()); + EXPECT_EQ(temp.GetFile()->Flush(), 0); + std::string dexlayout = GetTestAndroidRoot() + "/bin/dexlayout"; + EXPECT_TRUE(OS::FileExists(dexlayout.c_str())) << dexlayout << " should be a valid file path"; + std::vector<std::string> dexlayout_exec_argv = + { dexlayout, "-p", temp2.GetFilename(), "-o", "/dev/null", temp.GetFilename() }; + std::string error_msg; + const bool result = ::art::Exec(dexlayout_exec_argv, &error_msg); + EXPECT_TRUE(result); + if (!result) { + LOG(ERROR) << "Error " << error_msg; + } +} + } // namespace art diff --git a/runtime/Android.bp b/runtime/Android.bp index b4c7b9cc6a..9958814f58 100644 --- a/runtime/Android.bp +++ b/runtime/Android.bp @@ -156,6 +156,7 @@ cc_defaults { "native/java_lang_Thread.cc", "native/java_lang_Throwable.cc", "native/java_lang_VMClassLoader.cc", + "native/java_lang_Void.cc", "native/java_lang_invoke_MethodHandleImpl.cc", "native/java_lang_ref_FinalizerReference.cc", "native/java_lang_ref_Reference.cc", diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S index ec8ae85722..4f7b4957b6 100644 --- a/runtime/arch/mips/quick_entrypoints_mips.S +++ b/runtime/arch/mips/quick_entrypoints_mips.S @@ -2048,11 +2048,12 @@ ENTRY_NO_GP art_quick_indexof lw $t0, MIRROR_STRING_COUNT_OFFSET($a0) # this.length() #endif slt $t1, $a2, $zero # if fromIndex < 0 -#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) +#if defined(_MIPS_ARCH_MIPS32R6) seleqz $a2, $a2, $t1 # fromIndex = 0; #else movn $a2, $zero, $t1 # fromIndex = 0; #endif + #if (STRING_COMPRESSION_FEATURE) srl $t0, $a3, 1 # $a3 holds count (with flag) and $t0 holds actual length #endif diff --git a/runtime/arch/mips64/asm_support_mips64.S b/runtime/arch/mips64/asm_support_mips64.S index 35f20fbf44..ef82bd239d 100644 --- a/runtime/arch/mips64/asm_support_mips64.S +++ b/runtime/arch/mips64/asm_support_mips64.S @@ -70,14 +70,16 @@ // Macros to poison (negate) the reference for heap poisoning. .macro POISON_HEAP_REF rRef #ifdef USE_HEAP_POISONING - subu \rRef, $zero, \rRef + dsubu \rRef, $zero, \rRef + dext \rRef, \rRef, 0, 32 #endif // USE_HEAP_POISONING .endm // Macros to unpoison (negate) the reference for heap poisoning. 
.macro UNPOISON_HEAP_REF rRef #ifdef USE_HEAP_POISONING - subu \rRef, $zero, \rRef + dsubu \rRef, $zero, \rRef + dext \rRef, \rRef, 0, 32 #endif // USE_HEAP_POISONING .endm diff --git a/runtime/base/unix_file/fd_file.cc b/runtime/base/unix_file/fd_file.cc index ff2dd1b399..03fc959f6b 100644 --- a/runtime/base/unix_file/fd_file.cc +++ b/runtime/base/unix_file/fd_file.cc @@ -73,7 +73,7 @@ void FdFile::Destroy() { } if (auto_close_ && fd_ != -1) { if (Close() != 0) { - PLOG(WARNING) << "Failed to close file " << file_path_; + PLOG(WARNING) << "Failed to close file with fd=" << fd_ << " path=" << file_path_; } } } diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc index 2788656735..cac5449c4c 100644 --- a/runtime/class_linker.cc +++ b/runtime/class_linker.cc @@ -752,22 +752,6 @@ bool ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b FindSystemClass(self, "[Ljava/lang/StackTraceElement;")); mirror::StackTraceElement::SetClass(GetClassRoot(kJavaLangStackTraceElement)); - // Ensure void type is resolved in the core's dex cache so java.lang.Void is correctly - // initialized. - { - const DexFile& dex_file = java_lang_Object->GetDexFile(); - const DexFile::TypeId* void_type_id = dex_file.FindTypeId("V"); - CHECK(void_type_id != nullptr); - dex::TypeIndex void_type_idx = dex_file.GetIndexForTypeId(*void_type_id); - // Now we resolve void type so the dex cache contains it. We use java.lang.Object class - // as referrer so the used dex cache is core's one. - ObjPtr<mirror::Class> resolved_type = ResolveType(dex_file, - void_type_idx, - java_lang_Object.Get()); - CHECK_EQ(resolved_type, GetClassRoot(kPrimitiveVoid)); - self->AssertNoPendingException(); - } - // Create conflict tables that depend on the class linker. runtime->FixupConflictTables(); @@ -1041,7 +1025,8 @@ bool ClassLinker::IsBootClassLoader(ScopedObjectAccessAlreadyRunnable& soa, class_loader->GetClass(); } -static mirror::String* GetDexPathListElementName(ObjPtr<mirror::Object> element) +static bool GetDexPathListElementName(ObjPtr<mirror::Object> element, + ObjPtr<mirror::String>* out_name) REQUIRES_SHARED(Locks::mutator_lock_) { ArtField* const dex_file_field = jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile); @@ -1053,17 +1038,20 @@ static mirror::String* GetDexPathListElementName(ObjPtr<mirror::Object> element) CHECK_EQ(dex_file_field->GetDeclaringClass(), element->GetClass()) << element->PrettyTypeOf(); ObjPtr<mirror::Object> dex_file = dex_file_field->GetObject(element); if (dex_file == nullptr) { - return nullptr; + // Null dex file means it was probably a jar with no dex files, return a null string. 
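The refactored helper below reports success separately from the name, so a dex-less jar (null dex file) is no longer conflated with an error. A hypothetical stand-alone model of the new contract:

#include <string>

// Returns false only on real failure; success with *out_name == nullptr
// means "element has no dex file", which callers now skip as valid.
bool GetNameModel(const std::string* dex_file, const std::string* name,
                  const std::string** out_name) {
  if (dex_file == nullptr) { *out_name = nullptr; return true; }
  if (name != nullptr)     { *out_name = name;    return true; }
  return false;
}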
+ *out_name = nullptr; + return true; } ObjPtr<mirror::Object> name_object = dex_file_name_field->GetObject(dex_file); if (name_object != nullptr) { - return name_object->AsString(); + *out_name = name_object->AsString(); + return true; } - return nullptr; + return false; } static bool FlattenPathClassLoader(ObjPtr<mirror::ClassLoader> class_loader, - std::list<mirror::String*>* out_dex_file_names, + std::list<ObjPtr<mirror::String>>* out_dex_file_names, std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) { DCHECK(out_dex_file_names != nullptr); @@ -1099,12 +1087,14 @@ static bool FlattenPathClassLoader(ObjPtr<mirror::ClassLoader> class_loader, *error_msg = StringPrintf("Null dex element at index %d", i); return false; } - ObjPtr<mirror::String> const name = GetDexPathListElementName(element); - if (name == nullptr) { - *error_msg = StringPrintf("Null name for dex element at index %d", i); + ObjPtr<mirror::String> name; + if (!GetDexPathListElementName(element, &name)) { + *error_msg = StringPrintf("Invalid dex path list element at index %d", i); return false; } - out_dex_file_names->push_front(name.Ptr()); + if (name != nullptr) { + out_dex_file_names->push_front(name.Ptr()); + } } } } @@ -1785,14 +1775,14 @@ bool ClassLinker::AddImageSpace( *error_msg = "Unexpected BootClassLoader in app image"; return false; } - std::list<mirror::String*> image_dex_file_names; + std::list<ObjPtr<mirror::String>> image_dex_file_names; std::string temp_error_msg; if (!FlattenPathClassLoader(image_class_loader.Get(), &image_dex_file_names, &temp_error_msg)) { *error_msg = StringPrintf("Failed to flatten image class loader hierarchy '%s'", temp_error_msg.c_str()); return false; } - std::list<mirror::String*> loader_dex_file_names; + std::list<ObjPtr<mirror::String>> loader_dex_file_names; if (!FlattenPathClassLoader(class_loader.Get(), &loader_dex_file_names, &temp_error_msg)) { *error_msg = StringPrintf("Failed to flatten class loader hierarchy '%s'", temp_error_msg.c_str()); @@ -1804,7 +1794,10 @@ bool ClassLinker::AddImageSpace( ObjPtr<mirror::Object> element = elements->GetWithoutChecks(i); if (element != nullptr) { // If we are somewhere in the middle of the array, there may be nulls at the end. - loader_dex_file_names.push_back(GetDexPathListElementName(element)); + ObjPtr<mirror::String> name; + if (GetDexPathListElementName(element, &name) && name != nullptr) { + loader_dex_file_names.push_back(name); + } } } // Ignore the number of image dex files since we are adding those to the class loader anyways. @@ -4162,19 +4155,6 @@ bool ClassLinker::VerifyClassUsingOatFile(const DexFile& dex_file, return false; } - // We may be running with a preopted oat file but without image. In this case, - // we don't skip verification of skip_access_checks classes to ensure we initialize - // dex caches with all types resolved during verification. - // We need to trust image classes, as these might be coming out of a pre-opted, quickened boot - // image (that we just failed loading), and the verifier can't be run on quickened opcodes when - // the runtime isn't started. On the other hand, app classes can be re-verified even if they are - // already pre-opted, as then the runtime is started. 
- if (!Runtime::Current()->IsAotCompiler() && - !Runtime::Current()->GetHeap()->HasBootImageSpace() && - klass->GetClassLoader() != nullptr) { - return false; - } - uint16_t class_def_index = klass->GetDexClassDefIndex(); oat_file_class_status = oat_dex_file->GetOatClass(class_def_index).GetStatus(); if (oat_file_class_status == mirror::Class::kStatusVerified || diff --git a/runtime/gc/accounting/read_barrier_table.h b/runtime/gc/accounting/read_barrier_table.h index 86266e2500..e77a5b8e39 100644 --- a/runtime/gc/accounting/read_barrier_table.h +++ b/runtime/gc/accounting/read_barrier_table.h @@ -80,7 +80,7 @@ class ReadBarrierTable { } // This should match RegionSpace::kRegionSize. static_assert'ed in concurrent_copying.h. - static constexpr size_t kRegionSize = 1 * MB; + static constexpr size_t kRegionSize = 256 * KB; private: static constexpr uint64_t kHeapCapacity = 4ULL * GB; // low 4gb. diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc index 8f9c187e1d..aea9708ddc 100644 --- a/runtime/gc/collector/concurrent_copying.cc +++ b/runtime/gc/collector/concurrent_copying.cc @@ -1644,10 +1644,10 @@ void ConcurrentCopying::ReclaimPhase() { // Record freed objects. TimingLogger::ScopedTiming split2("RecordFree", GetTimings()); // Don't include thread-locals that are in the to-space. - uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace(); - uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace(); - uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace(); - uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace(); + const uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace(); + const uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace(); + const uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace(); + const uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace(); uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent(); cumulative_bytes_moved_.FetchAndAddRelaxed(to_bytes); uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent(); @@ -1658,8 +1658,18 @@ void ConcurrentCopying::ReclaimPhase() { } CHECK_LE(to_objects, from_objects); CHECK_LE(to_bytes, from_bytes); - int64_t freed_bytes = from_bytes - to_bytes; - int64_t freed_objects = from_objects - to_objects; + // cleared_bytes and cleared_objects may be greater than the from space equivalents since + // ClearFromSpace may clear empty unevac regions. 
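Schematically, the freed totals are now derived from what ClearFromSpace actually reclaimed rather than from the from-space counters, so fully dead unevacuated regions are credited as well (a sketch of the accounting):

#include <cassert>
#include <cstdint>

int64_t FreedBytesModel(uint64_t cleared_bytes, uint64_t to_bytes,
                        uint64_t from_bytes) {
  assert(cleared_bytes >= from_bytes);  // may also cover dead unevac regions
  return static_cast<int64_t>(cleared_bytes - to_bytes);
}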
+ uint64_t cleared_bytes; + uint64_t cleared_objects; + { + TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings()); + region_space_->ClearFromSpace(&cleared_bytes, &cleared_objects); + CHECK_GE(cleared_bytes, from_bytes); + CHECK_GE(cleared_objects, from_objects); + } + int64_t freed_bytes = cleared_bytes - to_bytes; + int64_t freed_objects = cleared_objects - to_objects; if (kVerboseMode) { LOG(INFO) << "RecordFree:" << " from_bytes=" << from_bytes << " from_objects=" << from_objects @@ -1678,11 +1688,6 @@ } { - TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings()); - region_space_->ClearFromSpace(); - } - - { WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); Sweep(false); SwapBitmaps(); diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc index 12b9701845..b857ea3eef 100644 --- a/runtime/gc/heap.cc +++ b/runtime/gc/heap.cc @@ -3559,11 +3559,8 @@ void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran, collector::GcType gc_type = collector_ran->GetGcType(); const double multiplier = HeapGrowthMultiplier(); // Use the multiplier to grow more for // foreground. - // Ensure at least 2.5 MB to temporarily fix excessive GC caused by TLAB ergonomics. - const uint64_t adjusted_min_free = std::max(static_cast<uint64_t>(min_free_ * multiplier), - static_cast<uint64_t>(5 * MB / 2)); - const uint64_t adjusted_max_free = std::max(static_cast<uint64_t>(max_free_ * multiplier), - static_cast<uint64_t>(5 * MB / 2)); + const uint64_t adjusted_min_free = static_cast<uint64_t>(min_free_ * multiplier); + const uint64_t adjusted_max_free = static_cast<uint64_t>(max_free_ * multiplier); if (gc_type != collector::kGcTypeSticky) { // Grow the heap for non sticky GC. ssize_t delta = bytes_allocated / GetTargetHeapUtilization() - bytes_allocated; diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h index 3e79223498..5809027235 100644 --- a/runtime/gc/space/region_space-inl.h +++ b/runtime/gc/space/region_space-inl.h @@ -78,7 +78,7 @@ inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes, size_t* by for (size_t i = 0; i < num_regions_; ++i) { Region* r = &regions_[i]; if (r->IsFree()) { - r->Unfree(time_); + r->Unfree(this, time_); r->SetNewlyAllocated(); ++num_non_free_regions_; obj = r->Alloc(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated); @@ -91,7 +91,7 @@ inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes, size_t* by for (size_t i = 0; i < num_regions_; ++i) { Region* r = &regions_[i]; if (r->IsFree()) { - r->Unfree(time_); + r->Unfree(this, time_); ++num_non_free_regions_; obj = r->Alloc(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated); CHECK(obj != nullptr); @@ -233,10 +233,12 @@ void RegionSpace::WalkInternal(ObjectCallback* callback, void* arg) { continue; } if (r->IsLarge()) { + // Avoid visiting dead large objects since they may contain dangling pointers to the + // from-space. + DCHECK_GT(r->LiveBytes(), 0u) << "Visiting dead large object"; mirror::Object* obj = reinterpret_cast<mirror::Object*>(r->Begin()); - if (obj->GetClass() != nullptr) { - callback(obj, arg); - } + DCHECK(obj->GetClass() != nullptr); + callback(obj, arg); } else if (r->IsLargeTail()) { // Do nothing.
} else { @@ -310,13 +312,13 @@ mirror::Object* RegionSpace::AllocLarge(size_t num_bytes, size_t* bytes_allocate DCHECK_EQ(left + num_regs, right); Region* first_reg = &regions_[left]; DCHECK(first_reg->IsFree()); - first_reg->UnfreeLarge(time_); + first_reg->UnfreeLarge(this, time_); ++num_non_free_regions_; first_reg->SetTop(first_reg->Begin() + num_bytes); for (size_t p = left + 1; p < right; ++p) { DCHECK_LT(p, num_regions_); DCHECK(regions_[p].IsFree()); - regions_[p].UnfreeLargeTail(time_); + regions_[p].UnfreeLargeTail(this, time_); ++num_non_free_regions_; } *bytes_allocated = num_bytes; diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc index 321524cbbd..1ad48438ba 100644 --- a/runtime/gc/space/region_space.cc +++ b/runtime/gc/space/region_space.cc @@ -86,6 +86,7 @@ RegionSpace::RegionSpace(const std::string& name, MemMap* mem_map) num_regions_ = mem_map_size / kRegionSize; num_non_free_regions_ = 0U; DCHECK_GT(num_regions_, 0U); + non_free_region_index_limit_ = 0U; regions_.reset(new Region[num_regions_]); uint8_t* region_addr = mem_map->Begin(); for (size_t i = 0; i < num_regions_; ++i, region_addr += kRegionSize) { @@ -192,7 +193,11 @@ void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table, bool forc MutexLock mu(Thread::Current(), region_lock_); size_t num_expected_large_tails = 0; bool prev_large_evacuated = false; - for (size_t i = 0; i < num_regions_; ++i) { + VerifyNonFreeRegionLimit(); + const size_t iter_limit = kUseTableLookupReadBarrier + ? num_regions_ + : std::min(num_regions_, non_free_region_index_limit_); + for (size_t i = 0; i < iter_limit; ++i) { Region* r = &regions_[i]; RegionState state = r->State(); RegionType type = r->Type(); @@ -236,18 +241,50 @@ void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table, bool forc } } } + DCHECK_EQ(num_expected_large_tails, 0U); current_region_ = &full_region_; evac_region_ = &full_region_; } -void RegionSpace::ClearFromSpace() { +void RegionSpace::ClearFromSpace(uint64_t* cleared_bytes, uint64_t* cleared_objects) { + DCHECK(cleared_bytes != nullptr); + DCHECK(cleared_objects != nullptr); + *cleared_bytes = 0; + *cleared_objects = 0; MutexLock mu(Thread::Current(), region_lock_); - for (size_t i = 0; i < num_regions_; ++i) { + VerifyNonFreeRegionLimit(); + size_t new_non_free_region_index_limit = 0; + for (size_t i = 0; i < std::min(num_regions_, non_free_region_index_limit_); ++i) { Region* r = &regions_[i]; if (r->IsInFromSpace()) { - r->Clear(); + *cleared_bytes += r->BytesAllocated(); + *cleared_objects += r->ObjectsAllocated(); --num_non_free_regions_; + r->Clear(); } else if (r->IsInUnevacFromSpace()) { + if (r->LiveBytes() == 0) { + // Special case for 0 live bytes, this means all of the objects in the region are dead and + // we can clear it. This is important for large objects since we must not visit dead ones in + // RegionSpace::Walk because they may contain dangling references to invalid objects. + // It is also better to clear these regions now instead of at the end of the next GC to + // save RAM. If we don't clear the regions here, they will be cleared next GC by the normal + // live percent evacuation logic. + size_t free_regions = 1; + // Also release RAM for large tails.
+ while (i + free_regions < num_regions_ && regions_[i + free_regions].IsLargeTail()) { + DCHECK(r->IsLarge()); + regions_[i + free_regions].Clear(); + ++free_regions; + } + *cleared_bytes += r->BytesAllocated(); + *cleared_objects += r->ObjectsAllocated(); + num_non_free_regions_ -= free_regions; + r->Clear(); + GetLiveBitmap()->ClearRange( + reinterpret_cast<mirror::Object*>(r->Begin()), + reinterpret_cast<mirror::Object*>(r->Begin() + free_regions * kRegionSize)); + continue; + } size_t full_count = 0; while (r->IsInUnevacFromSpace()) { Region* const cur = ®ions_[i + full_count]; @@ -255,6 +292,7 @@ void RegionSpace::ClearFromSpace() { cur->LiveBytes() != static_cast<size_t>(cur->Top() - cur->Begin())) { break; } + DCHECK(cur->IsInUnevacFromSpace()); if (full_count != 0) { cur->SetUnevacFromSpaceAsToSpace(); } @@ -271,7 +309,15 @@ void RegionSpace::ClearFromSpace() { i += full_count - 1; } } + // Note r != last_checked_region if r->IsInUnevacFromSpace() was true above. + Region* last_checked_region = ®ions_[i]; + if (!last_checked_region->IsFree()) { + new_non_free_region_index_limit = std::max(new_non_free_region_index_limit, + last_checked_region->Idx() + 1); + } } + // Update non_free_region_index_limit_. + SetNonFreeRegionLimit(new_non_free_region_index_limit); evac_region_ = nullptr; } @@ -324,6 +370,7 @@ void RegionSpace::Clear() { } r->Clear(); } + SetNonFreeRegionLimit(0); current_region_ = &full_region_; evac_region_ = &full_region_; } @@ -390,7 +437,7 @@ bool RegionSpace::AllocNewTlab(Thread* self) { for (size_t i = 0; i < num_regions_; ++i) { Region* r = ®ions_[i]; if (r->IsFree()) { - r->Unfree(time_); + r->Unfree(this, time_); ++num_non_free_regions_; r->SetNewlyAllocated(); r->SetTop(r->End()); diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h index da36f5c55d..253792993b 100644 --- a/runtime/gc/space/region_space.h +++ b/runtime/gc/space/region_space.h @@ -167,7 +167,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace { // Object alignment within the space. static constexpr size_t kAlignment = kObjectAlignment; // The region size. - static constexpr size_t kRegionSize = 1 * MB; + static constexpr size_t kRegionSize = 256 * KB; bool IsInFromSpace(mirror::Object* ref) { if (HasAddress(ref)) { @@ -215,7 +215,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace { size_t FromSpaceSize() REQUIRES(!region_lock_); size_t UnevacFromSpaceSize() REQUIRES(!region_lock_); size_t ToSpaceSize() REQUIRES(!region_lock_); - void ClearFromSpace() REQUIRES(!region_lock_); + void ClearFromSpace(uint64_t* cleared_bytes, uint64_t* cleared_objects) REQUIRES(!region_lock_); void AddLiveBytes(mirror::Object* ref, size_t alloc_size) { Region* reg = RefToRegionUnlocked(ref); @@ -308,25 +308,31 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace { } // Given a free region, declare it non-free (allocated). 
- void Unfree(uint32_t alloc_time) { + void Unfree(RegionSpace* region_space, uint32_t alloc_time) + REQUIRES(region_space->region_lock_) { DCHECK(IsFree()); state_ = RegionState::kRegionStateAllocated; type_ = RegionType::kRegionTypeToSpace; alloc_time_ = alloc_time; + region_space->AdjustNonFreeRegionLimit(idx_); } - void UnfreeLarge(uint32_t alloc_time) { + void UnfreeLarge(RegionSpace* region_space, uint32_t alloc_time) + REQUIRES(region_space->region_lock_) { DCHECK(IsFree()); state_ = RegionState::kRegionStateLarge; type_ = RegionType::kRegionTypeToSpace; alloc_time_ = alloc_time; + region_space->AdjustNonFreeRegionLimit(idx_); } - void UnfreeLargeTail(uint32_t alloc_time) { + void UnfreeLargeTail(RegionSpace* region_space, uint32_t alloc_time) + REQUIRES(region_space->region_lock_) { DCHECK(IsFree()); state_ = RegionState::kRegionStateLargeTail; type_ = RegionType::kRegionTypeToSpace; alloc_time_ = alloc_time; + region_space->AdjustNonFreeRegionLimit(idx_); } void SetNewlyAllocated() { @@ -342,7 +348,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace { bool IsLarge() const { bool is_large = state_ == RegionState::kRegionStateLarge; if (is_large) { - DCHECK_LT(begin_ + 1 * MB, Top()); + DCHECK_LT(begin_ + kRegionSize, Top()); } return is_large; } @@ -429,7 +435,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace { size_t ObjectsAllocated() const { if (IsLarge()) { - DCHECK_LT(begin_ + 1 * MB, Top()); + DCHECK_LT(begin_ + kRegionSize, Top()); DCHECK_EQ(objects_allocated_.LoadRelaxed(), 0U); return 1; } else if (IsLargeTail()) { @@ -520,6 +526,27 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace { mirror::Object* GetNextObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_); + void AdjustNonFreeRegionLimit(size_t new_non_free_region_index) REQUIRES(region_lock_) { + DCHECK_LT(new_non_free_region_index, num_regions_); + non_free_region_index_limit_ = std::max(non_free_region_index_limit_, + new_non_free_region_index + 1); + VerifyNonFreeRegionLimit(); + } + + void SetNonFreeRegionLimit(size_t new_non_free_region_index_limit) REQUIRES(region_lock_) { + DCHECK_LE(new_non_free_region_index_limit, num_regions_); + non_free_region_index_limit_ = new_non_free_region_index_limit; + VerifyNonFreeRegionLimit(); + } + + void VerifyNonFreeRegionLimit() REQUIRES(region_lock_) { + if (kIsDebugBuild && non_free_region_index_limit_ < num_regions_) { + for (size_t i = non_free_region_index_limit_; i < num_regions_; ++i) { + CHECK(regions_[i].IsFree()); + } + } + } + Mutex region_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; uint32_t time_; // The time as the number of collections since the startup. @@ -527,6 +554,10 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace { size_t num_non_free_regions_; // The number of non-free regions in this space. std::unique_ptr<Region[]> regions_ GUARDED_BY(region_lock_); // The pointer to the region array. + // The upper-bound index of the non-free regions. Used to avoid scanning all regions in + // SetFromSpace(). Invariant: for all i >= non_free_region_index_limit_, regions_[i].IsFree() is + // true. + size_t non_free_region_index_limit_ GUARDED_BY(region_lock_); Region* current_region_; // The region that's being allocated currently. Region* evac_region_; // The region that's being evacuated to currently. Region full_region_; // The dummy/sentinel region that looks full. 
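The region_space.h hunk above carries the key invariant of this change: every region at index >= non_free_region_index_limit_ is free (VerifyNonFreeRegionLimit() checks exactly this in debug builds), so SetFromSpace() and ClearFromSpace() can stop scanning at the limit instead of walking all num_regions_ entries; only the kUseTableLookupReadBarrier configuration still walks every region, as seen in the iter_limit computation earlier. What follows is a minimal standalone sketch of that invariant, not ART's actual Region/RegionSpace classes or their locking; the BoundedRegionArray type and its members are illustrative only.

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

// Sketch of the non-free-region index limit. Invariant: free_[i] is true
// for every i >= limit_, so bounded scans may stop at limit_.
class BoundedRegionArray {
 public:
  explicit BoundedRegionArray(size_t num_regions)
      : free_(num_regions, true), limit_(0) {}

  // Analogue of Region::Unfree() calling AdjustNonFreeRegionLimit(idx_):
  // allocating a region can only raise the limit.
  void Unfree(size_t idx) {
    assert(idx < free_.size());
    free_[idx] = false;
    limit_ = std::max(limit_, idx + 1);
  }

  // Clearing a region never raises the limit; the limit may become stale
  // (too high), which is safe, merely less tight.
  void Free(size_t idx) {
    assert(idx < free_.size());
    free_[idx] = true;
  }

  // Analogue of ClearFromSpace() tracking the last non-free region it saw
  // and then calling SetNonFreeRegionLimit() with the recomputed bound.
  void RecomputeLimit() {
    size_t new_limit = 0;
    for (size_t i = 0; i < limit_; ++i) {
      if (!free_[i]) {
        new_limit = i + 1;
      }
    }
    limit_ = new_limit;
  }

  // Analogue of the bounded loops in SetFromSpace()/ClearFromSpace():
  // only [0, limit_) ever needs to be visited.
  template <typename Fn>
  void ForEachNonFree(Fn fn) const {
    for (size_t i = 0; i < std::min(free_.size(), limit_); ++i) {
      if (!free_[i]) {
        fn(i);
      }
    }
  }

 private:
  std::vector<bool> free_;  // free_[i] == true means region i is unallocated.
  size_t limit_;            // Exclusive upper bound on non-free indices.
};

Since the same hunk shrinks kRegionSize from 1 MB to 256 KB, num_regions_ grows fourfold for a given heap size, which is presumably what makes bounding these full-array scans worth the extra bookkeeping.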
diff --git a/runtime/image.cc b/runtime/image.cc index 243051e3bd..88f28f3ea1 100644 --- a/runtime/image.cc +++ b/runtime/image.cc @@ -25,7 +25,7 @@ namespace art { const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' }; -const uint8_t ImageHeader::kImageVersion[] = { '0', '3', '9', '\0' }; // Enable string compression. +const uint8_t ImageHeader::kImageVersion[] = { '0', '4', '0', '\0' }; // Integer.valueOf intrinsic ImageHeader::ImageHeader(uint32_t image_begin, uint32_t image_size, diff --git a/runtime/interpreter/mterp/arm/op_sget.S b/runtime/interpreter/mterp/arm/op_sget.S index 2b81f5069f..3c813efb31 100644 --- a/runtime/interpreter/mterp/arm/op_sget.S +++ b/runtime/interpreter/mterp/arm/op_sget.S @@ -1,4 +1,4 @@ -%default { "is_object":"0", "helper":"artGet32StaticFromCode" } +%default { "is_object":"0", "helper":"MterpGet32Static" } /* * General SGET handler wrapper. * diff --git a/runtime/interpreter/mterp/arm/op_sget_boolean.S b/runtime/interpreter/mterp/arm/op_sget_boolean.S index ebfb44cb20..eb06aa881c 100644 --- a/runtime/interpreter/mterp/arm/op_sget_boolean.S +++ b/runtime/interpreter/mterp/arm/op_sget_boolean.S @@ -1 +1 @@ -%include "arm/op_sget.S" {"helper":"artGetBooleanStaticFromCode"} +%include "arm/op_sget.S" {"helper":"MterpGetBooleanStatic"} diff --git a/runtime/interpreter/mterp/arm/op_sget_byte.S b/runtime/interpreter/mterp/arm/op_sget_byte.S index d76862e600..9f4c9046a2 100644 --- a/runtime/interpreter/mterp/arm/op_sget_byte.S +++ b/runtime/interpreter/mterp/arm/op_sget_byte.S @@ -1 +1 @@ -%include "arm/op_sget.S" {"helper":"artGetByteStaticFromCode"} +%include "arm/op_sget.S" {"helper":"MterpGetByteStatic"} diff --git a/runtime/interpreter/mterp/arm/op_sget_char.S b/runtime/interpreter/mterp/arm/op_sget_char.S index b7fcfc23d4..dd8c991264 100644 --- a/runtime/interpreter/mterp/arm/op_sget_char.S +++ b/runtime/interpreter/mterp/arm/op_sget_char.S @@ -1 +1 @@ -%include "arm/op_sget.S" {"helper":"artGetCharStaticFromCode"} +%include "arm/op_sget.S" {"helper":"MterpGetCharStatic"} diff --git a/runtime/interpreter/mterp/arm/op_sget_object.S b/runtime/interpreter/mterp/arm/op_sget_object.S index 8e7d075b72..e1d9eaee29 100644 --- a/runtime/interpreter/mterp/arm/op_sget_object.S +++ b/runtime/interpreter/mterp/arm/op_sget_object.S @@ -1 +1 @@ -%include "arm/op_sget.S" {"is_object":"1", "helper":"artGetObjStaticFromCode"} +%include "arm/op_sget.S" {"is_object":"1", "helper":"MterpGetObjStatic"} diff --git a/runtime/interpreter/mterp/arm/op_sget_short.S b/runtime/interpreter/mterp/arm/op_sget_short.S index 3e80f0da87..c0d61c4d33 100644 --- a/runtime/interpreter/mterp/arm/op_sget_short.S +++ b/runtime/interpreter/mterp/arm/op_sget_short.S @@ -1 +1 @@ -%include "arm/op_sget.S" {"helper":"artGetShortStaticFromCode"} +%include "arm/op_sget.S" {"helper":"MterpGetShortStatic"} diff --git a/runtime/interpreter/mterp/arm/op_sget_wide.S b/runtime/interpreter/mterp/arm/op_sget_wide.S index 4f2f89d6c3..aeee016294 100644 --- a/runtime/interpreter/mterp/arm/op_sget_wide.S +++ b/runtime/interpreter/mterp/arm/op_sget_wide.S @@ -4,12 +4,12 @@ */ /* sget-wide vAA, field@BBBB */ - .extern artGet64StaticFromCode + .extern MterpGet64Static EXPORT_PC FETCH r0, 1 @ r0<- field ref BBBB ldr r1, [rFP, #OFF_FP_METHOD] mov r2, rSELF - bl artGet64StaticFromCode + bl MterpGet64Static ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET] mov r9, rINST, lsr #8 @ r9<- AA VREG_INDEX_TO_ADDR lr, r9 @ r9<- &fp[AA] diff --git a/runtime/interpreter/mterp/arm/op_sput.S b/runtime/interpreter/mterp/arm/op_sput.S 
index 7e0c1a64ef..494df8aa5d 100644 --- a/runtime/interpreter/mterp/arm/op_sput.S +++ b/runtime/interpreter/mterp/arm/op_sput.S @@ -1,4 +1,4 @@ -%default { "helper":"artSet32StaticFromCode"} +%default { "helper":"MterpSet32Static"} /* * General SPUT handler wrapper. * diff --git a/runtime/interpreter/mterp/arm/op_sput_boolean.S b/runtime/interpreter/mterp/arm/op_sput_boolean.S index e3bbf2b8ba..47bed0a2ce 100644 --- a/runtime/interpreter/mterp/arm/op_sput_boolean.S +++ b/runtime/interpreter/mterp/arm/op_sput_boolean.S @@ -1 +1 @@ -%include "arm/op_sput.S" {"helper":"artSet8StaticFromCode"} +%include "arm/op_sput.S" {"helper":"MterpSetBooleanStatic"} diff --git a/runtime/interpreter/mterp/arm/op_sput_byte.S b/runtime/interpreter/mterp/arm/op_sput_byte.S index e3bbf2b8ba..b4d22b4fd8 100644 --- a/runtime/interpreter/mterp/arm/op_sput_byte.S +++ b/runtime/interpreter/mterp/arm/op_sput_byte.S @@ -1 +1 @@ -%include "arm/op_sput.S" {"helper":"artSet8StaticFromCode"} +%include "arm/op_sput.S" {"helper":"MterpSetByteStatic"} diff --git a/runtime/interpreter/mterp/arm/op_sput_char.S b/runtime/interpreter/mterp/arm/op_sput_char.S index d8d65cb5ef..58a957d1f6 100644 --- a/runtime/interpreter/mterp/arm/op_sput_char.S +++ b/runtime/interpreter/mterp/arm/op_sput_char.S @@ -1 +1 @@ -%include "arm/op_sput.S" {"helper":"artSet16StaticFromCode"} +%include "arm/op_sput.S" {"helper":"MterpSetCharStatic"} diff --git a/runtime/interpreter/mterp/arm/op_sput_short.S b/runtime/interpreter/mterp/arm/op_sput_short.S index d8d65cb5ef..88c321127e 100644 --- a/runtime/interpreter/mterp/arm/op_sput_short.S +++ b/runtime/interpreter/mterp/arm/op_sput_short.S @@ -1 +1 @@ -%include "arm/op_sput.S" {"helper":"artSet16StaticFromCode"} +%include "arm/op_sput.S" {"helper":"MterpSetShortStatic"} diff --git a/runtime/interpreter/mterp/arm/op_sput_wide.S b/runtime/interpreter/mterp/arm/op_sput_wide.S index 8d8ed8c4a2..1e8fcc9b75 100644 --- a/runtime/interpreter/mterp/arm/op_sput_wide.S +++ b/runtime/interpreter/mterp/arm/op_sput_wide.S @@ -3,15 +3,15 @@ * */ /* sput-wide vAA, field@BBBB */ - .extern artSet64IndirectStaticFromMterp + .extern MterpSet64Static EXPORT_PC FETCH r0, 1 @ r0<- field ref BBBB - ldr r1, [rFP, #OFF_FP_METHOD] - mov r2, rINST, lsr #8 @ r3<- AA - VREG_INDEX_TO_ADDR r2, r2 + mov r1, rINST, lsr #8 @ r1<- AA + VREG_INDEX_TO_ADDR r1, r1 + ldr r2, [rFP, #OFF_FP_METHOD] mov r3, rSELF PREFETCH_INST 2 @ Get next inst, but don't advance rPC - bl artSet64IndirectStaticFromMterp + bl MterpSet64Static cmp r0, #0 @ 0 on success, -1 on failure bne MterpException ADVANCE 2 @ Past exception point - now advance rPC diff --git a/runtime/interpreter/mterp/arm64/op_sget.S b/runtime/interpreter/mterp/arm64/op_sget.S index 6352ce0597..84e71ac15e 100644 --- a/runtime/interpreter/mterp/arm64/op_sget.S +++ b/runtime/interpreter/mterp/arm64/op_sget.S @@ -1,4 +1,4 @@ -%default { "is_object":"0", "helper":"artGet32StaticFromCode", "extend":"" } +%default { "is_object":"0", "helper":"MterpGet32Static", "extend":"" } /* * General SGET handler wrapper. 
* diff --git a/runtime/interpreter/mterp/arm64/op_sget_boolean.S b/runtime/interpreter/mterp/arm64/op_sget_boolean.S index c40dbdd7d7..868f41cb7f 100644 --- a/runtime/interpreter/mterp/arm64/op_sget_boolean.S +++ b/runtime/interpreter/mterp/arm64/op_sget_boolean.S @@ -1 +1 @@ -%include "arm64/op_sget.S" {"helper":"artGetBooleanStaticFromCode", "extend":"uxtb w0, w0"} +%include "arm64/op_sget.S" {"helper":"MterpGetBooleanStatic", "extend":"uxtb w0, w0"} diff --git a/runtime/interpreter/mterp/arm64/op_sget_byte.S b/runtime/interpreter/mterp/arm64/op_sget_byte.S index 6cf69a382a..e135aa737a 100644 --- a/runtime/interpreter/mterp/arm64/op_sget_byte.S +++ b/runtime/interpreter/mterp/arm64/op_sget_byte.S @@ -1 +1 @@ -%include "arm64/op_sget.S" {"helper":"artGetByteStaticFromCode", "extend":"sxtb w0, w0"} +%include "arm64/op_sget.S" {"helper":"MterpGetByteStatic", "extend":"sxtb w0, w0"} diff --git a/runtime/interpreter/mterp/arm64/op_sget_char.S b/runtime/interpreter/mterp/arm64/op_sget_char.S index 8924a349ab..05d57ac20b 100644 --- a/runtime/interpreter/mterp/arm64/op_sget_char.S +++ b/runtime/interpreter/mterp/arm64/op_sget_char.S @@ -1 +1 @@ -%include "arm64/op_sget.S" {"helper":"artGetCharStaticFromCode", "extend":"uxth w0, w0"} +%include "arm64/op_sget.S" {"helper":"MterpGetCharStatic", "extend":"uxth w0, w0"} diff --git a/runtime/interpreter/mterp/arm64/op_sget_object.S b/runtime/interpreter/mterp/arm64/op_sget_object.S index 620b0bab00..1faaf6eb8e 100644 --- a/runtime/interpreter/mterp/arm64/op_sget_object.S +++ b/runtime/interpreter/mterp/arm64/op_sget_object.S @@ -1 +1 @@ -%include "arm64/op_sget.S" {"is_object":"1", "helper":"artGetObjStaticFromCode"} +%include "arm64/op_sget.S" {"is_object":"1", "helper":"MterpGetObjStatic"} diff --git a/runtime/interpreter/mterp/arm64/op_sget_short.S b/runtime/interpreter/mterp/arm64/op_sget_short.S index 19dbba6f74..5900231b06 100644 --- a/runtime/interpreter/mterp/arm64/op_sget_short.S +++ b/runtime/interpreter/mterp/arm64/op_sget_short.S @@ -1 +1 @@ -%include "arm64/op_sget.S" {"helper":"artGetShortStaticFromCode", "extend":"sxth w0, w0"} +%include "arm64/op_sget.S" {"helper":"MterpGetShortStatic", "extend":"sxth w0, w0"} diff --git a/runtime/interpreter/mterp/arm64/op_sget_wide.S b/runtime/interpreter/mterp/arm64/op_sget_wide.S index 287f66daeb..92f3f7dd66 100644 --- a/runtime/interpreter/mterp/arm64/op_sget_wide.S +++ b/runtime/interpreter/mterp/arm64/op_sget_wide.S @@ -4,12 +4,12 @@ */ /* sget-wide vAA, field//BBBB */ - .extern artGet64StaticFromCode + .extern MterpGet64Static EXPORT_PC FETCH w0, 1 // w0<- field ref BBBB ldr x1, [xFP, #OFF_FP_METHOD] mov x2, xSELF - bl artGet64StaticFromCode + bl MterpGet64Static ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET] lsr w4, wINST, #8 // w4<- AA cbnz x3, MterpException // bail out diff --git a/runtime/interpreter/mterp/arm64/op_sput.S b/runtime/interpreter/mterp/arm64/op_sput.S index 75f27abdcc..e322af0e76 100644 --- a/runtime/interpreter/mterp/arm64/op_sput.S +++ b/runtime/interpreter/mterp/arm64/op_sput.S @@ -1,4 +1,4 @@ -%default { "helper":"artSet32StaticFromCode"} +%default { "helper":"MterpSet32Static"} /* * General SPUT handler wrapper.
* diff --git a/runtime/interpreter/mterp/arm64/op_sput_boolean.S b/runtime/interpreter/mterp/arm64/op_sput_boolean.S index 11c55e529b..9928f31c98 100644 --- a/runtime/interpreter/mterp/arm64/op_sput_boolean.S +++ b/runtime/interpreter/mterp/arm64/op_sput_boolean.S @@ -1 +1 @@ -%include "arm64/op_sput.S" {"helper":"artSet8StaticFromCode"} +%include "arm64/op_sput.S" {"helper":"MterpSetBooleanStatic"} diff --git a/runtime/interpreter/mterp/arm64/op_sput_byte.S b/runtime/interpreter/mterp/arm64/op_sput_byte.S index 11c55e529b..16d6ba96e0 100644 --- a/runtime/interpreter/mterp/arm64/op_sput_byte.S +++ b/runtime/interpreter/mterp/arm64/op_sput_byte.S @@ -1 +1 @@ -%include "arm64/op_sput.S" {"helper":"artSet8StaticFromCode"} +%include "arm64/op_sput.S" {"helper":"MterpSetByteStatic"} diff --git a/runtime/interpreter/mterp/arm64/op_sput_char.S b/runtime/interpreter/mterp/arm64/op_sput_char.S index b4dd5aa76c..ab5e8152b9 100644 --- a/runtime/interpreter/mterp/arm64/op_sput_char.S +++ b/runtime/interpreter/mterp/arm64/op_sput_char.S @@ -1 +1 @@ -%include "arm64/op_sput.S" {"helper":"artSet16StaticFromCode"} +%include "arm64/op_sput.S" {"helper":"MterpSetCharStatic"} diff --git a/runtime/interpreter/mterp/arm64/op_sput_short.S b/runtime/interpreter/mterp/arm64/op_sput_short.S index b4dd5aa76c..b54f88ad48 100644 --- a/runtime/interpreter/mterp/arm64/op_sput_short.S +++ b/runtime/interpreter/mterp/arm64/op_sput_short.S @@ -1 +1 @@ -%include "arm64/op_sput.S" {"helper":"artSet16StaticFromCode"} +%include "arm64/op_sput.S" {"helper":"MterpSetShortStatic"} diff --git a/runtime/interpreter/mterp/arm64/op_sput_wide.S b/runtime/interpreter/mterp/arm64/op_sput_wide.S index a79b1a6172..4aeb8ff316 100644 --- a/runtime/interpreter/mterp/arm64/op_sput_wide.S +++ b/runtime/interpreter/mterp/arm64/op_sput_wide.S @@ -3,15 +3,15 @@ * */ /* sput-wide vAA, field//BBBB */ - .extern artSet64IndirectStaticFromMterp + .extern MterpSet64Static EXPORT_PC FETCH w0, 1 // w0<- field ref BBBB - ldr x1, [xFP, #OFF_FP_METHOD] - lsr w2, wINST, #8 // w3<- AA - VREG_INDEX_TO_ADDR x2, w2 + lsr w1, wINST, #8 // w1<- AA + VREG_INDEX_TO_ADDR x1, w1 + ldr x2, [xFP, #OFF_FP_METHOD] mov x3, xSELF PREFETCH_INST 2 // Get next inst, but don't advance rPC - bl artSet64IndirectStaticFromMterp + bl MterpSet64Static cbnz w0, MterpException // 0 on success, -1 on failure ADVANCE 2 // Past exception point - now advance rPC GET_INST_OPCODE ip // extract opcode from wINST diff --git a/runtime/interpreter/mterp/mips/op_sget.S b/runtime/interpreter/mterp/mips/op_sget.S index 64ece1e1c8..635df8aa1f 100644 --- a/runtime/interpreter/mterp/mips/op_sget.S +++ b/runtime/interpreter/mterp/mips/op_sget.S @@ -1,4 +1,4 @@ -%default { "is_object":"0", "helper":"artGet32StaticFromCode" } +%default { "is_object":"0", "helper":"MterpGet32Static" } /* * General SGET handler. 
* diff --git a/runtime/interpreter/mterp/mips/op_sget_boolean.S b/runtime/interpreter/mterp/mips/op_sget_boolean.S index 45a5a70228..7829970d84 100644 --- a/runtime/interpreter/mterp/mips/op_sget_boolean.S +++ b/runtime/interpreter/mterp/mips/op_sget_boolean.S @@ -1 +1 @@ -%include "mips/op_sget.S" {"helper":"artGetBooleanStaticFromCode"} +%include "mips/op_sget.S" {"helper":"MterpGetBooleanStatic"} diff --git a/runtime/interpreter/mterp/mips/op_sget_byte.S b/runtime/interpreter/mterp/mips/op_sget_byte.S index 319122cac0..ee0834201b 100644 --- a/runtime/interpreter/mterp/mips/op_sget_byte.S +++ b/runtime/interpreter/mterp/mips/op_sget_byte.S @@ -1 +1 @@ -%include "mips/op_sget.S" {"helper":"artGetByteStaticFromCode"} +%include "mips/op_sget.S" {"helper":"MterpGetByteStatic"} diff --git a/runtime/interpreter/mterp/mips/op_sget_char.S b/runtime/interpreter/mterp/mips/op_sget_char.S index 71038478e0..d8b477a7bc 100644 --- a/runtime/interpreter/mterp/mips/op_sget_char.S +++ b/runtime/interpreter/mterp/mips/op_sget_char.S @@ -1 +1 @@ -%include "mips/op_sget.S" {"helper":"artGetCharStaticFromCode"} +%include "mips/op_sget.S" {"helper":"MterpGetCharStatic"} diff --git a/runtime/interpreter/mterp/mips/op_sget_object.S b/runtime/interpreter/mterp/mips/op_sget_object.S index b205f513aa..2dc00c386c 100644 --- a/runtime/interpreter/mterp/mips/op_sget_object.S +++ b/runtime/interpreter/mterp/mips/op_sget_object.S @@ -1 +1 @@ -%include "mips/op_sget.S" {"is_object":"1", "helper":"artGetObjStaticFromCode"} +%include "mips/op_sget.S" {"is_object":"1", "helper":"MterpGetObjStatic"} diff --git a/runtime/interpreter/mterp/mips/op_sget_short.S b/runtime/interpreter/mterp/mips/op_sget_short.S index 3301823d86..ab55d93060 100644 --- a/runtime/interpreter/mterp/mips/op_sget_short.S +++ b/runtime/interpreter/mterp/mips/op_sget_short.S @@ -1 +1 @@ -%include "mips/op_sget.S" {"helper":"artGetShortStaticFromCode"} +%include "mips/op_sget.S" {"helper":"MterpGetShortStatic"} diff --git a/runtime/interpreter/mterp/mips/op_sget_wide.S b/runtime/interpreter/mterp/mips/op_sget_wide.S index c729250003..ec4295ad03 100644 --- a/runtime/interpreter/mterp/mips/op_sget_wide.S +++ b/runtime/interpreter/mterp/mips/op_sget_wide.S @@ -2,12 +2,12 @@ * 64-bit SGET handler. */ /* sget-wide vAA, field@BBBB */ - .extern artGet64StaticFromCode + .extern MterpGet64Static EXPORT_PC() FETCH(a0, 1) # a0 <- field ref BBBB lw a1, OFF_FP_METHOD(rFP) # a1 <- method move a2, rSELF # a2 <- self - JAL(artGet64StaticFromCode) + JAL(MterpGet64Static) lw a3, THREAD_EXCEPTION_OFFSET(rSELF) bnez a3, MterpException GET_OPA(a1) # a1 <- AA diff --git a/runtime/interpreter/mterp/mips/op_sput.S b/runtime/interpreter/mterp/mips/op_sput.S index 7034a0e740..37f8687aaa 100644 --- a/runtime/interpreter/mterp/mips/op_sput.S +++ b/runtime/interpreter/mterp/mips/op_sput.S @@ -1,4 +1,4 @@ -%default { "helper":"artSet32StaticFromCode"} +%default { "helper":"MterpSet32Static"} /* * General SPUT handler. 
* diff --git a/runtime/interpreter/mterp/mips/op_sput_boolean.S b/runtime/interpreter/mterp/mips/op_sput_boolean.S index 7909ef5622..6426cd40eb 100644 --- a/runtime/interpreter/mterp/mips/op_sput_boolean.S +++ b/runtime/interpreter/mterp/mips/op_sput_boolean.S @@ -1 +1 @@ -%include "mips/op_sput.S" {"helper":"artSet8StaticFromCode"} +%include "mips/op_sput.S" {"helper":"MterpSetBooleanStatic"} diff --git a/runtime/interpreter/mterp/mips/op_sput_byte.S b/runtime/interpreter/mterp/mips/op_sput_byte.S index 7909ef5622..c68d18f2f7 100644 --- a/runtime/interpreter/mterp/mips/op_sput_byte.S +++ b/runtime/interpreter/mterp/mips/op_sput_byte.S @@ -1 +1 @@ -%include "mips/op_sput.S" {"helper":"artSet8StaticFromCode"} +%include "mips/op_sput.S" {"helper":"MterpSetByteStatic"} diff --git a/runtime/interpreter/mterp/mips/op_sput_char.S b/runtime/interpreter/mterp/mips/op_sput_char.S index 188195cc3a..9b8983e4c6 100644 --- a/runtime/interpreter/mterp/mips/op_sput_char.S +++ b/runtime/interpreter/mterp/mips/op_sput_char.S @@ -1 +1 @@ -%include "mips/op_sput.S" {"helper":"artSet16StaticFromCode"} +%include "mips/op_sput.S" {"helper":"MterpSetCharStatic"} diff --git a/runtime/interpreter/mterp/mips/op_sput_short.S b/runtime/interpreter/mterp/mips/op_sput_short.S index 188195cc3a..5a57ed9922 100644 --- a/runtime/interpreter/mterp/mips/op_sput_short.S +++ b/runtime/interpreter/mterp/mips/op_sput_short.S @@ -1 +1 @@ -%include "mips/op_sput.S" {"helper":"artSet16StaticFromCode"} +%include "mips/op_sput.S" {"helper":"MterpSetShortStatic"} diff --git a/runtime/interpreter/mterp/mips/op_sput_wide.S b/runtime/interpreter/mterp/mips/op_sput_wide.S index 3b347fc008..c090007968 100644 --- a/runtime/interpreter/mterp/mips/op_sput_wide.S +++ b/runtime/interpreter/mterp/mips/op_sput_wide.S @@ -2,15 +2,15 @@ * 64-bit SPUT handler. */ /* sput-wide vAA, field@BBBB */ - .extern artSet64IndirectStaticFromMterp + .extern MterpSet64Static EXPORT_PC() FETCH(a0, 1) # a0 <- field ref CCCC - lw a1, OFF_FP_METHOD(rFP) # a1 <- method - GET_OPA(a2) # a2 <- AA - EAS2(a2, rFP, a2) # a2 <- &fp[AA] + GET_OPA(a1) # a1 <- AA + EAS2(a1, rFP, a1) # a1 <- &fp[AA] + lw a2, OFF_FP_METHOD(rFP) # a2 <- method move a3, rSELF # a3 <- self PREFETCH_INST(2) # load rINST - JAL(artSet64IndirectStaticFromMterp) + JAL(MterpSet64Static) bnez v0, MterpException # bail out ADVANCE(2) # advance rPC GET_INST_OPCODE(t0) # extract opcode from rINST diff --git a/runtime/interpreter/mterp/mips64/op_sget.S b/runtime/interpreter/mterp/mips64/op_sget.S index bd2cfe3778..71046dba1a 100644 --- a/runtime/interpreter/mterp/mips64/op_sget.S +++ b/runtime/interpreter/mterp/mips64/op_sget.S @@ -1,4 +1,4 @@ -%default { "is_object":"0", "helper":"artGet32StaticFromCode", "extend":"" } +%default { "is_object":"0", "helper":"MterpGet32Static", "extend":"" } /* * General SGET handler wrapper. 
* diff --git a/runtime/interpreter/mterp/mips64/op_sget_boolean.S b/runtime/interpreter/mterp/mips64/op_sget_boolean.S index e7b1844d86..ec1ce9eb14 100644 --- a/runtime/interpreter/mterp/mips64/op_sget_boolean.S +++ b/runtime/interpreter/mterp/mips64/op_sget_boolean.S @@ -1 +1 @@ -%include "mips64/op_sget.S" {"helper":"artGetBooleanStaticFromCode", "extend":"and v0, v0, 0xff"} +%include "mips64/op_sget.S" {"helper":"MterpGetBooleanStatic", "extend":"and v0, v0, 0xff"} diff --git a/runtime/interpreter/mterp/mips64/op_sget_byte.S b/runtime/interpreter/mterp/mips64/op_sget_byte.S index 52a2e4a5d5..6a802f63ea 100644 --- a/runtime/interpreter/mterp/mips64/op_sget_byte.S +++ b/runtime/interpreter/mterp/mips64/op_sget_byte.S @@ -1 +1 @@ -%include "mips64/op_sget.S" {"helper":"artGetByteStaticFromCode", "extend":"seb v0, v0"} +%include "mips64/op_sget.S" {"helper":"MterpGetByteStatic", "extend":"seb v0, v0"} diff --git a/runtime/interpreter/mterp/mips64/op_sget_char.S b/runtime/interpreter/mterp/mips64/op_sget_char.S index 873d82a0d6..483d085719 100644 --- a/runtime/interpreter/mterp/mips64/op_sget_char.S +++ b/runtime/interpreter/mterp/mips64/op_sget_char.S @@ -1 +1 @@ -%include "mips64/op_sget.S" {"helper":"artGetCharStaticFromCode", "extend":"and v0, v0, 0xffff"} +%include "mips64/op_sget.S" {"helper":"MterpGetCharStatic", "extend":"and v0, v0, 0xffff"} diff --git a/runtime/interpreter/mterp/mips64/op_sget_object.S b/runtime/interpreter/mterp/mips64/op_sget_object.S index 3108417e00..2250696a97 100644 --- a/runtime/interpreter/mterp/mips64/op_sget_object.S +++ b/runtime/interpreter/mterp/mips64/op_sget_object.S @@ -1 +1 @@ -%include "mips64/op_sget.S" {"is_object":"1", "helper":"artGetObjStaticFromCode"} +%include "mips64/op_sget.S" {"is_object":"1", "helper":"MterpGetObjStatic"} diff --git a/runtime/interpreter/mterp/mips64/op_sget_short.S b/runtime/interpreter/mterp/mips64/op_sget_short.S index fed4e76baa..b257bbbba1 100644 --- a/runtime/interpreter/mterp/mips64/op_sget_short.S +++ b/runtime/interpreter/mterp/mips64/op_sget_short.S @@ -1 +1 @@ -%include "mips64/op_sget.S" {"helper":"artGetShortStaticFromCode", "extend":"seh v0, v0"} +%include "mips64/op_sget.S" {"helper":"MterpGetShortStatic", "extend":"seh v0, v0"} diff --git a/runtime/interpreter/mterp/mips64/op_sget_wide.S b/runtime/interpreter/mterp/mips64/op_sget_wide.S index 77124d1d8d..ace64f8e80 100644 --- a/runtime/interpreter/mterp/mips64/op_sget_wide.S +++ b/runtime/interpreter/mterp/mips64/op_sget_wide.S @@ -3,12 +3,12 @@ * */ /* sget-wide vAA, field//BBBB */ - .extern artGet64StaticFromCode + .extern MterpGet64Static EXPORT_PC lhu a0, 2(rPC) # a0 <- field ref BBBB ld a1, OFF_FP_METHOD(rFP) move a2, rSELF - jal artGet64StaticFromCode + jal MterpGet64Static ld a3, THREAD_EXCEPTION_OFFSET(rSELF) srl a4, rINST, 8 # a4 <- AA bnez a3, MterpException # bail out diff --git a/runtime/interpreter/mterp/mips64/op_sput.S b/runtime/interpreter/mterp/mips64/op_sput.S index 142f18f3ba..466f3339c2 100644 --- a/runtime/interpreter/mterp/mips64/op_sput.S +++ b/runtime/interpreter/mterp/mips64/op_sput.S @@ -1,4 +1,4 @@ -%default { "helper":"artSet32StaticFromCode" } +%default { "helper":"MterpSet32Static" } /* * General SPUT handler wrapper. 
* diff --git a/runtime/interpreter/mterp/mips64/op_sput_boolean.S b/runtime/interpreter/mterp/mips64/op_sput_boolean.S index f5b8dbf433..eba58f7fa1 100644 --- a/runtime/interpreter/mterp/mips64/op_sput_boolean.S +++ b/runtime/interpreter/mterp/mips64/op_sput_boolean.S @@ -1 +1 @@ -%include "mips64/op_sput.S" {"helper":"artSet8StaticFromCode"} +%include "mips64/op_sput.S" {"helper":"MterpSetBooleanStatic"} diff --git a/runtime/interpreter/mterp/mips64/op_sput_byte.S b/runtime/interpreter/mterp/mips64/op_sput_byte.S index f5b8dbf433..80a26c0161 100644 --- a/runtime/interpreter/mterp/mips64/op_sput_byte.S +++ b/runtime/interpreter/mterp/mips64/op_sput_byte.S @@ -1 +1 @@ -%include "mips64/op_sput.S" {"helper":"artSet8StaticFromCode"} +%include "mips64/op_sput.S" {"helper":"MterpSetByteStatic"} diff --git a/runtime/interpreter/mterp/mips64/op_sput_char.S b/runtime/interpreter/mterp/mips64/op_sput_char.S index c4d195c82f..c0d5bf3bba 100644 --- a/runtime/interpreter/mterp/mips64/op_sput_char.S +++ b/runtime/interpreter/mterp/mips64/op_sput_char.S @@ -1 +1 @@ -%include "mips64/op_sput.S" {"helper":"artSet16StaticFromCode"} +%include "mips64/op_sput.S" {"helper":"MterpSetCharStatic"} diff --git a/runtime/interpreter/mterp/mips64/op_sput_short.S b/runtime/interpreter/mterp/mips64/op_sput_short.S index c4d195c82f..b001832bc4 100644 --- a/runtime/interpreter/mterp/mips64/op_sput_short.S +++ b/runtime/interpreter/mterp/mips64/op_sput_short.S @@ -1 +1 @@ -%include "mips64/op_sput.S" {"helper":"artSet16StaticFromCode"} +%include "mips64/op_sput.S" {"helper":"MterpSetShortStatic"} diff --git a/runtime/interpreter/mterp/mips64/op_sput_wide.S b/runtime/interpreter/mterp/mips64/op_sput_wide.S index 828ddc15e7..aa3d5b4157 100644 --- a/runtime/interpreter/mterp/mips64/op_sput_wide.S +++ b/runtime/interpreter/mterp/mips64/op_sput_wide.S @@ -3,15 +3,15 @@ * */ /* sput-wide vAA, field//BBBB */ - .extern artSet64IndirectStaticFromMterp + .extern MterpSet64Static EXPORT_PC lhu a0, 2(rPC) # a0 <- field ref BBBB - ld a1, OFF_FP_METHOD(rFP) - srl a2, rINST, 8 # a2 <- AA - dlsa a2, a2, rFP, 2 + srl a1, rINST, 8 # a1 <- AA + dlsa a1, a1, rFP, 2 + ld a2, OFF_FP_METHOD(rFP) move a3, rSELF PREFETCH_INST 2 # Get next inst, but don't advance rPC - jal artSet64IndirectStaticFromMterp + jal MterpSet64Static bnezc v0, MterpException # 0 on success, -1 on failure ADVANCE 2 # Past exception point - now advance rPC GET_INST_OPCODE v0 # extract opcode from rINST diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc index 75ab91acba..8bf094e1b8 100644 --- a/runtime/interpreter/mterp/mterp.cc +++ b/runtime/interpreter/mterp/mterp.cc @@ -587,27 +587,6 @@ extern "C" size_t MterpSuspendCheck(Thread* self) return MterpShouldSwitchInterpreters(); } -extern "C" ssize_t artSet64IndirectStaticFromMterp(uint32_t field_idx, - ArtMethod* referrer, - uint64_t* new_value, - Thread* self) - REQUIRES_SHARED(Locks::mutator_lock_) { - ScopedQuickEntrypointChecks sqec(self); - ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int64_t)); - if (LIKELY(field != nullptr)) { - // Compiled code can't use transactional mode. - field->Set64<false>(field->GetDeclaringClass(), *new_value); - return 0; // success - } - field = FindFieldFromCode<StaticPrimitiveWrite, true>(field_idx, referrer, self, sizeof(int64_t)); - if (LIKELY(field != nullptr)) { - // Compiled code can't use transactional mode.
- field->Set64<false>(field->GetDeclaringClass(), *new_value); - return 0; // success - } - return -1; // failure -} - extern "C" ssize_t artSet8InstanceFromMterp(uint32_t field_idx, mirror::Object* obj, uint8_t new_value, @@ -689,7 +668,187 @@ extern "C" ssize_t artSetObjInstanceFromMterp(uint32_t field_idx, return -1; // failure } -extern "C" mirror::Object* artAGetObjectFromMterp(mirror::Object* arr, int32_t index) +template <typename return_type, Primitive::Type primitive_type> +ALWAYS_INLINE return_type MterpGetStatic(uint32_t field_idx, + ArtMethod* referrer, + Thread* self, + return_type (ArtField::*func)(ObjPtr<mirror::Object>)) + REQUIRES_SHARED(Locks::mutator_lock_) { + return_type res = 0; // On exception, the result will be ignored. + ArtField* f = + FindFieldFromCode<StaticPrimitiveRead, false>(field_idx, + referrer, + self, + primitive_type); + if (LIKELY(f != nullptr)) { + ObjPtr<mirror::Object> obj = f->GetDeclaringClass(); + res = (f->*func)(obj); + } + return res; +} + +extern "C" int32_t MterpGetBooleanStatic(uint32_t field_idx, + ArtMethod* referrer, + Thread* self) + REQUIRES_SHARED(Locks::mutator_lock_) { + return MterpGetStatic<uint8_t, Primitive::kPrimBoolean>(field_idx, + referrer, + self, + &ArtField::GetBoolean); +} + +extern "C" int32_t MterpGetByteStatic(uint32_t field_idx, + ArtMethod* referrer, + Thread* self) + REQUIRES_SHARED(Locks::mutator_lock_) { + return MterpGetStatic<int8_t, Primitive::kPrimByte>(field_idx, + referrer, + self, + &ArtField::GetByte); +} + +extern "C" uint32_t MterpGetCharStatic(uint32_t field_idx, + ArtMethod* referrer, + Thread* self) + REQUIRES_SHARED(Locks::mutator_lock_) { + return MterpGetStatic<uint16_t, Primitive::kPrimChar>(field_idx, + referrer, + self, + &ArtField::GetChar); +} + +extern "C" int32_t MterpGetShortStatic(uint32_t field_idx, + ArtMethod* referrer, + Thread* self) + REQUIRES_SHARED(Locks::mutator_lock_) { + return MterpGetStatic<int16_t, Primitive::kPrimShort>(field_idx, + referrer, + self, + &ArtField::GetShort); +} + +extern "C" mirror::Object* MterpGetObjStatic(uint32_t field_idx, + ArtMethod* referrer, + Thread* self) + REQUIRES_SHARED(Locks::mutator_lock_) { + return MterpGetStatic<ObjPtr<mirror::Object>, Primitive::kPrimNot>(field_idx, + referrer, + self, + &ArtField::GetObject).Ptr(); +} + +extern "C" int32_t MterpGet32Static(uint32_t field_idx, + ArtMethod* referrer, + Thread* self) + REQUIRES_SHARED(Locks::mutator_lock_) { + return MterpGetStatic<int32_t, Primitive::kPrimInt>(field_idx, + referrer, + self, + &ArtField::GetInt); +} + +extern "C" int64_t MterpGet64Static(uint32_t field_idx, ArtMethod* referrer, Thread* self) + REQUIRES_SHARED(Locks::mutator_lock_) { + return MterpGetStatic<int64_t, Primitive::kPrimLong>(field_idx, + referrer, + self, + &ArtField::GetLong); +} + + +template <typename field_type, Primitive::Type primitive_type> +int MterpSetStatic(uint32_t field_idx, + field_type new_value, + ArtMethod* referrer, + Thread* self, + void (ArtField::*func)(ObjPtr<mirror::Object>, field_type val)) + REQUIRES_SHARED(Locks::mutator_lock_) { + int res = 0; // Assume success (following quick_field_entrypoints conventions) + ArtField* f = + FindFieldFromCode<StaticPrimitiveWrite, false>(field_idx, referrer, self, primitive_type); + if (LIKELY(f != nullptr)) { + ObjPtr<mirror::Object> obj = f->GetDeclaringClass(); + (f->*func)(obj, new_value); + } else { + res = -1; // Failure + } + return res; +} + +extern "C" int MterpSetBooleanStatic(uint32_t field_idx, + uint8_t new_value, + ArtMethod* referrer, + 
Thread* self) + REQUIRES_SHARED(Locks::mutator_lock_) { + return MterpSetStatic<uint8_t, Primitive::kPrimBoolean>(field_idx, + new_value, + referrer, + self, + &ArtField::SetBoolean<false>); +} + +extern "C" int MterpSetByteStatic(uint32_t field_idx, + int8_t new_value, + ArtMethod* referrer, + Thread* self) + REQUIRES_SHARED(Locks::mutator_lock_) { + return MterpSetStatic<int8_t, Primitive::kPrimByte>(field_idx, + new_value, + referrer, + self, + &ArtField::SetByte<false>); +} + +extern "C" int MterpSetCharStatic(uint32_t field_idx, + uint16_t new_value, + ArtMethod* referrer, + Thread* self) + REQUIRES_SHARED(Locks::mutator_lock_) { + return MterpSetStatic<uint16_t, Primitive::kPrimChar>(field_idx, + new_value, + referrer, + self, + &ArtField::SetChar<false>); +} + +extern "C" int MterpSetShortStatic(uint32_t field_idx, + int16_t new_value, + ArtMethod* referrer, + Thread* self) + REQUIRES_SHARED(Locks::mutator_lock_) { + return MterpSetStatic<int16_t, Primitive::kPrimShort>(field_idx, + new_value, + referrer, + self, + &ArtField::SetShort<false>); +} + +extern "C" int MterpSet32Static(uint32_t field_idx, + int32_t new_value, + ArtMethod* referrer, + Thread* self) + REQUIRES_SHARED(Locks::mutator_lock_) { + return MterpSetStatic<int32_t, Primitive::kPrimInt>(field_idx, + new_value, + referrer, + self, + &ArtField::SetInt<false>); +} + +extern "C" int MterpSet64Static(uint32_t field_idx, + int64_t* new_value, + ArtMethod* referrer, + Thread* self) + REQUIRES_SHARED(Locks::mutator_lock_) { + return MterpSetStatic<int64_t, Primitive::kPrimLong>(field_idx, + *new_value, + referrer, + self, + &ArtField::SetLong<false>); +} + +extern "C" mirror::Object* artAGetObjectFromMterp(mirror::Object* arr, + int32_t index) REQUIRES_SHARED(Locks::mutator_lock_) { if (UNLIKELY(arr == nullptr)) { ThrowNullPointerExceptionFromInterpreter(); @@ -703,7 +862,8 @@ extern "C" mirror::Object* artAGetObjectFromMterp(mirror::Object* arr, int32_t i } } -extern "C" mirror::Object* artIGetObjectFromMterp(mirror::Object* obj, uint32_t field_offset) +extern "C" mirror::Object* artIGetObjectFromMterp(mirror::Object* obj, + uint32_t field_offset) REQUIRES_SHARED(Locks::mutator_lock_) { if (UNLIKELY(obj == nullptr)) { ThrowNullPointerExceptionFromInterpreter(); diff --git a/runtime/interpreter/mterp/out/mterp_arm.S b/runtime/interpreter/mterp/out/mterp_arm.S index 576020fff0..e2b693f269 100644 --- a/runtime/interpreter/mterp/out/mterp_arm.S +++ b/runtime/interpreter/mterp/out/mterp_arm.S @@ -2631,12 +2631,12 @@ artMterpAsmInstructionStart = .L_op_nop */ /* op vAA, field@BBBB */ - .extern artGet32StaticFromCode + .extern MterpGet32Static EXPORT_PC FETCH r0, 1 @ r0<- field ref BBBB ldr r1, [rFP, #OFF_FP_METHOD] mov r2, rSELF - bl artGet32StaticFromCode + bl MterpGet32Static ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET] mov r2, rINST, lsr #8 @ r2<- AA PREFETCH_INST 2 @@ -2661,12 +2661,12 @@ artMterpAsmInstructionStart = .L_op_nop */ /* sget-wide vAA, field@BBBB */ - .extern artGet64StaticFromCode + .extern MterpGet64Static EXPORT_PC FETCH r0, 1 @ r0<- field ref BBBB ldr r1, [rFP, #OFF_FP_METHOD] mov r2, rSELF - bl artGet64StaticFromCode + bl MterpGet64Static ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET] mov r9, rINST, lsr #8 @ r9<- AA VREG_INDEX_TO_ADDR lr, r9 @ r9<- &fp[AA] @@ -2690,12 +2690,12 @@ artMterpAsmInstructionStart = .L_op_nop */ /* op vAA, field@BBBB */ - .extern artGetObjStaticFromCode + .extern MterpGetObjStatic EXPORT_PC FETCH r0, 1 @ r0<- field ref BBBB ldr r1, [rFP, #OFF_FP_METHOD] mov r2, rSELF - bl 
artGetObjStaticFromCode + bl MterpGetObjStatic ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET] mov r2, rINST, lsr #8 @ r2<- AA PREFETCH_INST 2 @@ -2723,12 +2723,12 @@ artMterpAsmInstructionStart = .L_op_nop */ /* op vAA, field@BBBB */ - .extern artGetBooleanStaticFromCode + .extern MterpGetBooleanStatic EXPORT_PC FETCH r0, 1 @ r0<- field ref BBBB ldr r1, [rFP, #OFF_FP_METHOD] mov r2, rSELF - bl artGetBooleanStaticFromCode + bl MterpGetBooleanStatic ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET] mov r2, rINST, lsr #8 @ r2<- AA PREFETCH_INST 2 @@ -2756,12 +2756,12 @@ artMterpAsmInstructionStart = .L_op_nop */ /* op vAA, field@BBBB */ - .extern artGetByteStaticFromCode + .extern MterpGetByteStatic EXPORT_PC FETCH r0, 1 @ r0<- field ref BBBB ldr r1, [rFP, #OFF_FP_METHOD] mov r2, rSELF - bl artGetByteStaticFromCode + bl MterpGetByteStatic ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET] mov r2, rINST, lsr #8 @ r2<- AA PREFETCH_INST 2 @@ -2789,12 +2789,12 @@ artMterpAsmInstructionStart = .L_op_nop */ /* op vAA, field@BBBB */ - .extern artGetCharStaticFromCode + .extern MterpGetCharStatic EXPORT_PC FETCH r0, 1 @ r0<- field ref BBBB ldr r1, [rFP, #OFF_FP_METHOD] mov r2, rSELF - bl artGetCharStaticFromCode + bl MterpGetCharStatic ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET] mov r2, rINST, lsr #8 @ r2<- AA PREFETCH_INST 2 @@ -2822,12 +2822,12 @@ artMterpAsmInstructionStart = .L_op_nop */ /* op vAA, field@BBBB */ - .extern artGetShortStaticFromCode + .extern MterpGetShortStatic EXPORT_PC FETCH r0, 1 @ r0<- field ref BBBB ldr r1, [rFP, #OFF_FP_METHOD] mov r2, rSELF - bl artGetShortStaticFromCode + bl MterpGetShortStatic ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET] mov r2, rINST, lsr #8 @ r2<- AA PREFETCH_INST 2 @@ -2860,7 +2860,7 @@ artMterpAsmInstructionStart = .L_op_nop ldr r2, [rFP, #OFF_FP_METHOD] mov r3, rSELF PREFETCH_INST 2 @ Get next inst, but don't advance rPC - bl artSet32StaticFromCode + bl MterpSet32Static cmp r0, #0 @ 0 on success, -1 on failure bne MterpException ADVANCE 2 @ Past exception point - now advance rPC @@ -2876,15 +2876,15 @@ artMterpAsmInstructionStart = .L_op_nop * */ /* sput-wide vAA, field@BBBB */ - .extern artSet64IndirectStaticFromMterp + .extern MterpSet64Static EXPORT_PC FETCH r0, 1 @ r0<- field ref BBBB - ldr r1, [rFP, #OFF_FP_METHOD] - mov r2, rINST, lsr #8 @ r3<- AA - VREG_INDEX_TO_ADDR r2, r2 + mov r1, rINST, lsr #8 @ r1<- AA + VREG_INDEX_TO_ADDR r1, r1 + ldr r2, [rFP, #OFF_FP_METHOD] mov r3, rSELF PREFETCH_INST 2 @ Get next inst, but don't advance rPC - bl artSet64IndirectStaticFromMterp + bl MterpSet64Static cmp r0, #0 @ 0 on success, -1 on failure bne MterpException ADVANCE 2 @ Past exception point - now advance rPC @@ -2925,7 +2925,7 @@ artMterpAsmInstructionStart = .L_op_nop ldr r2, [rFP, #OFF_FP_METHOD] mov r3, rSELF PREFETCH_INST 2 @ Get next inst, but don't advance rPC - bl artSet8StaticFromCode + bl MterpSetBooleanStatic cmp r0, #0 @ 0 on success, -1 on failure bne MterpException ADVANCE 2 @ Past exception point - now advance rPC @@ -2951,7 +2951,7 @@ artMterpAsmInstructionStart = .L_op_nop ldr r2, [rFP, #OFF_FP_METHOD] mov r3, rSELF PREFETCH_INST 2 @ Get next inst, but don't advance rPC - bl artSet8StaticFromCode + bl MterpSetByteStatic cmp r0, #0 @ 0 on success, -1 on failure bne MterpException ADVANCE 2 @ Past exception point - now advance rPC @@ -2977,7 +2977,7 @@ artMterpAsmInstructionStart = .L_op_nop ldr r2, [rFP, #OFF_FP_METHOD] mov r3, rSELF PREFETCH_INST 2 @ Get next inst, but don't advance rPC - bl artSet16StaticFromCode + bl MterpSetCharStatic cmp r0, #0 @ 0 on 
success, -1 on failure bne MterpException ADVANCE 2 @ Past exception point - now advance rPC @@ -3003,7 +3003,7 @@ artMterpAsmInstructionStart = .L_op_nop ldr r2, [rFP, #OFF_FP_METHOD] mov r3, rSELF PREFETCH_INST 2 @ Get next inst, but don't advance rPC - bl artSet16StaticFromCode + bl MterpSetShortStatic cmp r0, #0 @ 0 on success, -1 on failure bne MterpException ADVANCE 2 @ Past exception point - now advance rPC diff --git a/runtime/interpreter/mterp/out/mterp_arm64.S b/runtime/interpreter/mterp/out/mterp_arm64.S index a7b55877b2..ef5a4daa51 100644 --- a/runtime/interpreter/mterp/out/mterp_arm64.S +++ b/runtime/interpreter/mterp/out/mterp_arm64.S @@ -2543,12 +2543,12 @@ artMterpAsmInstructionStart = .L_op_nop */ /* op vAA, field//BBBB */ - .extern artGet32StaticFromCode + .extern MterpGet32Static EXPORT_PC FETCH w0, 1 // w0<- field ref BBBB ldr x1, [xFP, #OFF_FP_METHOD] mov x2, xSELF - bl artGet32StaticFromCode + bl MterpGet32Static ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET] lsr w2, wINST, #8 // w2<- AA @@ -2573,12 +2573,12 @@ artMterpAsmInstructionStart = .L_op_nop */ /* sget-wide vAA, field//BBBB */ - .extern artGet64StaticFromCode + .extern MterpGet64Static EXPORT_PC FETCH w0, 1 // w0<- field ref BBBB ldr x1, [xFP, #OFF_FP_METHOD] mov x2, xSELF - bl artGet64StaticFromCode + bl MterpGet64Static ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET] lsr w4, wINST, #8 // w4<- AA cbnz x3, MterpException // bail out @@ -2599,12 +2599,12 @@ artMterpAsmInstructionStart = .L_op_nop */ /* op vAA, field//BBBB */ - .extern artGetObjStaticFromCode + .extern MterpGetObjStatic EXPORT_PC FETCH w0, 1 // w0<- field ref BBBB ldr x1, [xFP, #OFF_FP_METHOD] mov x2, xSELF - bl artGetObjStaticFromCode + bl MterpGetObjStatic ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET] lsr w2, wINST, #8 // w2<- AA @@ -2632,12 +2632,12 @@ artMterpAsmInstructionStart = .L_op_nop */ /* op vAA, field//BBBB */ - .extern artGetBooleanStaticFromCode + .extern MterpGetBooleanStatic EXPORT_PC FETCH w0, 1 // w0<- field ref BBBB ldr x1, [xFP, #OFF_FP_METHOD] mov x2, xSELF - bl artGetBooleanStaticFromCode + bl MterpGetBooleanStatic ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET] lsr w2, wINST, #8 // w2<- AA uxtb w0, w0 @@ -2665,12 +2665,12 @@ artMterpAsmInstructionStart = .L_op_nop */ /* op vAA, field//BBBB */ - .extern artGetByteStaticFromCode + .extern MterpGetByteStatic EXPORT_PC FETCH w0, 1 // w0<- field ref BBBB ldr x1, [xFP, #OFF_FP_METHOD] mov x2, xSELF - bl artGetByteStaticFromCode + bl MterpGetByteStatic ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET] lsr w2, wINST, #8 // w2<- AA sxtb w0, w0 @@ -2698,12 +2698,12 @@ artMterpAsmInstructionStart = .L_op_nop */ /* op vAA, field//BBBB */ - .extern artGetCharStaticFromCode + .extern MterpGetCharStatic EXPORT_PC FETCH w0, 1 // w0<- field ref BBBB ldr x1, [xFP, #OFF_FP_METHOD] mov x2, xSELF - bl artGetCharStaticFromCode + bl MterpGetCharStatic ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET] lsr w2, wINST, #8 // w2<- AA uxth w0, w0 @@ -2731,12 +2731,12 @@ artMterpAsmInstructionStart = .L_op_nop */ /* op vAA, field//BBBB */ - .extern artGetShortStaticFromCode + .extern MterpGetShortStatic EXPORT_PC FETCH w0, 1 // w0<- field ref BBBB ldr x1, [xFP, #OFF_FP_METHOD] mov x2, xSELF - bl artGetShortStaticFromCode + bl MterpGetShortStatic ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET] lsr w2, wINST, #8 // w2<- AA sxth w0, w0 @@ -2769,7 +2769,7 @@ artMterpAsmInstructionStart = .L_op_nop ldr x2, [xFP, #OFF_FP_METHOD] mov x3, xSELF PREFETCH_INST 2 // Get next inst, but don't advance rPC - bl artSet32StaticFromCode + bl
MterpSet32Static cbnz w0, MterpException // 0 on success ADVANCE 2 // Past exception point - now advance rPC GET_INST_OPCODE ip // extract opcode from rINST @@ -2784,15 +2784,15 @@ artMterpAsmInstructionStart = .L_op_nop * */ /* sput-wide vAA, field//BBBB */ - .extern artSet64IndirectStaticFromMterp + .extern MterpSet64Static EXPORT_PC FETCH w0, 1 // w0<- field ref BBBB - ldr x1, [xFP, #OFF_FP_METHOD] - lsr w2, wINST, #8 // w3<- AA - VREG_INDEX_TO_ADDR x2, w2 + lsr w1, wINST, #8 // w1<- AA + VREG_INDEX_TO_ADDR x1, w1 + ldr x2, [xFP, #OFF_FP_METHOD] mov x3, xSELF PREFETCH_INST 2 // Get next inst, but don't advance rPC - bl artSet64IndirectStaticFromMterp + bl MterpSet64Static cbnz w0, MterpException // 0 on success, -1 on failure ADVANCE 2 // Past exception point - now advance rPC GET_INST_OPCODE ip // extract opcode from wINST @@ -2831,7 +2831,7 @@ artMterpAsmInstructionStart = .L_op_nop ldr x2, [xFP, #OFF_FP_METHOD] mov x3, xSELF PREFETCH_INST 2 // Get next inst, but don't advance rPC - bl artSet8StaticFromCode + bl MterpSetBooleanStatic cbnz w0, MterpException // 0 on success ADVANCE 2 // Past exception point - now advance rPC GET_INST_OPCODE ip // extract opcode from rINST @@ -2856,7 +2856,7 @@ artMterpAsmInstructionStart = .L_op_nop ldr x2, [xFP, #OFF_FP_METHOD] mov x3, xSELF PREFETCH_INST 2 // Get next inst, but don't advance rPC - bl artSet8StaticFromCode + bl MterpSetByteStatic cbnz w0, MterpException // 0 on success ADVANCE 2 // Past exception point - now advance rPC GET_INST_OPCODE ip // extract opcode from rINST @@ -2881,7 +2881,7 @@ artMterpAsmInstructionStart = .L_op_nop ldr x2, [xFP, #OFF_FP_METHOD] mov x3, xSELF PREFETCH_INST 2 // Get next inst, but don't advance rPC - bl artSet16StaticFromCode + bl MterpSetCharStatic cbnz w0, MterpException // 0 on success ADVANCE 2 // Past exception point - now advance rPC GET_INST_OPCODE ip // extract opcode from rINST @@ -2906,7 +2906,7 @@ artMterpAsmInstructionStart = .L_op_nop ldr x2, [xFP, #OFF_FP_METHOD] mov x3, xSELF PREFETCH_INST 2 // Get next inst, but don't advance rPC - bl artSet16StaticFromCode + bl MterpSetShortStatic cbnz w0, MterpException // 0 on success ADVANCE 2 // Past exception point - now advance rPC GET_INST_OPCODE ip // extract opcode from rINST diff --git a/runtime/interpreter/mterp/out/mterp_mips.S b/runtime/interpreter/mterp/out/mterp_mips.S index b47c0195ba..579afc2387 100644 --- a/runtime/interpreter/mterp/out/mterp_mips.S +++ b/runtime/interpreter/mterp/out/mterp_mips.S @@ -3038,12 +3038,12 @@ artMterpAsmInstructionStart = .L_op_nop * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short */ /* op vAA, field@BBBB */ - .extern artGet32StaticFromCode + .extern MterpGet32Static EXPORT_PC() FETCH(a0, 1) # a0 <- field ref BBBB lw a1, OFF_FP_METHOD(rFP) # a1 <- method move a2, rSELF # a2 <- self - JAL(artGet32StaticFromCode) + JAL(MterpGet32Static) lw a3, THREAD_EXCEPTION_OFFSET(rSELF) GET_OPA(a2) # a2 <- AA PREFETCH_INST(2) @@ -3064,12 +3064,12 @@ artMterpAsmInstructionStart = .L_op_nop * 64-bit SGET handler. 
*/ /* sget-wide vAA, field@BBBB */ - .extern artGet64StaticFromCode + .extern MterpGet64Static EXPORT_PC() FETCH(a0, 1) # a0 <- field ref BBBB lw a1, OFF_FP_METHOD(rFP) # a1 <- method move a2, rSELF # a2 <- self - JAL(artGet64StaticFromCode) + JAL(MterpGet64Static) lw a3, THREAD_EXCEPTION_OFFSET(rSELF) bnez a3, MterpException GET_OPA(a1) # a1 <- AA @@ -3088,12 +3088,12 @@ artMterpAsmInstructionStart = .L_op_nop * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short */ /* op vAA, field@BBBB */ - .extern artGetObjStaticFromCode + .extern MterpGetObjStatic EXPORT_PC() FETCH(a0, 1) # a0 <- field ref BBBB lw a1, OFF_FP_METHOD(rFP) # a1 <- method move a2, rSELF # a2 <- self - JAL(artGetObjStaticFromCode) + JAL(MterpGetObjStatic) lw a3, THREAD_EXCEPTION_OFFSET(rSELF) GET_OPA(a2) # a2 <- AA PREFETCH_INST(2) @@ -3118,12 +3118,12 @@ artMterpAsmInstructionStart = .L_op_nop * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short */ /* op vAA, field@BBBB */ - .extern artGetBooleanStaticFromCode + .extern MterpGetBooleanStatic EXPORT_PC() FETCH(a0, 1) # a0 <- field ref BBBB lw a1, OFF_FP_METHOD(rFP) # a1 <- method move a2, rSELF # a2 <- self - JAL(artGetBooleanStaticFromCode) + JAL(MterpGetBooleanStatic) lw a3, THREAD_EXCEPTION_OFFSET(rSELF) GET_OPA(a2) # a2 <- AA PREFETCH_INST(2) @@ -3148,12 +3148,12 @@ artMterpAsmInstructionStart = .L_op_nop * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short */ /* op vAA, field@BBBB */ - .extern artGetByteStaticFromCode + .extern MterpGetByteStatic EXPORT_PC() FETCH(a0, 1) # a0 <- field ref BBBB lw a1, OFF_FP_METHOD(rFP) # a1 <- method move a2, rSELF # a2 <- self - JAL(artGetByteStaticFromCode) + JAL(MterpGetByteStatic) lw a3, THREAD_EXCEPTION_OFFSET(rSELF) GET_OPA(a2) # a2 <- AA PREFETCH_INST(2) @@ -3178,12 +3178,12 @@ artMterpAsmInstructionStart = .L_op_nop * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short */ /* op vAA, field@BBBB */ - .extern artGetCharStaticFromCode + .extern MterpGetCharStatic EXPORT_PC() FETCH(a0, 1) # a0 <- field ref BBBB lw a1, OFF_FP_METHOD(rFP) # a1 <- method move a2, rSELF # a2 <- self - JAL(artGetCharStaticFromCode) + JAL(MterpGetCharStatic) lw a3, THREAD_EXCEPTION_OFFSET(rSELF) GET_OPA(a2) # a2 <- AA PREFETCH_INST(2) @@ -3208,12 +3208,12 @@ artMterpAsmInstructionStart = .L_op_nop * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short */ /* op vAA, field@BBBB */ - .extern artGetShortStaticFromCode + .extern MterpGetShortStatic EXPORT_PC() FETCH(a0, 1) # a0 <- field ref BBBB lw a1, OFF_FP_METHOD(rFP) # a1 <- method move a2, rSELF # a2 <- self - JAL(artGetShortStaticFromCode) + JAL(MterpGetShortStatic) lw a3, THREAD_EXCEPTION_OFFSET(rSELF) GET_OPA(a2) # a2 <- AA PREFETCH_INST(2) @@ -3244,7 +3244,7 @@ artMterpAsmInstructionStart = .L_op_nop lw a2, OFF_FP_METHOD(rFP) # a2 <- method move a3, rSELF # a3 <- self PREFETCH_INST(2) # load rINST - JAL(artSet32StaticFromCode) + JAL(MterpSet32Static) bnez v0, MterpException # bail out ADVANCE(2) # advance rPC GET_INST_OPCODE(t0) # extract opcode from rINST @@ -3258,15 +3258,15 @@ artMterpAsmInstructionStart = .L_op_nop * 64-bit SPUT handler. 
*/ /* sput-wide vAA, field@BBBB */ - .extern artSet64IndirectStaticFromMterp + .extern MterpSet64Static EXPORT_PC() FETCH(a0, 1) # a0 <- field ref CCCC - lw a1, OFF_FP_METHOD(rFP) # a1 <- method - GET_OPA(a2) # a2 <- AA - EAS2(a2, rFP, a2) # a2 <- &fp[AA] + GET_OPA(a1) # a1 <- AA + EAS2(a1, rFP, a1) # a1 <- &fp[AA] + lw a2, OFF_FP_METHOD(rFP) # a2 <- method move a3, rSELF # a3 <- self PREFETCH_INST(2) # load rINST - JAL(artSet64IndirectStaticFromMterp) + JAL(MterpSet64Static) bnez v0, MterpException # bail out ADVANCE(2) # advance rPC GET_INST_OPCODE(t0) # extract opcode from rINST @@ -3311,7 +3311,7 @@ artMterpAsmInstructionStart = .L_op_nop lw a2, OFF_FP_METHOD(rFP) # a2 <- method move a3, rSELF # a3 <- self PREFETCH_INST(2) # load rINST - JAL(artSet8StaticFromCode) + JAL(MterpSetBooleanStatic) bnez v0, MterpException # bail out ADVANCE(2) # advance rPC GET_INST_OPCODE(t0) # extract opcode from rINST @@ -3336,7 +3336,7 @@ artMterpAsmInstructionStart = .L_op_nop lw a2, OFF_FP_METHOD(rFP) # a2 <- method move a3, rSELF # a3 <- self PREFETCH_INST(2) # load rINST - JAL(artSet8StaticFromCode) + JAL(MterpSetByteStatic) bnez v0, MterpException # bail out ADVANCE(2) # advance rPC GET_INST_OPCODE(t0) # extract opcode from rINST @@ -3361,7 +3361,7 @@ artMterpAsmInstructionStart = .L_op_nop lw a2, OFF_FP_METHOD(rFP) # a2 <- method move a3, rSELF # a3 <- self PREFETCH_INST(2) # load rINST - JAL(artSet16StaticFromCode) + JAL(MterpSetCharStatic) bnez v0, MterpException # bail out ADVANCE(2) # advance rPC GET_INST_OPCODE(t0) # extract opcode from rINST @@ -3386,7 +3386,7 @@ artMterpAsmInstructionStart = .L_op_nop lw a2, OFF_FP_METHOD(rFP) # a2 <- method move a3, rSELF # a3 <- self PREFETCH_INST(2) # load rINST - JAL(artSet16StaticFromCode) + JAL(MterpSetShortStatic) bnez v0, MterpException # bail out ADVANCE(2) # advance rPC GET_INST_OPCODE(t0) # extract opcode from rINST diff --git a/runtime/interpreter/mterp/out/mterp_mips64.S b/runtime/interpreter/mterp/out/mterp_mips64.S index e1867d035a..3656df9a8e 100644 --- a/runtime/interpreter/mterp/out/mterp_mips64.S +++ b/runtime/interpreter/mterp/out/mterp_mips64.S @@ -2585,12 +2585,12 @@ artMterpAsmInstructionStart = .L_op_nop * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short */ /* op vAA, field//BBBB */ - .extern artGet32StaticFromCode + .extern MterpGet32Static EXPORT_PC lhu a0, 2(rPC) # a0 <- field ref BBBB ld a1, OFF_FP_METHOD(rFP) move a2, rSELF - jal artGet32StaticFromCode + jal MterpGet32Static ld a3, THREAD_EXCEPTION_OFFSET(rSELF) srl a2, rINST, 8 # a2 <- AA @@ -2614,12 +2614,12 @@ artMterpAsmInstructionStart = .L_op_nop * */ /* sget-wide vAA, field//BBBB */ - .extern artGet64StaticFromCode + .extern MterpGet64Static EXPORT_PC lhu a0, 2(rPC) # a0 <- field ref BBBB ld a1, OFF_FP_METHOD(rFP) move a2, rSELF - jal artGet64StaticFromCode + jal MterpGet64Static ld a3, THREAD_EXCEPTION_OFFSET(rSELF) srl a4, rINST, 8 # a4 <- AA bnez a3, MterpException # bail out @@ -2639,12 +2639,12 @@ artMterpAsmInstructionStart = .L_op_nop * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short */ /* op vAA, field//BBBB */ - .extern artGetObjStaticFromCode + .extern MterpGetObjStatic EXPORT_PC lhu a0, 2(rPC) # a0 <- field ref BBBB ld a1, OFF_FP_METHOD(rFP) move a2, rSELF - jal artGetObjStaticFromCode + jal MterpGetObjStatic ld a3, THREAD_EXCEPTION_OFFSET(rSELF) srl a2, rINST, 8 # a2 <- AA @@ -2671,12 +2671,12 @@ artMterpAsmInstructionStart = .L_op_nop * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short */ /* op 
vAA, field//BBBB */ - .extern artGetBooleanStaticFromCode + .extern MterpGetBooleanStatic EXPORT_PC lhu a0, 2(rPC) # a0 <- field ref BBBB ld a1, OFF_FP_METHOD(rFP) move a2, rSELF - jal artGetBooleanStaticFromCode + jal MterpGetBooleanStatic ld a3, THREAD_EXCEPTION_OFFSET(rSELF) srl a2, rINST, 8 # a2 <- AA and v0, v0, 0xff @@ -2703,12 +2703,12 @@ artMterpAsmInstructionStart = .L_op_nop * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short */ /* op vAA, field//BBBB */ - .extern artGetByteStaticFromCode + .extern MterpGetByteStatic EXPORT_PC lhu a0, 2(rPC) # a0 <- field ref BBBB ld a1, OFF_FP_METHOD(rFP) move a2, rSELF - jal artGetByteStaticFromCode + jal MterpGetByteStatic ld a3, THREAD_EXCEPTION_OFFSET(rSELF) srl a2, rINST, 8 # a2 <- AA seb v0, v0 @@ -2735,12 +2735,12 @@ artMterpAsmInstructionStart = .L_op_nop * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short */ /* op vAA, field//BBBB */ - .extern artGetCharStaticFromCode + .extern MterpGetCharStatic EXPORT_PC lhu a0, 2(rPC) # a0 <- field ref BBBB ld a1, OFF_FP_METHOD(rFP) move a2, rSELF - jal artGetCharStaticFromCode + jal MterpGetCharStatic ld a3, THREAD_EXCEPTION_OFFSET(rSELF) srl a2, rINST, 8 # a2 <- AA and v0, v0, 0xffff @@ -2767,12 +2767,12 @@ artMterpAsmInstructionStart = .L_op_nop * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short */ /* op vAA, field//BBBB */ - .extern artGetShortStaticFromCode + .extern MterpGetShortStatic EXPORT_PC lhu a0, 2(rPC) # a0 <- field ref BBBB ld a1, OFF_FP_METHOD(rFP) move a2, rSELF - jal artGetShortStaticFromCode + jal MterpGetShortStatic ld a3, THREAD_EXCEPTION_OFFSET(rSELF) srl a2, rINST, 8 # a2 <- AA seh v0, v0 @@ -2798,7 +2798,7 @@ artMterpAsmInstructionStart = .L_op_nop * for: sput, sput-boolean, sput-byte, sput-char, sput-short */ /* op vAA, field//BBBB */ - .extern artSet32StaticFromCode + .extern MterpSet32Static EXPORT_PC lhu a0, 2(rPC) # a0 <- field ref BBBB srl a3, rINST, 8 # a3 <- AA @@ -2806,7 +2806,7 @@ artMterpAsmInstructionStart = .L_op_nop ld a2, OFF_FP_METHOD(rFP) move a3, rSELF PREFETCH_INST 2 # Get next inst, but don't advance rPC - jal artSet32StaticFromCode + jal MterpSet32Static bnezc v0, MterpException # 0 on success ADVANCE 2 # Past exception point - now advance rPC GET_INST_OPCODE v0 # extract opcode from rINST @@ -2821,15 +2821,15 @@ artMterpAsmInstructionStart = .L_op_nop * */ /* sput-wide vAA, field//BBBB */ - .extern artSet64IndirectStaticFromMterp + .extern MterpSet64Static EXPORT_PC lhu a0, 2(rPC) # a0 <- field ref BBBB - ld a1, OFF_FP_METHOD(rFP) - srl a2, rINST, 8 # a2 <- AA - dlsa a2, a2, rFP, 2 + srl a1, rINST, 8 # a1 <- AA + dlsa a1, a1, rFP, 2 + ld a2, OFF_FP_METHOD(rFP) move a3, rSELF PREFETCH_INST 2 # Get next inst, but don't advance rPC - jal artSet64IndirectStaticFromMterp + jal MterpSet64Static bnezc v0, MterpException # 0 on success, -1 on failure ADVANCE 2 # Past exception point - now advance rPC GET_INST_OPCODE v0 # extract opcode from rINST @@ -2862,7 +2862,7 @@ artMterpAsmInstructionStart = .L_op_nop * for: sput, sput-boolean, sput-byte, sput-char, sput-short */ /* op vAA, field//BBBB */ - .extern artSet8StaticFromCode + .extern MterpSetBooleanStatic EXPORT_PC lhu a0, 2(rPC) # a0 <- field ref BBBB srl a3, rINST, 8 # a3 <- AA @@ -2870,7 +2870,7 @@ artMterpAsmInstructionStart = .L_op_nop ld a2, OFF_FP_METHOD(rFP) move a3, rSELF PREFETCH_INST 2 # Get next inst, but don't advance rPC - jal artSet8StaticFromCode + jal MterpSetBooleanStatic bnezc v0, MterpException # 0 on success ADVANCE 2 # Past
exception point - now advance rPC GET_INST_OPCODE v0 # extract opcode from rINST @@ -2888,7 +2888,7 @@ artMterpAsmInstructionStart = .L_op_nop * for: sput, sput-boolean, sput-byte, sput-char, sput-short */ /* op vAA, field//BBBB */ - .extern artSet8StaticFromCode + .extern MterpSetByteStatic EXPORT_PC lhu a0, 2(rPC) # a0 <- field ref BBBB srl a3, rINST, 8 # a3 <- AA @@ -2896,7 +2896,7 @@ artMterpAsmInstructionStart = .L_op_nop ld a2, OFF_FP_METHOD(rFP) move a3, rSELF PREFETCH_INST 2 # Get next inst, but don't advance rPC - jal artSet8StaticFromCode + jal MterpSetByteStatic bnezc v0, MterpException # 0 on success ADVANCE 2 # Past exception point - now advance rPC GET_INST_OPCODE v0 # extract opcode from rINST @@ -2914,7 +2914,7 @@ artMterpAsmInstructionStart = .L_op_nop * for: sput, sput-boolean, sput-byte, sput-char, sput-short */ /* op vAA, field//BBBB */ - .extern artSet16StaticFromCode + .extern MterpSetCharStatic EXPORT_PC lhu a0, 2(rPC) # a0 <- field ref BBBB srl a3, rINST, 8 # a3 <- AA @@ -2922,7 +2922,7 @@ artMterpAsmInstructionStart = .L_op_nop ld a2, OFF_FP_METHOD(rFP) move a3, rSELF PREFETCH_INST 2 # Get next inst, but don't advance rPC - jal artSet16StaticFromCode + jal MterpSetCharStatic bnezc v0, MterpException # 0 on success ADVANCE 2 # Past exception point - now advance rPC GET_INST_OPCODE v0 # extract opcode from rINST @@ -2940,7 +2940,7 @@ artMterpAsmInstructionStart = .L_op_nop * for: sput, sput-boolean, sput-byte, sput-char, sput-short */ /* op vAA, field//BBBB */ - .extern artSet16StaticFromCode + .extern MterpSetShortStatic EXPORT_PC lhu a0, 2(rPC) # a0 <- field ref BBBB srl a3, rINST, 8 # a3 <- AA @@ -2948,7 +2948,7 @@ artMterpAsmInstructionStart = .L_op_nop ld a2, OFF_FP_METHOD(rFP) move a3, rSELF PREFETCH_INST 2 # Get next inst, but don't advance rPC - jal artSet16StaticFromCode + jal MterpSetShortStatic bnezc v0, MterpException # 0 on success ADVANCE 2 # Past exception point - now advance rPC GET_INST_OPCODE v0 # extract opcode from rINST diff --git a/runtime/interpreter/mterp/out/mterp_x86.S b/runtime/interpreter/mterp/out/mterp_x86.S index aab20f5126..21d9671f8b 100644 --- a/runtime/interpreter/mterp/out/mterp_x86.S +++ b/runtime/interpreter/mterp/out/mterp_x86.S @@ -2535,7 +2535,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short */ /* op vAA, field@BBBB */ - .extern artGet32StaticFromCode + .extern MterpGet32Static EXPORT_PC movzwl 2(rPC), %eax movl %eax, OUT_ARG0(%esp) # field ref CCCC @@ -2543,7 +2543,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop movl %eax, OUT_ARG1(%esp) # referrer movl rSELF, %ecx movl %ecx, OUT_ARG2(%esp) # self - call SYMBOL(artGet32StaticFromCode) + call SYMBOL(MterpGet32Static) movl rSELF, %ecx RESTORE_IBASE_FROM_SELF %ecx cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx) @@ -2564,7 +2564,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop * */ /* sget-wide vAA, field@BBBB */ - .extern artGet64StaticFromCode + .extern MterpGet64Static EXPORT_PC movzwl 2(rPC), %eax movl %eax, OUT_ARG0(%esp) # field ref CCCC @@ -2572,7 +2572,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop movl %eax, OUT_ARG1(%esp) # referrer movl rSELF, %ecx movl %ecx, OUT_ARG2(%esp) # self - call SYMBOL(artGet64StaticFromCode) + call SYMBOL(MterpGet64Static) movl rSELF, %ecx cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx) jnz MterpException @@ -2592,7 +2592,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short */ /* op vAA, field@BBBB 
*/ - .extern artGetObjStaticFromCode + .extern MterpGetObjStatic EXPORT_PC movzwl 2(rPC), %eax movl %eax, OUT_ARG0(%esp) # field ref CCCC @@ -2600,7 +2600,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop movl %eax, OUT_ARG1(%esp) # referrer movl rSELF, %ecx movl %ecx, OUT_ARG2(%esp) # self - call SYMBOL(artGetObjStaticFromCode) + call SYMBOL(MterpGetObjStatic) movl rSELF, %ecx RESTORE_IBASE_FROM_SELF %ecx cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx) @@ -2624,7 +2624,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short */ /* op vAA, field@BBBB */ - .extern artGetBooleanStaticFromCode + .extern MterpGetBooleanStatic EXPORT_PC movzwl 2(rPC), %eax movl %eax, OUT_ARG0(%esp) # field ref CCCC @@ -2632,7 +2632,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop movl %eax, OUT_ARG1(%esp) # referrer movl rSELF, %ecx movl %ecx, OUT_ARG2(%esp) # self - call SYMBOL(artGetBooleanStaticFromCode) + call SYMBOL(MterpGetBooleanStatic) movl rSELF, %ecx RESTORE_IBASE_FROM_SELF %ecx cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx) @@ -2656,7 +2656,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short */ /* op vAA, field@BBBB */ - .extern artGetByteStaticFromCode + .extern MterpGetByteStatic EXPORT_PC movzwl 2(rPC), %eax movl %eax, OUT_ARG0(%esp) # field ref CCCC @@ -2664,7 +2664,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop movl %eax, OUT_ARG1(%esp) # referrer movl rSELF, %ecx movl %ecx, OUT_ARG2(%esp) # self - call SYMBOL(artGetByteStaticFromCode) + call SYMBOL(MterpGetByteStatic) movl rSELF, %ecx RESTORE_IBASE_FROM_SELF %ecx cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx) @@ -2688,7 +2688,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short */ /* op vAA, field@BBBB */ - .extern artGetCharStaticFromCode + .extern MterpGetCharStatic EXPORT_PC movzwl 2(rPC), %eax movl %eax, OUT_ARG0(%esp) # field ref CCCC @@ -2696,7 +2696,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop movl %eax, OUT_ARG1(%esp) # referrer movl rSELF, %ecx movl %ecx, OUT_ARG2(%esp) # self - call SYMBOL(artGetCharStaticFromCode) + call SYMBOL(MterpGetCharStatic) movl rSELF, %ecx RESTORE_IBASE_FROM_SELF %ecx cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx) @@ -2720,7 +2720,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short */ /* op vAA, field@BBBB */ - .extern artGetShortStaticFromCode + .extern MterpGetShortStatic EXPORT_PC movzwl 2(rPC), %eax movl %eax, OUT_ARG0(%esp) # field ref CCCC @@ -2728,7 +2728,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop movl %eax, OUT_ARG1(%esp) # referrer movl rSELF, %ecx movl %ecx, OUT_ARG2(%esp) # self - call SYMBOL(artGetShortStaticFromCode) + call SYMBOL(MterpGetShortStatic) movl rSELF, %ecx RESTORE_IBASE_FROM_SELF %ecx cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx) @@ -2751,7 +2751,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop * for: sput, sput-boolean, sput-byte, sput-char, sput-short */ /* op vAA, field@BBBB */ - .extern artSet32StaticFromCode + .extern MterpSet32Static EXPORT_PC movzwl 2(rPC), %eax movl %eax, OUT_ARG0(%esp) # field ref BBBB @@ -2761,7 +2761,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop movl %eax, OUT_ARG2(%esp) # referrer movl rSELF, %ecx movl %ecx, OUT_ARG3(%esp) # self - call SYMBOL(artSet32StaticFromCode) + call SYMBOL(MterpSet32Static) testb %al, %al jnz MterpException RESTORE_IBASE @@ -2776,17 +2776,17 @@ 
SYMBOL(artMterpAsmInstructionStart) = .L_op_nop * */ /* sput-wide vAA, field@BBBB */ - .extern artSet64IndirectStaticFromMterp + .extern MterpSet64Static EXPORT_PC movzwl 2(rPC), %eax movl %eax, OUT_ARG0(%esp) # field ref BBBB - movl OFF_FP_METHOD(rFP), %eax - movl %eax, OUT_ARG1(%esp) # referrer leal VREG_ADDRESS(rINST), %eax - movl %eax, OUT_ARG2(%esp) # &fp[AA] + movl %eax, OUT_ARG1(%esp) # &fp[AA] + movl OFF_FP_METHOD(rFP), %eax + movl %eax, OUT_ARG2(%esp) # referrer movl rSELF, %ecx movl %ecx, OUT_ARG3(%esp) # self - call SYMBOL(artSet64IndirectStaticFromMterp) + call SYMBOL(MterpSet64Static) testb %al, %al jnz MterpException RESTORE_IBASE @@ -2821,7 +2821,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop * for: sput, sput-boolean, sput-byte, sput-char, sput-short */ /* op vAA, field@BBBB */ - .extern artSet8StaticFromCode + .extern MterpSetBooleanStatic EXPORT_PC movzwl 2(rPC), %eax movl %eax, OUT_ARG0(%esp) # field ref BBBB @@ -2831,7 +2831,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop movl %eax, OUT_ARG2(%esp) # referrer movl rSELF, %ecx movl %ecx, OUT_ARG3(%esp) # self - call SYMBOL(artSet8StaticFromCode) + call SYMBOL(MterpSetBooleanStatic) testb %al, %al jnz MterpException RESTORE_IBASE @@ -2849,7 +2849,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop * for: sput, sput-boolean, sput-byte, sput-char, sput-short */ /* op vAA, field@BBBB */ - .extern artSet8StaticFromCode + .extern MterpSetByteStatic EXPORT_PC movzwl 2(rPC), %eax movl %eax, OUT_ARG0(%esp) # field ref BBBB @@ -2859,7 +2859,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop movl %eax, OUT_ARG2(%esp) # referrer movl rSELF, %ecx movl %ecx, OUT_ARG3(%esp) # self - call SYMBOL(artSet8StaticFromCode) + call SYMBOL(MterpSetByteStatic) testb %al, %al jnz MterpException RESTORE_IBASE @@ -2877,7 +2877,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop * for: sput, sput-boolean, sput-byte, sput-char, sput-short */ /* op vAA, field@BBBB */ - .extern artSet16StaticFromCode + .extern MterpSetCharStatic EXPORT_PC movzwl 2(rPC), %eax movl %eax, OUT_ARG0(%esp) # field ref BBBB @@ -2887,7 +2887,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop movl %eax, OUT_ARG2(%esp) # referrer movl rSELF, %ecx movl %ecx, OUT_ARG3(%esp) # self - call SYMBOL(artSet16StaticFromCode) + call SYMBOL(MterpSetCharStatic) testb %al, %al jnz MterpException RESTORE_IBASE @@ -2905,7 +2905,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop * for: sput, sput-boolean, sput-byte, sput-char, sput-short */ /* op vAA, field@BBBB */ - .extern artSet16StaticFromCode + .extern MterpSetShortStatic EXPORT_PC movzwl 2(rPC), %eax movl %eax, OUT_ARG0(%esp) # field ref BBBB @@ -2915,7 +2915,7 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop movl %eax, OUT_ARG2(%esp) # referrer movl rSELF, %ecx movl %ecx, OUT_ARG3(%esp) # self - call SYMBOL(artSet16StaticFromCode) + call SYMBOL(MterpSetShortStatic) testb %al, %al jnz MterpException RESTORE_IBASE diff --git a/runtime/interpreter/mterp/out/mterp_x86_64.S b/runtime/interpreter/mterp/out/mterp_x86_64.S index eb570669b7..b5a5ae5963 100644 --- a/runtime/interpreter/mterp/out/mterp_x86_64.S +++ b/runtime/interpreter/mterp/out/mterp_x86_64.S @@ -2445,12 +2445,12 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide */ /* op vAA, field@BBBB */ - .extern artGet32StaticFromCode + .extern MterpGet32Static EXPORT_PC movzwq 2(rPC), OUT_ARG0 # field ref CCCC movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer movq rSELF, OUT_ARG2 # self - call 
SYMBOL(artGet32StaticFromCode) + call SYMBOL(MterpGet32Static) movq rSELF, %rcx cmpl $0, THREAD_EXCEPTION_OFFSET(%rcx) jnz MterpException @@ -2476,12 +2476,12 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide */ /* op vAA, field@BBBB */ - .extern artGet64StaticFromCode + .extern MterpGet64Static EXPORT_PC movzwq 2(rPC), OUT_ARG0 # field ref CCCC movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer movq rSELF, OUT_ARG2 # self - call SYMBOL(artGet64StaticFromCode) + call SYMBOL(MterpGet64Static) movq rSELF, %rcx cmpl $0, THREAD_EXCEPTION_OFFSET(%rcx) jnz MterpException @@ -2508,12 +2508,12 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide */ /* op vAA, field@BBBB */ - .extern artGetObjStaticFromCode + .extern MterpGetObjStatic EXPORT_PC movzwq 2(rPC), OUT_ARG0 # field ref CCCC movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer movq rSELF, OUT_ARG2 # self - call SYMBOL(artGetObjStaticFromCode) + call SYMBOL(MterpGetObjStatic) movq rSELF, %rcx cmpl $0, THREAD_EXCEPTION_OFFSET(%rcx) jnz MterpException @@ -2540,12 +2540,12 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide */ /* op vAA, field@BBBB */ - .extern artGetBooleanStaticFromCode + .extern MterpGetBooleanStatic EXPORT_PC movzwq 2(rPC), OUT_ARG0 # field ref CCCC movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer movq rSELF, OUT_ARG2 # self - call SYMBOL(artGetBooleanStaticFromCode) + call SYMBOL(MterpGetBooleanStatic) movq rSELF, %rcx cmpl $0, THREAD_EXCEPTION_OFFSET(%rcx) jnz MterpException @@ -2572,12 +2572,12 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide */ /* op vAA, field@BBBB */ - .extern artGetByteStaticFromCode + .extern MterpGetByteStatic EXPORT_PC movzwq 2(rPC), OUT_ARG0 # field ref CCCC movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer movq rSELF, OUT_ARG2 # self - call SYMBOL(artGetByteStaticFromCode) + call SYMBOL(MterpGetByteStatic) movq rSELF, %rcx cmpl $0, THREAD_EXCEPTION_OFFSET(%rcx) jnz MterpException @@ -2604,12 +2604,12 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide */ /* op vAA, field@BBBB */ - .extern artGetCharStaticFromCode + .extern MterpGetCharStatic EXPORT_PC movzwq 2(rPC), OUT_ARG0 # field ref CCCC movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer movq rSELF, OUT_ARG2 # self - call SYMBOL(artGetCharStaticFromCode) + call SYMBOL(MterpGetCharStatic) movq rSELF, %rcx cmpl $0, THREAD_EXCEPTION_OFFSET(%rcx) jnz MterpException @@ -2636,12 +2636,12 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide */ /* op vAA, field@BBBB */ - .extern artGetShortStaticFromCode + .extern MterpGetShortStatic EXPORT_PC movzwq 2(rPC), OUT_ARG0 # field ref CCCC movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer movq rSELF, OUT_ARG2 # self - call SYMBOL(artGetShortStaticFromCode) + call SYMBOL(MterpGetShortStatic) movq rSELF, %rcx cmpl $0, THREAD_EXCEPTION_OFFSET(%rcx) jnz MterpException @@ -2667,13 +2667,13 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop * for: sput, sput-boolean, sput-byte, sput-char, sput-short */ /* op vAA, field@BBBB */ - .extern artSet32StaticFromCode + .extern MterpSet32Static EXPORT_PC movzwq 2(rPC), OUT_ARG0 # field ref BBBB GET_VREG 
OUT_32_ARG1, rINSTq # fp[AA] movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer movq rSELF, OUT_ARG3 # self - call SYMBOL(artSet32StaticFromCode) + call SYMBOL(MterpSet32Static) testb %al, %al jnz MterpException ADVANCE_PC_FETCH_AND_GOTO_NEXT 2 @@ -2687,13 +2687,13 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop * */ /* sput-wide vAA, field@BBBB */ - .extern artSet64IndirectStaticFromMterp + .extern MterpSet64Static EXPORT_PC movzwq 2(rPC), OUT_ARG0 # field ref BBBB - movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer - leaq VREG_ADDRESS(rINSTq), OUT_ARG2 # &fp[AA] + leaq VREG_ADDRESS(rINSTq), OUT_ARG1 # &fp[AA] + movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer movq rSELF, OUT_ARG3 # self - call SYMBOL(artSet64IndirectStaticFromMterp) + call SYMBOL(MterpSet64Static) testb %al, %al jnz MterpException ADVANCE_PC_FETCH_AND_GOTO_NEXT 2 @@ -2724,13 +2724,13 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop * for: sput, sput-boolean, sput-byte, sput-char, sput-short */ /* op vAA, field@BBBB */ - .extern artSet8StaticFromCode + .extern MterpSetBooleanStatic EXPORT_PC movzwq 2(rPC), OUT_ARG0 # field ref BBBB GET_VREG OUT_32_ARG1, rINSTq # fp[AA] movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer movq rSELF, OUT_ARG3 # self - call SYMBOL(artSet8StaticFromCode) + call SYMBOL(MterpSetBooleanStatic) testb %al, %al jnz MterpException ADVANCE_PC_FETCH_AND_GOTO_NEXT 2 @@ -2747,13 +2747,13 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop * for: sput, sput-boolean, sput-byte, sput-char, sput-short */ /* op vAA, field@BBBB */ - .extern artSet8StaticFromCode + .extern MterpSetByteStatic EXPORT_PC movzwq 2(rPC), OUT_ARG0 # field ref BBBB GET_VREG OUT_32_ARG1, rINSTq # fp[AA] movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer movq rSELF, OUT_ARG3 # self - call SYMBOL(artSet8StaticFromCode) + call SYMBOL(MterpSetByteStatic) testb %al, %al jnz MterpException ADVANCE_PC_FETCH_AND_GOTO_NEXT 2 @@ -2770,13 +2770,13 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop * for: sput, sput-boolean, sput-byte, sput-char, sput-short */ /* op vAA, field@BBBB */ - .extern artSet16StaticFromCode + .extern MterpSetCharStatic EXPORT_PC movzwq 2(rPC), OUT_ARG0 # field ref BBBB GET_VREG OUT_32_ARG1, rINSTq # fp[AA] movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer movq rSELF, OUT_ARG3 # self - call SYMBOL(artSet16StaticFromCode) + call SYMBOL(MterpSetCharStatic) testb %al, %al jnz MterpException ADVANCE_PC_FETCH_AND_GOTO_NEXT 2 @@ -2793,13 +2793,13 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop * for: sput, sput-boolean, sput-byte, sput-char, sput-short */ /* op vAA, field@BBBB */ - .extern artSet16StaticFromCode + .extern MterpSetShortStatic EXPORT_PC movzwq 2(rPC), OUT_ARG0 # field ref BBBB GET_VREG OUT_32_ARG1, rINSTq # fp[AA] movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer movq rSELF, OUT_ARG3 # self - call SYMBOL(artSet16StaticFromCode) + call SYMBOL(MterpSetShortStatic) testb %al, %al jnz MterpException ADVANCE_PC_FETCH_AND_GOTO_NEXT 2 diff --git a/runtime/interpreter/mterp/x86/op_sget.S b/runtime/interpreter/mterp/x86/op_sget.S index 0e9a3d82da..6e42d323e6 100644 --- a/runtime/interpreter/mterp/x86/op_sget.S +++ b/runtime/interpreter/mterp/x86/op_sget.S @@ -1,4 +1,4 @@ -%default { "is_object":"0", "helper":"artGet32StaticFromCode" } +%default { "is_object":"0", "helper":"MterpGet32Static" } /* * General SGET handler wrapper. 
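Taken together, the call-site changes above imply new C++ entry points whose argument order differs from the old artGet*/artSet* helpers: for sput-wide, the value pointer (&fp[AA]) now precedes the referrer. The runtime-side declarations are not part of these hunks; the following is a hedged sketch of the signatures as inferred from the register/stack assignments (a0/OUT_ARG0 = field index, then value, then referrer, then self), not the verbatim prototypes:

    // Inferred (not verbatim) prototypes for the renamed Mterp helpers.
    // Per the call sites: the getters return the value and signal failure via a
    // pending exception (the asm tests THREAD_EXCEPTION_OFFSET), while the
    // setters return zero on success (the asm tests the return value).
    extern "C" int32_t MterpGet32Static(uint32_t field_idx,
                                        art::ArtMethod* referrer,
                                        art::Thread* self);
    extern "C" int MterpSet32Static(uint32_t field_idx,
                                    int32_t new_value,
                                    art::ArtMethod* referrer,
                                    art::Thread* self);
    // Note the reorder vs. artSet64IndirectStaticFromMterp: the value
    // pointer (&fp[AA]) now comes before the referrer.
    extern "C" int MterpSet64Static(uint32_t field_idx,
                                    uint64_t* new_value,
                                    art::ArtMethod* referrer,
                                    art::Thread* self);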
* diff --git a/runtime/interpreter/mterp/x86/op_sget_boolean.S b/runtime/interpreter/mterp/x86/op_sget_boolean.S index f058dd8f7c..5fa2bf0cfc 100644 --- a/runtime/interpreter/mterp/x86/op_sget_boolean.S +++ b/runtime/interpreter/mterp/x86/op_sget_boolean.S @@ -1 +1 @@ -%include "x86/op_sget.S" {"helper":"artGetBooleanStaticFromCode"} +%include "x86/op_sget.S" {"helper":"MterpGetBooleanStatic"} diff --git a/runtime/interpreter/mterp/x86/op_sget_byte.S b/runtime/interpreter/mterp/x86/op_sget_byte.S index c952f40772..ef812f118e 100644 --- a/runtime/interpreter/mterp/x86/op_sget_byte.S +++ b/runtime/interpreter/mterp/x86/op_sget_byte.S @@ -1 +1 @@ -%include "x86/op_sget.S" {"helper":"artGetByteStaticFromCode"} +%include "x86/op_sget.S" {"helper":"MterpGetByteStatic"} diff --git a/runtime/interpreter/mterp/x86/op_sget_char.S b/runtime/interpreter/mterp/x86/op_sget_char.S index d7bd410c7d..3bc34ef338 100644 --- a/runtime/interpreter/mterp/x86/op_sget_char.S +++ b/runtime/interpreter/mterp/x86/op_sget_char.S @@ -1 +1 @@ -%include "x86/op_sget.S" {"helper":"artGetCharStaticFromCode"} +%include "x86/op_sget.S" {"helper":"MterpGetCharStatic"} diff --git a/runtime/interpreter/mterp/x86/op_sget_object.S b/runtime/interpreter/mterp/x86/op_sget_object.S index 1c95f9a00e..b829e75f30 100644 --- a/runtime/interpreter/mterp/x86/op_sget_object.S +++ b/runtime/interpreter/mterp/x86/op_sget_object.S @@ -1 +1 @@ -%include "x86/op_sget.S" {"is_object":"1", "helper":"artGetObjStaticFromCode"} +%include "x86/op_sget.S" {"is_object":"1", "helper":"MterpGetObjStatic"} diff --git a/runtime/interpreter/mterp/x86/op_sget_short.S b/runtime/interpreter/mterp/x86/op_sget_short.S index 6475306b26..449cf6f918 100644 --- a/runtime/interpreter/mterp/x86/op_sget_short.S +++ b/runtime/interpreter/mterp/x86/op_sget_short.S @@ -1 +1 @@ -%include "x86/op_sget.S" {"helper":"artGetShortStaticFromCode"} +%include "x86/op_sget.S" {"helper":"MterpGetShortStatic"} diff --git a/runtime/interpreter/mterp/x86/op_sget_wide.S b/runtime/interpreter/mterp/x86/op_sget_wide.S index 2b603034c6..a605bcf2e5 100644 --- a/runtime/interpreter/mterp/x86/op_sget_wide.S +++ b/runtime/interpreter/mterp/x86/op_sget_wide.S @@ -3,7 +3,7 @@ * */ /* sget-wide vAA, field@BBBB */ - .extern artGet64StaticFromCode + .extern MterpGet64Static EXPORT_PC movzwl 2(rPC), %eax movl %eax, OUT_ARG0(%esp) # field ref CCCC @@ -11,7 +11,7 @@ movl %eax, OUT_ARG1(%esp) # referrer movl rSELF, %ecx movl %ecx, OUT_ARG2(%esp) # self - call SYMBOL(artGet64StaticFromCode) + call SYMBOL(MterpGet64Static) movl rSELF, %ecx cmpl $$0, THREAD_EXCEPTION_OFFSET(%ecx) jnz MterpException diff --git a/runtime/interpreter/mterp/x86/op_sput.S b/runtime/interpreter/mterp/x86/op_sput.S index 0b5de0953d..99f6088982 100644 --- a/runtime/interpreter/mterp/x86/op_sput.S +++ b/runtime/interpreter/mterp/x86/op_sput.S @@ -1,4 +1,4 @@ -%default { "helper":"artSet32StaticFromCode"} +%default { "helper":"MterpSet32Static"} /* * General SPUT handler wrapper. 
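The per-opcode files below change only a %default/%include parameter because the mterp assembly is generated: each handler fragment is a template, and the out/mterp_*.S files are rebuilt from it. That is why every helper rename in this change appears twice, once in the fragment and once in the regenerated output. A sketch of the mechanism (the %default/%include lines are quoted from these files; the $helper substitution site is how such fragments typically consume the value, and is an assumption, not quoted from this diff):

    # x86/op_sget.S (template fragment)
    %default { "is_object":"0", "helper":"MterpGet32Static" }
        ...
        call    SYMBOL($helper)    # expands to: call SYMBOL(MterpGet32Static)

    # x86/op_sget_boolean.S (per-opcode override of the default)
    %include "x86/op_sget.S" {"helper":"MterpGetBooleanStatic"}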
* diff --git a/runtime/interpreter/mterp/x86/op_sput_boolean.S b/runtime/interpreter/mterp/x86/op_sput_boolean.S index 63601bd2bb..a7fffda1db 100644 --- a/runtime/interpreter/mterp/x86/op_sput_boolean.S +++ b/runtime/interpreter/mterp/x86/op_sput_boolean.S @@ -1 +1 @@ -%include "x86/op_sput.S" {"helper":"artSet8StaticFromCode"} +%include "x86/op_sput.S" {"helper":"MterpSetBooleanStatic"} diff --git a/runtime/interpreter/mterp/x86/op_sput_byte.S b/runtime/interpreter/mterp/x86/op_sput_byte.S index 63601bd2bb..3a5ff9267d 100644 --- a/runtime/interpreter/mterp/x86/op_sput_byte.S +++ b/runtime/interpreter/mterp/x86/op_sput_byte.S @@ -1 +1 @@ -%include "x86/op_sput.S" {"helper":"artSet8StaticFromCode"} +%include "x86/op_sput.S" {"helper":"MterpSetByteStatic"} diff --git a/runtime/interpreter/mterp/x86/op_sput_char.S b/runtime/interpreter/mterp/x86/op_sput_char.S index 1749f7c9f8..565cc2aa0f 100644 --- a/runtime/interpreter/mterp/x86/op_sput_char.S +++ b/runtime/interpreter/mterp/x86/op_sput_char.S @@ -1 +1 @@ -%include "x86/op_sput.S" {"helper":"artSet16StaticFromCode"} +%include "x86/op_sput.S" {"helper":"MterpSetCharStatic"} diff --git a/runtime/interpreter/mterp/x86/op_sput_short.S b/runtime/interpreter/mterp/x86/op_sput_short.S index 1749f7c9f8..85c344165e 100644 --- a/runtime/interpreter/mterp/x86/op_sput_short.S +++ b/runtime/interpreter/mterp/x86/op_sput_short.S @@ -1 +1 @@ -%include "x86/op_sput.S" {"helper":"artSet16StaticFromCode"} +%include "x86/op_sput.S" {"helper":"MterpSetShortStatic"} diff --git a/runtime/interpreter/mterp/x86/op_sput_wide.S b/runtime/interpreter/mterp/x86/op_sput_wide.S index 19cff0db5a..8cc7e28554 100644 --- a/runtime/interpreter/mterp/x86/op_sput_wide.S +++ b/runtime/interpreter/mterp/x86/op_sput_wide.S @@ -3,17 +3,17 @@ * */ /* sput-wide vAA, field@BBBB */ - .extern artSet64IndirectStaticFromMterp + .extern MterpSet64Static EXPORT_PC movzwl 2(rPC), %eax movl %eax, OUT_ARG0(%esp) # field ref BBBB - movl OFF_FP_METHOD(rFP), %eax - movl %eax, OUT_ARG1(%esp) # referrer leal VREG_ADDRESS(rINST), %eax - movl %eax, OUT_ARG2(%esp) # &fp[AA] + movl %eax, OUT_ARG1(%esp) # &fp[AA] + movl OFF_FP_METHOD(rFP), %eax + movl %eax, OUT_ARG2(%esp) # referrer movl rSELF, %ecx movl %ecx, OUT_ARG3(%esp) # self - call SYMBOL(artSet64IndirectStaticFromMterp) + call SYMBOL(MterpSet64Static) testb %al, %al jnz MterpException RESTORE_IBASE diff --git a/runtime/interpreter/mterp/x86_64/op_sget.S b/runtime/interpreter/mterp/x86_64/op_sget.S index d39e6c4396..e996c77801 100644 --- a/runtime/interpreter/mterp/x86_64/op_sget.S +++ b/runtime/interpreter/mterp/x86_64/op_sget.S @@ -1,4 +1,4 @@ -%default { "is_object":"0", "helper":"artGet32StaticFromCode", "wide":"0" } +%default { "is_object":"0", "helper":"MterpGet32Static", "wide":"0" } /* * General SGET handler wrapper. 
* diff --git a/runtime/interpreter/mterp/x86_64/op_sget_boolean.S b/runtime/interpreter/mterp/x86_64/op_sget_boolean.S index 7d358daec2..ee772ad4e1 100644 --- a/runtime/interpreter/mterp/x86_64/op_sget_boolean.S +++ b/runtime/interpreter/mterp/x86_64/op_sget_boolean.S @@ -1 +1 @@ -%include "x86_64/op_sget.S" {"helper":"artGetBooleanStaticFromCode"} +%include "x86_64/op_sget.S" {"helper":"MterpGetBooleanStatic"} diff --git a/runtime/interpreter/mterp/x86_64/op_sget_byte.S b/runtime/interpreter/mterp/x86_64/op_sget_byte.S index 79d9ff448b..f65ea4951e 100644 --- a/runtime/interpreter/mterp/x86_64/op_sget_byte.S +++ b/runtime/interpreter/mterp/x86_64/op_sget_byte.S @@ -1 +1 @@ -%include "x86_64/op_sget.S" {"helper":"artGetByteStaticFromCode"} +%include "x86_64/op_sget.S" {"helper":"MterpGetByteStatic"} diff --git a/runtime/interpreter/mterp/x86_64/op_sget_char.S b/runtime/interpreter/mterp/x86_64/op_sget_char.S index 448861052f..3972551bec 100644 --- a/runtime/interpreter/mterp/x86_64/op_sget_char.S +++ b/runtime/interpreter/mterp/x86_64/op_sget_char.S @@ -1 +1 @@ -%include "x86_64/op_sget.S" {"helper":"artGetCharStaticFromCode"} +%include "x86_64/op_sget.S" {"helper":"MterpGetCharStatic"} diff --git a/runtime/interpreter/mterp/x86_64/op_sget_object.S b/runtime/interpreter/mterp/x86_64/op_sget_object.S index 09b627e124..a0bbfd8d35 100644 --- a/runtime/interpreter/mterp/x86_64/op_sget_object.S +++ b/runtime/interpreter/mterp/x86_64/op_sget_object.S @@ -1 +1 @@ -%include "x86_64/op_sget.S" {"is_object":"1", "helper":"artGetObjStaticFromCode"} +%include "x86_64/op_sget.S" {"is_object":"1", "helper":"MterpGetObjStatic"} diff --git a/runtime/interpreter/mterp/x86_64/op_sget_short.S b/runtime/interpreter/mterp/x86_64/op_sget_short.S index 47ac23803c..df212dc5c9 100644 --- a/runtime/interpreter/mterp/x86_64/op_sget_short.S +++ b/runtime/interpreter/mterp/x86_64/op_sget_short.S @@ -1 +1 @@ -%include "x86_64/op_sget.S" {"helper":"artGetShortStaticFromCode"} +%include "x86_64/op_sget.S" {"helper":"MterpGetShortStatic"} diff --git a/runtime/interpreter/mterp/x86_64/op_sget_wide.S b/runtime/interpreter/mterp/x86_64/op_sget_wide.S index aa223434cf..1e98e28a92 100644 --- a/runtime/interpreter/mterp/x86_64/op_sget_wide.S +++ b/runtime/interpreter/mterp/x86_64/op_sget_wide.S @@ -1 +1 @@ -%include "x86_64/op_sget.S" {"helper":"artGet64StaticFromCode", "wide":"1"} +%include "x86_64/op_sget.S" {"helper":"MterpGet64Static", "wide":"1"} diff --git a/runtime/interpreter/mterp/x86_64/op_sput.S b/runtime/interpreter/mterp/x86_64/op_sput.S index e92b03273b..9705619900 100644 --- a/runtime/interpreter/mterp/x86_64/op_sput.S +++ b/runtime/interpreter/mterp/x86_64/op_sput.S @@ -1,4 +1,4 @@ -%default { "helper":"artSet32StaticFromCode"} +%default { "helper":"MterpSet32Static"} /* * General SPUT handler wrapper. 
* diff --git a/runtime/interpreter/mterp/x86_64/op_sput_boolean.S b/runtime/interpreter/mterp/x86_64/op_sput_boolean.S index 8718915cb2..8bf4a62328 100644 --- a/runtime/interpreter/mterp/x86_64/op_sput_boolean.S +++ b/runtime/interpreter/mterp/x86_64/op_sput_boolean.S @@ -1 +1 @@ -%include "x86_64/op_sput.S" {"helper":"artSet8StaticFromCode"} +%include "x86_64/op_sput.S" {"helper":"MterpSetBooleanStatic"} diff --git a/runtime/interpreter/mterp/x86_64/op_sput_byte.S b/runtime/interpreter/mterp/x86_64/op_sput_byte.S index 8718915cb2..5bb26ebed5 100644 --- a/runtime/interpreter/mterp/x86_64/op_sput_byte.S +++ b/runtime/interpreter/mterp/x86_64/op_sput_byte.S @@ -1 +1 @@ -%include "x86_64/op_sput.S" {"helper":"artSet8StaticFromCode"} +%include "x86_64/op_sput.S" {"helper":"MterpSetByteStatic"} diff --git a/runtime/interpreter/mterp/x86_64/op_sput_char.S b/runtime/interpreter/mterp/x86_64/op_sput_char.S index 2fe9d14816..42b244e2bb 100644 --- a/runtime/interpreter/mterp/x86_64/op_sput_char.S +++ b/runtime/interpreter/mterp/x86_64/op_sput_char.S @@ -1 +1 @@ -%include "x86_64/op_sput.S" {"helper":"artSet16StaticFromCode"} +%include "x86_64/op_sput.S" {"helper":"MterpSetCharStatic"} diff --git a/runtime/interpreter/mterp/x86_64/op_sput_short.S b/runtime/interpreter/mterp/x86_64/op_sput_short.S index 2fe9d14816..9670092aaf 100644 --- a/runtime/interpreter/mterp/x86_64/op_sput_short.S +++ b/runtime/interpreter/mterp/x86_64/op_sput_short.S @@ -1 +1 @@ -%include "x86_64/op_sput.S" {"helper":"artSet16StaticFromCode"} +%include "x86_64/op_sput.S" {"helper":"MterpSetShortStatic"} diff --git a/runtime/interpreter/mterp/x86_64/op_sput_wide.S b/runtime/interpreter/mterp/x86_64/op_sput_wide.S index c4bc269eb6..a21bcb5dd5 100644 --- a/runtime/interpreter/mterp/x86_64/op_sput_wide.S +++ b/runtime/interpreter/mterp/x86_64/op_sput_wide.S @@ -3,13 +3,13 @@ * */ /* sput-wide vAA, field@BBBB */ - .extern artSet64IndirectStaticFromMterp + .extern MterpSet64Static EXPORT_PC movzwq 2(rPC), OUT_ARG0 # field ref BBBB - movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer - leaq VREG_ADDRESS(rINSTq), OUT_ARG2 # &fp[AA] + leaq VREG_ADDRESS(rINSTq), OUT_ARG1 # &fp[AA] + movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer movq rSELF, OUT_ARG3 # self - call SYMBOL(artSet64IndirectStaticFromMterp) + call SYMBOL(MterpSet64Static) testb %al, %al jnz MterpException ADVANCE_PC_FETCH_AND_GOTO_NEXT 2 diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc index 100f476b43..bb92ca7ae6 100644 --- a/runtime/native/dalvik_system_ZygoteHooks.cc +++ b/runtime/native/dalvik_system_ZygoteHooks.cc @@ -74,12 +74,35 @@ static void EnableDebugger() { } } -static void DoCollectNonDebuggableCallback(Thread* thread, void* data ATTRIBUTE_UNUSED) +class ClassSet { + public: + explicit ClassSet(Thread* const self) : hs_(self) {} + + void AddClass(ObjPtr<mirror::Class> klass) REQUIRES(Locks::mutator_lock_) { + for (Handle<mirror::Class> k : class_set_) { + if (k.Get() == klass.Ptr()) { + return; + } + } + class_set_.push_back(hs_.NewHandle<mirror::Class>(klass)); + } + + const std::vector<Handle<mirror::Class>>& GetClasses() const { + return class_set_; + } + + private: + VariableSizedHandleScope hs_; + std::vector<Handle<mirror::Class>> class_set_; +}; + +static void DoCollectNonDebuggableCallback(Thread* thread, void* data) REQUIRES(Locks::mutator_lock_) { class NonDebuggableStacksVisitor : public StackVisitor { public: - explicit NonDebuggableStacksVisitor(Thread* t) - : StackVisitor(t, nullptr, 
StackVisitor::StackWalkKind::kIncludeInlinedFrames) {} + NonDebuggableStacksVisitor(Thread* t, ClassSet* class_set) + : StackVisitor(t, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), + class_set_(class_set) {} ~NonDebuggableStacksVisitor() OVERRIDE {} @@ -87,7 +110,7 @@ static void DoCollectNonDebuggableCallback(Thread* thread, void* data ATTRIBUTE_ if (GetMethod()->IsRuntimeMethod()) { return true; } - NonDebuggableClasses::AddNonDebuggableClass(GetMethod()->GetDeclaringClass()); + class_set_->AddClass(GetMethod()->GetDeclaringClass()); if (kIsDebugBuild) { LOG(INFO) << GetMethod()->GetDeclaringClass()->PrettyClass() << " might not be fully debuggable/deoptimizable due to " @@ -95,16 +118,36 @@ static void DoCollectNonDebuggableCallback(Thread* thread, void* data ATTRIBUTE_ } return true; } + + private: + ClassSet* class_set_; }; - NonDebuggableStacksVisitor visitor(thread); + NonDebuggableStacksVisitor visitor(thread, reinterpret_cast<ClassSet*>(data)); visitor.WalkStack(); } -static void CollectNonDebuggableClasses() { +static void CollectNonDebuggableClasses() REQUIRES(!Locks::mutator_lock_) { Runtime* const runtime = Runtime::Current(); - ScopedSuspendAll suspend("Checking stacks for non-obsoletable methods!", /*long_suspend*/false); - MutexLock mu(Thread::Current(), *Locks::thread_list_lock_); - runtime->GetThreadList()->ForEach(DoCollectNonDebuggableCallback, nullptr); + Thread* const self = Thread::Current(); + // Get the mutator lock. + ScopedObjectAccess soa(self); + ClassSet classes(self); + { + // Drop the mutator lock. + self->TransitionFromRunnableToSuspended(art::ThreadState::kNative); + { + // Get it back with a suspend all. + ScopedSuspendAll suspend("Checking stacks for non-obsoletable methods!", + /*long_suspend*/false); + MutexLock mu(Thread::Current(), *Locks::thread_list_lock_); + runtime->GetThreadList()->ForEach(DoCollectNonDebuggableCallback, &classes); + } + // Recover the shared lock before we leave this scope. + self->TransitionFromSuspendedToRunnable(); + } + for (Handle<mirror::Class> klass : classes.GetClasses()) { + NonDebuggableClasses::AddNonDebuggableClass(klass.Get()); + } } static void EnableDebugFeatures(uint32_t debug_flags) { diff --git a/runtime/native/java_lang_Void.cc b/runtime/native/java_lang_Void.cc new file mode 100644 index 0000000000..96bfd1b4fb --- /dev/null +++ b/runtime/native/java_lang_Void.cc @@ -0,0 +1,40 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
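The CollectNonDebuggableClasses() rewrite above encodes a general ART locking pattern: the collecting thread holds the shared mutator lock (so the handles in ClassSet stay valid), explicitly drops it before taking the exclusive suspend-all, and reacquires it afterwards; only then are the collected classes published through AddNonDebuggableClass, which now requires only the shared lock. A minimal sketch of that shape, using the transition calls from the hunk (the wrapper name and callback type are hypothetical):

    #include <functional>

    // Sketch only: run 'fn' while every other thread is suspended, from a
    // thread that currently holds the shared mutator lock as Runnable.
    void RunWithSuspendAll(art::Thread* self, const std::function<void()>& fn) {
      // Drop the shared mutator lock...
      self->TransitionFromRunnableToSuspended(art::ThreadState::kNative);
      {
        // ...so the exclusive suspend-all can be taken without deadlocking.
        art::ScopedSuspendAll ssa("RunWithSuspendAll", /*long_suspend=*/false);
        fn();  // All other threads are suspended here.
      }
      // Reacquire the shared lock before touching managed objects again.
      self->TransitionFromSuspendedToRunnable();
    }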
+ */ + +#include "java_lang_Void.h" + +#include "class_linker.h" +#include "jni_internal.h" +#include "runtime.h" +#include "scoped_fast_native_object_access-inl.h" + +namespace art { + +static jclass Void_lookupType(JNIEnv* env, jclass) { + ScopedFastNativeObjectAccess soa(env); + return soa.AddLocalReference<jclass>( + Runtime::Current()->GetClassLinker()->GetClassRoot(ClassLinker::kPrimitiveVoid)); +} + +static JNINativeMethod gMethods[] = { + FAST_NATIVE_METHOD(Void, lookupType, "()Ljava/lang/Class;"), +}; + +void register_java_lang_Void(JNIEnv* env) { + REGISTER_NATIVE_METHODS("java/lang/Void"); +} + +} // namespace art diff --git a/runtime/native/java_lang_Void.h b/runtime/native/java_lang_Void.h new file mode 100644 index 0000000000..8777d8068c --- /dev/null +++ b/runtime/native/java_lang_Void.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_VOID_H_ +#define ART_RUNTIME_NATIVE_JAVA_LANG_VOID_H_ + +#include <jni.h> + +namespace art { + +void register_java_lang_Void(JNIEnv* env); + +} // namespace art + +#endif // ART_RUNTIME_NATIVE_JAVA_LANG_VOID_H_ diff --git a/runtime/non_debuggable_classes.h b/runtime/non_debuggable_classes.h index b72afd8299..0c94dc03a7 100644 --- a/runtime/non_debuggable_classes.h +++ b/runtime/non_debuggable_classes.h @@ -35,7 +35,8 @@ struct NonDebuggableClasses { return non_debuggable_classes; } - static void AddNonDebuggableClass(ObjPtr<mirror::Class> klass) REQUIRES(Locks::mutator_lock_); + static void AddNonDebuggableClass(ObjPtr<mirror::Class> klass) + REQUIRES_SHARED(Locks::mutator_lock_); private: static std::vector<jclass> non_debuggable_classes; diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc index 5ae2fc51b7..48bf1e72a4 100644 --- a/runtime/oat_file_assistant.cc +++ b/runtime/oat_file_assistant.cc @@ -430,8 +430,7 @@ OatFileAssistant::OatStatus OatFileAssistant::GivenOatFileStatus(const OatFile& // starts up. LOG(WARNING) << "Dex location " << dex_location_ << " does not seem to include dex file. " << "Allow oat file use. This is potentially dangerous."; - } else if (file.GetOatHeader().GetImageFileLocationOatChecksum() - != GetCombinedImageChecksum()) { + } else if (file.GetOatHeader().GetImageFileLocationOatChecksum() != image_info->oat_checksum) { VLOG(oat) << "Oat image checksum does not match image checksum."; return kOatBootImageOutOfDate; } @@ -726,68 +725,81 @@ const std::vector<uint32_t>* OatFileAssistant::GetRequiredDexChecksums() { return required_dex_checksums_found_ ? 
&cached_required_dex_checksums_ : nullptr; } -const OatFileAssistant::ImageInfo* OatFileAssistant::GetImageInfo() { - if (!image_info_load_attempted_) { - image_info_load_attempted_ = true; - - Runtime* runtime = Runtime::Current(); - std::vector<gc::space::ImageSpace*> image_spaces = runtime->GetHeap()->GetBootImageSpaces(); - if (!image_spaces.empty()) { - cached_image_info_.location = image_spaces[0]->GetImageLocation(); - - if (isa_ == kRuntimeISA) { - const ImageHeader& image_header = image_spaces[0]->GetImageHeader(); - cached_image_info_.oat_checksum = image_header.GetOatChecksum(); - cached_image_info_.oat_data_begin = reinterpret_cast<uintptr_t>( - image_header.GetOatDataBegin()); - cached_image_info_.patch_delta = image_header.GetPatchDelta(); - } else { - std::string error_msg; - std::unique_ptr<ImageHeader> image_header( - gc::space::ImageSpace::ReadImageHeader(cached_image_info_.location.c_str(), - isa_, - &error_msg)); - CHECK(image_header != nullptr) << error_msg; - cached_image_info_.oat_checksum = image_header->GetOatChecksum(); - cached_image_info_.oat_data_begin = reinterpret_cast<uintptr_t>( - image_header->GetOatDataBegin()); - cached_image_info_.patch_delta = image_header->GetPatchDelta(); - } - } - image_info_load_succeeded_ = (!image_spaces.empty()); +// TODO: Use something better than xor for the combined image checksum. +std::unique_ptr<OatFileAssistant::ImageInfo> +OatFileAssistant::ImageInfo::GetRuntimeImageInfo(InstructionSet isa, std::string* error_msg) { + CHECK(error_msg != nullptr); - combined_image_checksum_ = CalculateCombinedImageChecksum(isa_); + // Use the currently loaded image to determine the image locations for all + // the image spaces, regardless of the isa requested. Otherwise we would + // need to read from the boot image's oat file to determine the rest of the + // image locations in the case of multi-image. + Runtime* runtime = Runtime::Current(); + std::vector<gc::space::ImageSpace*> image_spaces = runtime->GetHeap()->GetBootImageSpaces(); + if (image_spaces.empty()) { + *error_msg = "There are no boot image spaces"; + return nullptr; } - return image_info_load_succeeded_ ? &cached_image_info_ : nullptr; -} -// TODO: Use something better than xor. -uint32_t OatFileAssistant::CalculateCombinedImageChecksum(InstructionSet isa) { - uint32_t checksum = 0; - std::vector<gc::space::ImageSpace*> image_spaces = - Runtime::Current()->GetHeap()->GetBootImageSpaces(); + std::unique_ptr<ImageInfo> info(new ImageInfo()); + info->location = image_spaces[0]->GetImageLocation(); + + // TODO: Special casing on isa == kRuntimeISA is presumably motivated by + // performance: 'it's faster to use an already loaded image header than read + // the image header from disk'. But the loaded image is not necessarily the + // same as kRuntimeISA, so this behavior is suspect (b/35659889). 
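Both branches below combine the per-image-space oat checksums with XOR, which is why the TODO above asks for something better: XOR is order-independent and collisions are cheap. A standalone illustration of the scheme and its weakness (hypothetical helper name, same arithmetic):

    #include <cstdint>
    #include <vector>

    // Combine per-image-space oat checksums the way GetRuntimeImageInfo does.
    uint32_t CombineImageChecksums(const std::vector<uint32_t>& checksums) {
      uint32_t combined = 0;
      for (uint32_t c : checksums) {
        combined ^= c;
      }
      return combined;
    }

    // Weakness: reordering images is invisible to XOR, and any pair of equal
    // checksums cancels out -- {a, a, b} and {b} combine to the same value.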
if (isa == kRuntimeISA) { + const ImageHeader& image_header = image_spaces[0]->GetImageHeader(); + info->oat_data_begin = reinterpret_cast<uintptr_t>(image_header.GetOatDataBegin()); + info->patch_delta = image_header.GetPatchDelta(); + + info->oat_checksum = 0; for (gc::space::ImageSpace* image_space : image_spaces) { - checksum ^= image_space->GetImageHeader().GetOatChecksum(); + info->oat_checksum ^= image_space->GetImageHeader().GetOatChecksum(); } } else { + std::unique_ptr<ImageHeader> image_header( + gc::space::ImageSpace::ReadImageHeader(info->location.c_str(), isa, error_msg)); + if (image_header == nullptr) { + return nullptr; + } + info->oat_data_begin = reinterpret_cast<uintptr_t>(image_header->GetOatDataBegin()); + info->patch_delta = image_header->GetPatchDelta(); + + info->oat_checksum = 0; for (gc::space::ImageSpace* image_space : image_spaces) { std::string location = image_space->GetImageLocation(); - std::string error_msg; - std::unique_ptr<ImageHeader> image_header( - gc::space::ImageSpace::ReadImageHeader(location.c_str(), isa, &error_msg)); - CHECK(image_header != nullptr) << error_msg; - checksum ^= image_header->GetOatChecksum(); + image_header.reset( + gc::space::ImageSpace::ReadImageHeader(location.c_str(), isa, error_msg)); + if (image_header == nullptr) { + return nullptr; + } + info->oat_checksum ^= image_header->GetOatChecksum(); } } - return checksum; + return info; } -uint32_t OatFileAssistant::GetCombinedImageChecksum() { +const OatFileAssistant::ImageInfo* OatFileAssistant::GetImageInfo() { if (!image_info_load_attempted_) { - GetImageInfo(); + image_info_load_attempted_ = true; + std::string error_msg; + cached_image_info_ = ImageInfo::GetRuntimeImageInfo(isa_, &error_msg); + if (cached_image_info_ == nullptr) { + LOG(WARNING) << "Unable to get runtime image info: " << error_msg; + } + } + return cached_image_info_.get(); +} + +uint32_t OatFileAssistant::CalculateCombinedImageChecksum(InstructionSet isa) { + std::string error_msg; + std::unique_ptr<ImageInfo> info = ImageInfo::GetRuntimeImageInfo(isa, &error_msg); + if (info == nullptr) { + LOG(WARNING) << "Unable to get runtime image info for checksum: " << error_msg; + return 0; } - return combined_image_checksum_; + return info->oat_checksum; } OatFileAssistant::OatFileInfo& OatFileAssistant::GetBestInfo() { diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h index 3ede29f5e0..eec87f0768 100644 --- a/runtime/oat_file_assistant.h +++ b/runtime/oat_file_assistant.h @@ -284,6 +284,9 @@ class OatFileAssistant { uintptr_t oat_data_begin = 0; int32_t patch_delta = 0; std::string location; + + static std::unique_ptr<ImageInfo> GetRuntimeImageInfo(InstructionSet isa, + std::string* error_msg); }; class OatFileInfo { @@ -414,8 +417,6 @@ class OatFileAssistant { // The caller shouldn't clean up or free the returned pointer. const ImageInfo* GetImageInfo(); - uint32_t GetCombinedImageChecksum(); - // To implement Lock(), we lock a dummy file where the oat file would go // (adding ".flock" to the target file name) and retain the lock for the // remaining lifetime of the OatFileAssistant object. @@ -445,9 +446,7 @@ class OatFileAssistant { // TODO: The image info should probably be moved out of the oat file // assistant to an image file manager. 
bool image_info_load_attempted_ = false; - bool image_info_load_succeeded_ = false; - ImageInfo cached_image_info_; - uint32_t combined_image_checksum_ = 0; + std::unique_ptr<ImageInfo> cached_image_info_; DISALLOW_COPY_AND_ASSIGN(OatFileAssistant); }; diff --git a/runtime/openjdkjvmti/ti_heap.cc b/runtime/openjdkjvmti/ti_heap.cc index 976ce66f11..d52f0ea290 100644 --- a/runtime/openjdkjvmti/ti_heap.cc +++ b/runtime/openjdkjvmti/ti_heap.cc @@ -50,25 +50,28 @@ jint ReportString(art::ObjPtr<art::mirror::Object> obj, if (UNLIKELY(cb->string_primitive_value_callback != nullptr) && obj->IsString()) { art::ObjPtr<art::mirror::String> str = obj->AsString(); int32_t string_length = str->GetLength(); - jvmtiError alloc_error; - JvmtiUniquePtr<uint16_t[]> data = AllocJvmtiUniquePtr<uint16_t[]>(env, - string_length, - &alloc_error); - if (data == nullptr) { - // TODO: Not really sure what to do here. Should we abort the iteration and go all the way - // back? For now just warn. - LOG(WARNING) << "Unable to allocate buffer for string reporting! Silently dropping value."; - return 0; - } + JvmtiUniquePtr<uint16_t[]> data; - if (str->IsCompressed()) { - uint8_t* compressed_data = str->GetValueCompressed(); - for (int32_t i = 0; i != string_length; ++i) { - data[i] = compressed_data[i]; + if (string_length > 0) { + jvmtiError alloc_error; + data = AllocJvmtiUniquePtr<uint16_t[]>(env, string_length, &alloc_error); + if (data == nullptr) { + // TODO: Not really sure what to do here. Should we abort the iteration and go all the way + // back? For now just warn. + LOG(WARNING) << "Unable to allocate buffer for string reporting! Silently dropping value." + << " >" << str->ToModifiedUtf8() << "<"; + return 0; + } + + if (str->IsCompressed()) { + uint8_t* compressed_data = str->GetValueCompressed(); + for (int32_t i = 0; i != string_length; ++i) { + data[i] = compressed_data[i]; + } + } else { + // Can copy directly. + memcpy(data.get(), str->GetValue(), string_length * sizeof(uint16_t)); } - } else { - // Can copy directly. - memcpy(data.get(), str->GetValue(), string_length * sizeof(uint16_t)); } const jlong class_tag = tag_table->GetTagOrZero(obj->GetClass()); @@ -159,6 +162,424 @@ jint ReportPrimitiveArray(art::ObjPtr<art::mirror::Object> obj, return 0; } +template <typename UserData> +bool VisitorFalse(art::ObjPtr<art::mirror::Object> obj ATTRIBUTE_UNUSED, + art::ObjPtr<art::mirror::Class> klass ATTRIBUTE_UNUSED, + art::ArtField& field ATTRIBUTE_UNUSED, + size_t field_index ATTRIBUTE_UNUSED, + UserData* user_data ATTRIBUTE_UNUSED) { + return false; +} + +template <typename UserData, bool kCallVisitorOnRecursion> +class FieldVisitor { + public: + // Report the contents of a primitive fields of the given object, if a callback is set. + template <typename StaticPrimitiveVisitor, + typename StaticReferenceVisitor, + typename InstancePrimitiveVisitor, + typename InstanceReferenceVisitor> + static bool ReportFields(art::ObjPtr<art::mirror::Object> obj, + UserData* user_data, + StaticPrimitiveVisitor& static_prim_visitor, + StaticReferenceVisitor& static_ref_visitor, + InstancePrimitiveVisitor& instance_prim_visitor, + InstanceReferenceVisitor& instance_ref_visitor) + REQUIRES_SHARED(art::Locks::mutator_lock_) { + FieldVisitor fv(user_data); + + if (obj->IsClass()) { + // When visiting a class, we only visit the static fields of the given class. No field of + // superclasses is visited. + art::ObjPtr<art::mirror::Class> klass = obj->AsClass(); + // Only report fields on resolved classes. We need valid field data. 
+ if (!klass->IsResolved()) { + return false; + } + return fv.ReportFieldsImpl(nullptr, + obj->AsClass(), + obj->AsClass()->IsInterface(), + static_prim_visitor, + static_ref_visitor, + instance_prim_visitor, + instance_ref_visitor); + } else { + // See comment above. Just double-checking here, but an instance *should* mean the class was + // resolved. + DCHECK(obj->GetClass()->IsResolved() || obj->GetClass()->IsErroneousResolved()); + return fv.ReportFieldsImpl(obj, + obj->GetClass(), + false, + static_prim_visitor, + static_ref_visitor, + instance_prim_visitor, + instance_ref_visitor); + } + } + + private: + explicit FieldVisitor(UserData* user_data) : user_data_(user_data) {} + + // Report the contents of fields of the given object. If obj is null, report the static fields, + // otherwise the instance fields. + template <typename StaticPrimitiveVisitor, + typename StaticReferenceVisitor, + typename InstancePrimitiveVisitor, + typename InstanceReferenceVisitor> + bool ReportFieldsImpl(art::ObjPtr<art::mirror::Object> obj, + art::ObjPtr<art::mirror::Class> klass, + bool skip_java_lang_object, + StaticPrimitiveVisitor& static_prim_visitor, + StaticReferenceVisitor& static_ref_visitor, + InstancePrimitiveVisitor& instance_prim_visitor, + InstanceReferenceVisitor& instance_ref_visitor) + REQUIRES_SHARED(art::Locks::mutator_lock_) { + // Compute the offset of field indices. + size_t interface_field_count = CountInterfaceFields(klass); + + size_t tmp; + bool aborted = ReportFieldsRecursive(obj, + klass, + interface_field_count, + skip_java_lang_object, + static_prim_visitor, + static_ref_visitor, + instance_prim_visitor, + instance_ref_visitor, + &tmp); + return aborted; + } + + // Visit primitive fields in an object (instance). Return true if the visit was aborted. + template <typename StaticPrimitiveVisitor, + typename StaticReferenceVisitor, + typename InstancePrimitiveVisitor, + typename InstanceReferenceVisitor> + bool ReportFieldsRecursive(art::ObjPtr<art::mirror::Object> obj, + art::ObjPtr<art::mirror::Class> klass, + size_t interface_fields, + bool skip_java_lang_object, + StaticPrimitiveVisitor& static_prim_visitor, + StaticReferenceVisitor& static_ref_visitor, + InstancePrimitiveVisitor& instance_prim_visitor, + InstanceReferenceVisitor& instance_ref_visitor, + size_t* field_index_out) + REQUIRES_SHARED(art::Locks::mutator_lock_) { + DCHECK(klass != nullptr); + size_t field_index; + if (klass->GetSuperClass() == nullptr) { + // j.l.Object. Start with the fields from interfaces. + field_index = interface_fields; + if (skip_java_lang_object) { + *field_index_out = field_index; + return false; + } + } else { + // Report superclass fields. + if (kCallVisitorOnRecursion) { + if (ReportFieldsRecursive(obj, + klass->GetSuperClass(), + interface_fields, + skip_java_lang_object, + static_prim_visitor, + static_ref_visitor, + instance_prim_visitor, + instance_ref_visitor, + &field_index)) { + return true; + } + } else { + // Still call, but with empty visitor. This is required for correct counting. + ReportFieldsRecursive(obj, + klass->GetSuperClass(), + interface_fields, + skip_java_lang_object, + VisitorFalse<UserData>, + VisitorFalse<UserData>, + VisitorFalse<UserData>, + VisitorFalse<UserData>, + &field_index); + } + } + + // Now visit fields for the current klass. 
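The recursion above fixes a global numbering for JVMTI field indices: all interface statics occupy the lowest indices, then superclass fields from java.lang.Object down, then the current class, with static fields preceding instance fields at each level. A toy model of just the numbering (hypothetical types, same traversal order as ReportFieldsRecursive):

    #include <cstdio>
    #include <string>
    #include <vector>

    struct ToyClass {
      const ToyClass* super;
      std::vector<std::string> statics;
      std::vector<std::string> instance;
    };

    // Number superclasses first (root-down), then this class; interface
    // fields occupy [0, interface_fields). Returns the next free index.
    size_t NumberFields(const ToyClass& k, size_t interface_fields) {
      size_t index = (k.super == nullptr)
          ? interface_fields  // j.l.Object: start right after interface fields.
          : NumberFields(*k.super, interface_fields);
      for (const auto& f : k.statics) {
        std::printf("%zu: static %s\n", index++, f.c_str());
      }
      for (const auto& f : k.instance) {
        std::printf("%zu: %s\n", index++, f.c_str());
      }
      return index;
    }

    int main() {
      ToyClass base{nullptr, {"sBase"}, {"b"}};
      ToyClass derived{&base, {}, {"d1", "d2"}};
      NumberFields(derived, /*interface_fields=*/1);
      // Prints: 1: static sBase, 2: b, 3: d1, 4: d2
      // (index 0 belongs to the interface field).
    }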
+ + for (auto& static_field : klass->GetSFields()) { + if (static_field.IsPrimitiveType()) { + if (static_prim_visitor(obj, + klass, + static_field, + field_index, + user_data_)) { + return true; + } + } else { + if (static_ref_visitor(obj, + klass, + static_field, + field_index, + user_data_)) { + return true; + } + } + field_index++; + } + + for (auto& instance_field : klass->GetIFields()) { + if (instance_field.IsPrimitiveType()) { + if (instance_prim_visitor(obj, + klass, + instance_field, + field_index, + user_data_)) { + return true; + } + } else { + if (instance_ref_visitor(obj, + klass, + instance_field, + field_index, + user_data_)) { + return true; + } + } + field_index++; + } + + *field_index_out = field_index; + return false; + } + + // Implements a visit of the implemented interfaces of a given class. + template <typename T> + struct RecursiveInterfaceVisit { + static void VisitStatic(art::Thread* self, art::ObjPtr<art::mirror::Class> klass, T& visitor) + REQUIRES_SHARED(art::Locks::mutator_lock_) { + RecursiveInterfaceVisit rv; + rv.Visit(self, klass, visitor); + } + + void Visit(art::Thread* self, art::ObjPtr<art::mirror::Class> klass, T& visitor) + REQUIRES_SHARED(art::Locks::mutator_lock_) { + // First visit the parent, to get the order right. + // (We do this in preparation for actual visiting of interface fields.) + if (klass->GetSuperClass() != nullptr) { + Visit(self, klass->GetSuperClass(), visitor); + } + for (uint32_t i = 0; i != klass->NumDirectInterfaces(); ++i) { + art::ObjPtr<art::mirror::Class> inf_klass = + art::mirror::Class::GetDirectInterface(self, klass, i); + DCHECK(inf_klass != nullptr); + VisitInterface(self, inf_klass, visitor); + } + } + + void VisitInterface(art::Thread* self, art::ObjPtr<art::mirror::Class> inf_klass, T& visitor) + REQUIRES_SHARED(art::Locks::mutator_lock_) { + auto it = visited_interfaces.find(inf_klass.Ptr()); + if (it != visited_interfaces.end()) { + return; + } + visited_interfaces.insert(inf_klass.Ptr()); + + // Let the visitor know about this one. Note that this order is acceptable, as the ordering + // of these fields never matters for known visitors. + visitor(inf_klass); + + // Now visit the superinterfaces. + for (uint32_t i = 0; i != inf_klass->NumDirectInterfaces(); ++i) { + art::ObjPtr<art::mirror::Class> super_inf_klass = + art::mirror::Class::GetDirectInterface(self, inf_klass, i); + DCHECK(super_inf_klass != nullptr); + VisitInterface(self, super_inf_klass, visitor); + } + } + + std::unordered_set<art::mirror::Class*> visited_interfaces; + }; + + // Counting interface fields. Note that we cannot use the interface table, as that only contains + // "non-marker" interfaces (= interfaces with methods). + static size_t CountInterfaceFields(art::ObjPtr<art::mirror::Class> klass) + REQUIRES_SHARED(art::Locks::mutator_lock_) { + size_t count = 0; + auto visitor = [&count](art::ObjPtr<art::mirror::Class> inf_klass) + REQUIRES_SHARED(art::Locks::mutator_lock_) { + DCHECK(inf_klass->IsInterface()); + DCHECK_EQ(0u, inf_klass->NumInstanceFields()); + count += inf_klass->NumStaticFields(); + }; + RecursiveInterfaceVisit<decltype(visitor)>::VisitStatic(art::Thread::Current(), klass, visitor); + return count; + + // TODO: Implement caching. + } + + UserData* user_data_; +}; + +// Debug helper. Prints the structure of an object. 
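CountInterfaceFields above cannot use the interface table (it omits marker interfaces, i.e. interfaces without methods) and must deduplicate, because the same interface can be reachable along several inheritance paths. A toy diamond showing why the visited-set matters (hypothetical types):

    #include <unordered_set>
    #include <vector>

    struct Iface {
      std::vector<const Iface*> supers;
      int num_static_fields;
    };

    // Count each interface's statics once, as RecursiveInterfaceVisit does.
    int CountFields(const Iface* iface, std::unordered_set<const Iface*>* visited) {
      if (!visited->insert(iface).second) {
        return 0;  // Already counted via another inheritance path.
      }
      int count = iface->num_static_fields;
      for (const Iface* super : iface->supers) {
        count += CountFields(super, visited);
      }
      return count;
    }

    int main() {
      Iface i{{}, 1};                    // interface I with one static field
      Iface j{{&i}, 0};                  // interface J extends I
      Iface k{{&i}, 0};                  // interface K extends I
      const Iface* direct[] = {&j, &k};  // class C implements J, K
      std::unordered_set<const Iface*> visited;
      int total = 0;
      for (const Iface* d : direct) {
        total += CountFields(d, &visited);
      }
      return total == 1 ? 0 : 1;         // I counted once despite two paths.
    }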
+template <bool kStatic, bool kRef> +struct DumpVisitor { + static bool Callback(art::ObjPtr<art::mirror::Object> obj ATTRIBUTE_UNUSED, + art::ObjPtr<art::mirror::Class> klass ATTRIBUTE_UNUSED, + art::ArtField& field, + size_t field_index, + void* user_data ATTRIBUTE_UNUSED) + REQUIRES_SHARED(art::Locks::mutator_lock_) { + LOG(ERROR) << (kStatic ? "static " : "instance ") + << (kRef ? "ref " : "primitive ") + << field.PrettyField() + << " @ " + << field_index; + return false; + } +}; +ATTRIBUTE_UNUSED +void DumpObjectFields(art::ObjPtr<art::mirror::Object> obj) + REQUIRES_SHARED(art::Locks::mutator_lock_) { + if (obj->IsClass()) { + FieldVisitor<void, false>:: ReportFields(obj, + nullptr, + DumpVisitor<true, false>::Callback, + DumpVisitor<true, true>::Callback, + DumpVisitor<false, false>::Callback, + DumpVisitor<false, true>::Callback); + } else { + FieldVisitor<void, true>::ReportFields(obj, + nullptr, + DumpVisitor<true, false>::Callback, + DumpVisitor<true, true>::Callback, + DumpVisitor<false, false>::Callback, + DumpVisitor<false, true>::Callback); + } +} + +class ReportPrimitiveField { + public: + static bool Report(art::ObjPtr<art::mirror::Object> obj, + ObjectTagTable* tag_table, + const jvmtiHeapCallbacks* cb, + const void* user_data) + REQUIRES_SHARED(art::Locks::mutator_lock_) { + if (UNLIKELY(cb->primitive_field_callback != nullptr)) { + jlong class_tag = tag_table->GetTagOrZero(obj->GetClass()); + ReportPrimitiveField rpf(tag_table, class_tag, cb, user_data); + if (obj->IsClass()) { + return FieldVisitor<ReportPrimitiveField, false>::ReportFields( + obj, + &rpf, + ReportPrimitiveFieldCallback<true>, + VisitorFalse<ReportPrimitiveField>, + VisitorFalse<ReportPrimitiveField>, + VisitorFalse<ReportPrimitiveField>); + } else { + return FieldVisitor<ReportPrimitiveField, true>::ReportFields( + obj, + &rpf, + VisitorFalse<ReportPrimitiveField>, + VisitorFalse<ReportPrimitiveField>, + ReportPrimitiveFieldCallback<false>, + VisitorFalse<ReportPrimitiveField>); + } + } + return false; + } + + + private: + ReportPrimitiveField(ObjectTagTable* tag_table, + jlong class_tag, + const jvmtiHeapCallbacks* cb, + const void* user_data) + : tag_table_(tag_table), class_tag_(class_tag), cb_(cb), user_data_(user_data) {} + + template <bool kReportStatic> + static bool ReportPrimitiveFieldCallback(art::ObjPtr<art::mirror::Object> obj, + art::ObjPtr<art::mirror::Class> klass, + art::ArtField& field, + size_t field_index, + ReportPrimitiveField* user_data) + REQUIRES_SHARED(art::Locks::mutator_lock_) { + art::Primitive::Type art_prim_type = field.GetTypeAsPrimitiveType(); + jvmtiPrimitiveType prim_type = + static_cast<jvmtiPrimitiveType>(art::Primitive::Descriptor(art_prim_type)[0]); + DCHECK(prim_type == JVMTI_PRIMITIVE_TYPE_BOOLEAN || + prim_type == JVMTI_PRIMITIVE_TYPE_BYTE || + prim_type == JVMTI_PRIMITIVE_TYPE_CHAR || + prim_type == JVMTI_PRIMITIVE_TYPE_SHORT || + prim_type == JVMTI_PRIMITIVE_TYPE_INT || + prim_type == JVMTI_PRIMITIVE_TYPE_LONG || + prim_type == JVMTI_PRIMITIVE_TYPE_FLOAT || + prim_type == JVMTI_PRIMITIVE_TYPE_DOUBLE); + jvmtiHeapReferenceInfo info; + info.field.index = field_index; + + jvalue value; + memset(&value, 0, sizeof(jvalue)); + art::ObjPtr<art::mirror::Object> src = kReportStatic ? klass : obj; + switch (art_prim_type) { + case art::Primitive::Type::kPrimBoolean: + value.z = field.GetBoolean(src) == 0 ? 
JNI_FALSE : JNI_TRUE; + break; + case art::Primitive::Type::kPrimByte: + value.b = field.GetByte(src); + break; + case art::Primitive::Type::kPrimChar: + value.c = field.GetChar(src); + break; + case art::Primitive::Type::kPrimShort: + value.s = field.GetShort(src); + break; + case art::Primitive::Type::kPrimInt: + value.i = field.GetInt(src); + break; + case art::Primitive::Type::kPrimLong: + value.j = field.GetLong(src); + break; + case art::Primitive::Type::kPrimFloat: + value.f = field.GetFloat(src); + break; + case art::Primitive::Type::kPrimDouble: + value.d = field.GetDouble(src); + break; + case art::Primitive::Type::kPrimVoid: + case art::Primitive::Type::kPrimNot: { + LOG(FATAL) << "Should not reach here"; + UNREACHABLE(); + } + } + + jlong obj_tag = user_data->tag_table_->GetTagOrZero(src.Ptr()); + const jlong saved_obj_tag = obj_tag; + + jint ret = user_data->cb_->primitive_field_callback(kReportStatic + ? JVMTI_HEAP_REFERENCE_STATIC_FIELD + : JVMTI_HEAP_REFERENCE_FIELD, + &info, + user_data->class_tag_, + &obj_tag, + value, + prim_type, + const_cast<void*>(user_data->user_data_)); + + if (saved_obj_tag != obj_tag) { + user_data->tag_table_->Set(src.Ptr(), obj_tag); + } + + if ((ret & JVMTI_VISIT_ABORT) != 0) { + return true; + } + + return false; + } + + ObjectTagTable* tag_table_; + jlong class_tag_; + const jvmtiHeapCallbacks* cb_; + const void* user_data_; +}; + struct HeapFilter { explicit HeapFilter(jint heap_filter) : filter_out_tagged((heap_filter & JVMTI_HEAP_FILTER_TAGGED) != 0), @@ -289,7 +710,12 @@ static void IterateThroughHeapObjectCallback(art::mirror::Object* obj, void* arg ithd->stop_reports = (array_ret & JVMTI_VISIT_ABORT) != 0; } - // TODO Implement primitive field callback. + if (!ithd->stop_reports) { + ithd->stop_reports = ReportPrimitiveField::Report(obj, + ithd->heap_util->GetTags(), + ithd->callbacks, + ithd->user_data); + } } jvmtiError HeapUtil::IterateThroughHeap(jvmtiEnv* env, @@ -565,64 +991,50 @@ class FollowReferencesHelper FINAL { return; } - // TODO: We'll probably have to rewrite this completely with our own visiting logic, if we - // want to have a chance of getting the field indices computed halfway efficiently. For - // now, ignore them altogether. - - struct InstanceReferenceVisitor { - explicit InstanceReferenceVisitor(FollowReferencesHelper* helper_) - : helper(helper_), stop_reports(false) {} - - void operator()(art::mirror::Object* src, - art::MemberOffset field_offset, - bool is_static ATTRIBUTE_UNUSED) const - REQUIRES_SHARED(art::Locks::mutator_lock_) - REQUIRES(!*helper->tag_table_->GetAllowDisallowLock()) { - if (stop_reports) { - return; - } - - art::mirror::Object* trg = src->GetFieldObjectReferenceAddr(field_offset)->AsMirrorPtr(); + // All instance fields. + auto report_instance_field = [&](art::ObjPtr<art::mirror::Object> src, + art::ObjPtr<art::mirror::Class> obj_klass ATTRIBUTE_UNUSED, + art::ArtField& field, + size_t field_index, + void* user_data ATTRIBUTE_UNUSED) + REQUIRES_SHARED(art::Locks::mutator_lock_) + REQUIRES(!*tag_table_->GetAllowDisallowLock()) { + art::ObjPtr<art::mirror::Object> field_value = field.GetObject(src); + if (field_value != nullptr) { jvmtiHeapReferenceInfo reference_info; memset(&reference_info, 0, sizeof(reference_info)); // TODO: Implement spec-compliant numbering. 
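ReportPrimitiveFieldCallback's cast from a descriptor character to jvmtiPrimitiveType is sound because the JVMTI constants are defined as exactly those characters ('Z', 'B', 'C', 'S', 'I', 'J', 'F', 'D'); the DCHECK above re-verifies the result in debug builds. The invariant can also be stated at compile time (illustrative, not part of this change):

    #include <jvmti.h>

    static_assert(JVMTI_PRIMITIVE_TYPE_BOOLEAN == 'Z', "JVMTI uses descriptor chars");
    static_assert(JVMTI_PRIMITIVE_TYPE_BYTE    == 'B', "JVMTI uses descriptor chars");
    static_assert(JVMTI_PRIMITIVE_TYPE_CHAR    == 'C', "JVMTI uses descriptor chars");
    static_assert(JVMTI_PRIMITIVE_TYPE_SHORT   == 'S', "JVMTI uses descriptor chars");
    static_assert(JVMTI_PRIMITIVE_TYPE_INT     == 'I', "JVMTI uses descriptor chars");
    static_assert(JVMTI_PRIMITIVE_TYPE_LONG    == 'J', "JVMTI uses descriptor chars");
    static_assert(JVMTI_PRIMITIVE_TYPE_FLOAT   == 'F', "JVMTI uses descriptor chars");
    static_assert(JVMTI_PRIMITIVE_TYPE_DOUBLE  == 'D', "JVMTI uses descriptor chars");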
- reference_info.field.index = field_offset.Int32Value(); + reference_info.field.index = field_index; jvmtiHeapReferenceKind kind = - field_offset.Int32Value() == art::mirror::Object::ClassOffset().Int32Value() + field.GetOffset().Int32Value() == art::mirror::Object::ClassOffset().Int32Value() ? JVMTI_HEAP_REFERENCE_CLASS : JVMTI_HEAP_REFERENCE_FIELD; const jvmtiHeapReferenceInfo* reference_info_ptr = kind == JVMTI_HEAP_REFERENCE_CLASS ? nullptr : &reference_info; - stop_reports = !helper->ReportReferenceMaybeEnqueue(kind, reference_info_ptr, src, trg); + return !ReportReferenceMaybeEnqueue(kind, reference_info_ptr, src.Ptr(), field_value.Ptr()); } - - void VisitRoot(art::mirror::CompressedReference<art::mirror::Object>* root ATTRIBUTE_UNUSED) - const { - LOG(FATAL) << "Unreachable"; - } - void VisitRootIfNonNull( - art::mirror::CompressedReference<art::mirror::Object>* root ATTRIBUTE_UNUSED) const { - LOG(FATAL) << "Unreachable"; - } - - // "mutable" required by the visitor API. - mutable FollowReferencesHelper* helper; - mutable bool stop_reports; + return false; }; + stop_reports_ = FieldVisitor<void, true>::ReportFields(obj, + nullptr, + VisitorFalse<void>, + VisitorFalse<void>, + VisitorFalse<void>, + report_instance_field); + if (stop_reports_) { + return; + } - InstanceReferenceVisitor visitor(this); - // Visit references, not native roots. - obj->VisitReferences<false>(visitor, art::VoidFunctor()); - - stop_reports_ = visitor.stop_reports; - - if (!stop_reports_) { - jint string_ret = ReportString(obj, env, tag_table_, callbacks_, user_data_); - stop_reports_ = (string_ret & JVMTI_VISIT_ABORT) != 0; + jint string_ret = ReportString(obj, env, tag_table_, callbacks_, user_data_); + stop_reports_ = (string_ret & JVMTI_VISIT_ABORT) != 0; + if (stop_reports_) { + return; } + + stop_reports_ = ReportPrimitiveField::Report(obj, tag_table_, callbacks_, user_data_); } void VisitArray(art::mirror::Object* array) @@ -716,26 +1128,38 @@ class FollowReferencesHelper FINAL { DCHECK_EQ(h_klass.Get(), klass); // Declared static fields. - for (auto& field : klass->GetSFields()) { - if (!field.IsPrimitiveType()) { - art::ObjPtr<art::mirror::Object> field_value = field.GetObject(klass); - if (field_value != nullptr) { - jvmtiHeapReferenceInfo reference_info; - memset(&reference_info, 0, sizeof(reference_info)); + auto report_static_field = [&](art::ObjPtr<art::mirror::Object> obj ATTRIBUTE_UNUSED, + art::ObjPtr<art::mirror::Class> obj_klass, + art::ArtField& field, + size_t field_index, + void* user_data ATTRIBUTE_UNUSED) + REQUIRES_SHARED(art::Locks::mutator_lock_) + REQUIRES(!*tag_table_->GetAllowDisallowLock()) { + art::ObjPtr<art::mirror::Object> field_value = field.GetObject(obj_klass); + if (field_value != nullptr) { + jvmtiHeapReferenceInfo reference_info; + memset(&reference_info, 0, sizeof(reference_info)); - // TODO: Implement spec-compliant numbering. 
- reference_info.field.index = field.GetOffset().Int32Value(); + reference_info.field.index = static_cast<jint>(field_index); - stop_reports_ = !ReportReferenceMaybeEnqueue(JVMTI_HEAP_REFERENCE_STATIC_FIELD, - &reference_info, - klass, - field_value.Ptr()); - if (stop_reports_) { - return; - } - } + return !ReportReferenceMaybeEnqueue(JVMTI_HEAP_REFERENCE_STATIC_FIELD, + &reference_info, + obj_klass.Ptr(), + field_value.Ptr()); } + return false; + }; + stop_reports_ = FieldVisitor<void, false>::ReportFields(klass, + nullptr, + VisitorFalse<void>, + report_static_field, + VisitorFalse<void>, + VisitorFalse<void>); + if (stop_reports_) { + return; } + + stop_reports_ = ReportPrimitiveField::Report(klass, tag_table_, callbacks_, user_data_); } void MaybeEnqueue(art::mirror::Object* obj) REQUIRES_SHARED(art::Locks::mutator_lock_) { diff --git a/runtime/runtime.cc b/runtime/runtime.cc index 69dcfebcb1..42a0ca9373 100644 --- a/runtime/runtime.cc +++ b/runtime/runtime.cc @@ -114,6 +114,7 @@ #include "native/java_lang_Thread.h" #include "native/java_lang_Throwable.h" #include "native/java_lang_VMClassLoader.h" +#include "native/java_lang_Void.h" #include "native/java_lang_invoke_MethodHandleImpl.h" #include "native/java_lang_ref_FinalizerReference.h" #include "native/java_lang_ref_Reference.h" @@ -1556,6 +1557,7 @@ void Runtime::RegisterRuntimeNativeMethods(JNIEnv* env) { register_java_lang_Thread(env); register_java_lang_Throwable(env); register_java_lang_VMClassLoader(env); + register_java_lang_Void(env); register_java_util_concurrent_atomic_AtomicLong(env); register_libcore_util_CharsetUtils(env); register_org_apache_harmony_dalvik_ddmc_DdmServer(env); diff --git a/test/152-dead-large-object/expected.txt b/test/152-dead-large-object/expected.txt new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/test/152-dead-large-object/expected.txt diff --git a/test/152-dead-large-object/info.txt b/test/152-dead-large-object/info.txt new file mode 100644 index 0000000000..45023cd0b7 --- /dev/null +++ b/test/152-dead-large-object/info.txt @@ -0,0 +1 @@ +Test that large objects are freed properly after a GC. diff --git a/test/152-dead-large-object/src/Main.java b/test/152-dead-large-object/src/Main.java new file mode 100644 index 0000000000..72fd25c2c0 --- /dev/null +++ b/test/152-dead-large-object/src/Main.java @@ -0,0 +1,26 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +public class Main { + static volatile Object a[] = null; + + public static void main(String[] args) { + for (int i = 0; i < 10; ++i) { + a = new Object[i * 300000]; + Runtime.getRuntime().gc(); + } + } +} diff --git a/test/154-gc-loop/src/Main.java b/test/154-gc-loop/src/Main.java index 3a256c109e..69015b65aa 100644 --- a/test/154-gc-loop/src/Main.java +++ b/test/154-gc-loop/src/Main.java @@ -38,7 +38,7 @@ public class Main { } } catch (Exception e) {} System.out.println("Finalize count too large: " + - ((finalizeCounter >= 10) ? 
Integer.toString(finalizeCounter) : "false")); + ((finalizeCounter >= 15) ? Integer.toString(finalizeCounter) : "false")); } private static native void backgroundProcessState(); diff --git a/test/157-void-class/expected.txt b/test/157-void-class/expected.txt new file mode 100644 index 0000000000..3f61c0b5b0 --- /dev/null +++ b/test/157-void-class/expected.txt @@ -0,0 +1,2 @@ +JNI_OnLoad called +void.class = void diff --git a/test/157-void-class/info.txt b/test/157-void-class/info.txt new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/test/157-void-class/info.txt diff --git a/test/157-void-class/run b/test/157-void-class/run new file mode 100755 index 0000000000..59e852c8cd --- /dev/null +++ b/test/157-void-class/run @@ -0,0 +1,22 @@ +#!/bin/bash +# +# Copyright (C) 2017 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Let the test build its own core image with --no-image and use verify-profile, +# so that the compiler does not try to initialize classes. This leaves the +# java.lang.Void compile-time verified but uninitialized. +./default-run "$@" --no-image \ + --runtime-option -Ximage-compiler-option \ + --runtime-option --compiler-filter=verify-profile diff --git a/test/157-void-class/src/Main.java b/test/157-void-class/src/Main.java new file mode 100644 index 0000000000..322b705f1d --- /dev/null +++ b/test/157-void-class/src/Main.java @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import libcore.util.EmptyArray; + +public class Main { + public static void main(String[] args) { + try { + // Check if we're running dalvik or RI. + Class<?> class_loader_class = Class.forName("dalvik.system.PathClassLoader"); + System.loadLibrary(args[0]); + } catch (ClassNotFoundException e) { + usingRI = true; + // Add expected JNI_OnLoad log line to match expected.txt. + System.out.println("JNI_OnLoad called"); + } + try { + // Initialize all classes needed for old java.lang.Void.TYPE initialization. + Runnable.class.getMethod("run", EmptyArray.CLASS).getReturnType(); + } catch (Exception e) { + throw new Error(e); + } + // Clear the resolved types of the ojluni dex file to make sure there is no entry + // for "V", i.e. void. + clearResolvedTypes(Integer.class); + // With java.lang.Void being compile-time verified but uninitialized, initialize + // it now. Previously, this would indirectly initialize TYPE with the current, + // i.e. 
zero-initialized, value of TYPE. The only thing that could prevent the + // series of calls leading to this was a cache hit in Class.getDexCacheType() + // which we have prevented by clearing the cache above. + Class<?> voidClass = void.class; + System.out.println("void.class = " + voidClass); + } + + public static void clearResolvedTypes(Class<?> c) { + if (!usingRI) { + nativeClearResolvedTypes(c); + } + } + + public static native void nativeClearResolvedTypes(Class<?> c); + + static boolean usingRI = false; +} diff --git a/test/536-checker-intrinsic-optimization/src/Main.java b/test/536-checker-intrinsic-optimization/src/Main.java index 52f3f84406..e395e283e0 100644 --- a/test/536-checker-intrinsic-optimization/src/Main.java +++ b/test/536-checker-intrinsic-optimization/src/Main.java @@ -330,6 +330,21 @@ public class Main { // Terminate the scope for the CHECK-NOT search at the reference or length comparison, // whichever comes first. /// CHECK: cmp {{w.*,}} {{w.*|#.*}} + + /// CHECK-START-MIPS: boolean Main.stringArgumentNotNull(java.lang.Object) disassembly (after) + /// CHECK: InvokeVirtual {{.*\.equals.*}} intrinsic:StringEquals + /// CHECK-NOT: beq r0, + /// CHECK-NOT: beqz + /// CHECK-NOT: beqzc + // Terminate the scope for the CHECK-NOT search at the class field or length comparison, + // whichever comes first. + /// CHECK: lw + + /// CHECK-START-MIPS64: boolean Main.stringArgumentNotNull(java.lang.Object) disassembly (after) + /// CHECK: InvokeVirtual {{.*\.equals.*}} intrinsic:StringEquals + /// CHECK-NOT: beqzc + // Terminate the scope for the CHECK-NOT search at the reference comparison. + /// CHECK: beqc public static boolean stringArgumentNotNull(Object obj) { obj.getClass(); return "foo".equals(obj); @@ -384,6 +399,22 @@ public class Main { /// CHECK-NOT: ldr {{w\d+}}, [{{x\d+}}] /// CHECK-NOT: ldr {{w\d+}}, [{{x\d+}}, #0] /// CHECK: cmp {{w\d+}}, {{w\d+|#.*}} + + // Test is brittle as it depends on the class offset being 0. + /// CHECK-START-MIPS: boolean Main.stringArgumentIsString() disassembly (after) + /// CHECK: InvokeVirtual intrinsic:StringEquals + /// CHECK: beq{{(zc)?}} + // Check that we don't try to compare the classes. + /// CHECK-NOT: lw {{r\d+}}, +0({{r\d+}}) + /// CHECK: bne{{c?}} + + // Test is brittle as it depends on the class offset being 0. + /// CHECK-START-MIPS64: boolean Main.stringArgumentIsString() disassembly (after) + /// CHECK: InvokeVirtual intrinsic:StringEquals + /// CHECK: beqzc + // Check that we don't try to compare the classes. + /// CHECK-NOT: lw {{r\d+}}, +0({{r\d+}}) + /// CHECK: bnec public static boolean stringArgumentIsString() { return "foo".equals(myString); } diff --git a/test/640-checker-integer-valueof/expected.txt b/test/640-checker-integer-valueof/expected.txt new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/test/640-checker-integer-valueof/expected.txt diff --git a/test/640-checker-integer-valueof/info.txt b/test/640-checker-integer-valueof/info.txt new file mode 100644 index 0000000000..51021a4eda --- /dev/null +++ b/test/640-checker-integer-valueof/info.txt @@ -0,0 +1 @@ +Test for Integer.valueOf. 
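The checker test below leans on java.lang.Integer's small-value cache: valueOf(int) is
specified to return the same boxed object for every value in [-128, 127], which is what lets
the intrinsic load a pre-populated cache entry instead of allocating. A minimal sketch of the
observable contract in plain Java (class name is illustrative):

    public class ValueOfCacheDemo {
      public static void main(String[] args) {
        // Inside the guaranteed cache range, boxing is identity-stable.
        System.out.println(Integer.valueOf(127) == Integer.valueOf(127));    // true
        System.out.println(Integer.valueOf(-128) == Integer.valueOf(-128));  // true
        // Just outside the default range, each call may allocate a fresh object.
        System.out.println(Integer.valueOf(128) == Integer.valueOf(128));    // typically false
      }
    }

This is exactly the boundary the intFieldMinus129/intField128 assertions in Main.java below probe.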
diff --git a/test/640-checker-integer-valueof/src/Main.java b/test/640-checker-integer-valueof/src/Main.java new file mode 100644 index 0000000000..0837fd18ee --- /dev/null +++ b/test/640-checker-integer-valueof/src/Main.java @@ -0,0 +1,93 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +public class Main { + + /// CHECK-START: java.lang.Integer Main.foo(int) disassembly (after) + /// CHECK: <<Integer:l\d+>> InvokeStaticOrDirect method_name:java.lang.Integer.valueOf intrinsic:IntegerValueOf + /// CHECK: pAllocObjectInitialized + /// CHECK: Return [<<Integer>>] + public static Integer foo(int a) { + return Integer.valueOf(a); + } + + /// CHECK-START: java.lang.Integer Main.foo2() disassembly (after) + /// CHECK: <<Integer:l\d+>> InvokeStaticOrDirect method_name:java.lang.Integer.valueOf intrinsic:IntegerValueOf + /// CHECK-NOT: pAllocObjectInitialized + /// CHECK: Return [<<Integer>>] + public static Integer foo2() { + return Integer.valueOf(-42); + } + + /// CHECK-START: java.lang.Integer Main.foo3() disassembly (after) + /// CHECK: <<Integer:l\d+>> InvokeStaticOrDirect method_name:java.lang.Integer.valueOf intrinsic:IntegerValueOf + /// CHECK-NOT: pAllocObjectInitialized + /// CHECK: Return [<<Integer>>] + public static Integer foo3() { + return Integer.valueOf(42); + } + + /// CHECK-START: java.lang.Integer Main.foo4() disassembly (after) + /// CHECK: <<Integer:l\d+>> InvokeStaticOrDirect method_name:java.lang.Integer.valueOf intrinsic:IntegerValueOf + /// CHECK: pAllocObjectInitialized + /// CHECK: Return [<<Integer>>] + public static Integer foo4() { + return Integer.valueOf(55555); + } + + public static void main(String[] args) { + assertEqual("42", foo(intField)); + assertEqual(foo(intField), foo(intField2)); + assertEqual("-42", foo2()); + assertEqual("42", foo3()); + assertEqual("55555", foo4()); + assertEqual("55555", foo(intField3)); + assertEqual("-129", foo(intFieldMinus129)); + assertEqual("-128", foo(intFieldMinus128)); + assertEqual(foo(intFieldMinus128), foo(intFieldMinus128)); + assertEqual("-127", foo(intFieldMinus127)); + assertEqual(foo(intFieldMinus127), foo(intFieldMinus127)); + assertEqual("126", foo(intField126)); + assertEqual(foo(intField126), foo(intField126)); + assertEqual("127", foo(intField127)); + assertEqual(foo(intField127), foo(intField127)); + assertEqual("128", foo(intField128)); + } + + static void assertEqual(String a, Integer b) { + if (!a.equals(b.toString())) { + throw new Error("Expected " + a + ", got " + b); + } + } + + static void assertEqual(Integer a, Integer b) { + if (a != b) { + throw new Error("Expected " + a + ", got " + b); + } + } + + static int intField = 42; + static int intField2 = 42; + static int intField3 = 55555; + + // Edge cases. 
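+  // (The default Integer cache covers [-128, 127]: -128 and 127 must box to the same
+  //  object on every call, while -129 and 128 fall just outside and, with the default
+  //  cache size, get freshly allocated objects.)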
+ static int intFieldMinus129 = -129; + static int intFieldMinus128 = -128; + static int intFieldMinus127 = -127; + static int intField126 = 126; + static int intField127 = 127; + static int intField128 = 128; +} diff --git a/test/906-iterate-heap/expected.txt b/test/906-iterate-heap/expected.txt index 3e857ab003..b6af8435de 100644 --- a/test/906-iterate-heap/expected.txt +++ b/test/906-iterate-heap/expected.txt @@ -18,3 +18,27 @@ 2 1@0 (32, 2xD '0000000000000000000000000000f03f') 2 +10000@0 (static, int, index=3) 0000000000000000 +10001 +10000@0 (static, int, index=11) 0000000000000000 +10001 +10000@0 (static, int, index=0) 0000000000000000 +10001 +10000@0 (static, int, index=1) 0000000000000000 +10001 +10000@0 (instance, int, index=2) 0000000000000000 +10001@0 (instance, byte, index=4) 0000000000000001 +10002@0 (instance, char, index=5) 0000000000000061 +10003@0 (instance, int, index=6) 0000000000000003 +10004@0 (instance, long, index=7) 0000000000000004 +10005@0 (instance, short, index=9) 0000000000000002 +10006 +10000@0 (instance, int, index=3) 0000000000000000 +10001@0 (instance, byte, index=5) 0000000000000001 +10002@0 (instance, char, index=6) 0000000000000061 +10003@0 (instance, int, index=7) 0000000000000003 +10004@0 (instance, long, index=8) 0000000000000004 +10005@0 (instance, short, index=10) 0000000000000002 +10006@0 (instance, double, index=12) 3ff3ae147ae147ae +10007@0 (instance, float, index=13) 000000003f9d70a4 +10008 diff --git a/test/906-iterate-heap/iterate_heap.cc b/test/906-iterate-heap/iterate_heap.cc index 890220ee8d..13c3562b60 100644 --- a/test/906-iterate-heap/iterate_heap.cc +++ b/test/906-iterate-heap/iterate_heap.cc @@ -322,5 +322,92 @@ extern "C" JNIEXPORT jstring JNICALL Java_Main_iterateThroughHeapPrimitiveArray( return env->NewStringUTF(fac.data.c_str()); } +static constexpr const char* GetPrimitiveTypeName(jvmtiPrimitiveType type) { + switch (type) { + case JVMTI_PRIMITIVE_TYPE_BOOLEAN: + return "boolean"; + case JVMTI_PRIMITIVE_TYPE_BYTE: + return "byte"; + case JVMTI_PRIMITIVE_TYPE_CHAR: + return "char"; + case JVMTI_PRIMITIVE_TYPE_SHORT: + return "short"; + case JVMTI_PRIMITIVE_TYPE_INT: + return "int"; + case JVMTI_PRIMITIVE_TYPE_FLOAT: + return "float"; + case JVMTI_PRIMITIVE_TYPE_LONG: + return "long"; + case JVMTI_PRIMITIVE_TYPE_DOUBLE: + return "double"; + } + LOG(FATAL) << "Unknown type " << static_cast<size_t>(type); + UNREACHABLE(); +} + +extern "C" JNIEXPORT jstring JNICALL Java_Main_iterateThroughHeapPrimitiveFields( + JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jlong tag) { + struct FindFieldCallbacks { + explicit FindFieldCallbacks(jlong t) : tag_to_find(t) {} + + static jint JNICALL HeapIterationCallback(jlong class_tag ATTRIBUTE_UNUSED, + jlong size ATTRIBUTE_UNUSED, + jlong* tag_ptr ATTRIBUTE_UNUSED, + jint length ATTRIBUTE_UNUSED, + void* user_data ATTRIBUTE_UNUSED) { + return 0; + } + + static jint JNICALL PrimitiveFieldValueCallback(jvmtiHeapReferenceKind kind, + const jvmtiHeapReferenceInfo* info, + jlong class_tag, + jlong* tag_ptr, + jvalue value, + jvmtiPrimitiveType value_type, + void* user_data) { + FindFieldCallbacks* p = reinterpret_cast<FindFieldCallbacks*>(user_data); + if (*tag_ptr >= p->tag_to_find) { + std::ostringstream oss; + oss << *tag_ptr + << '@' + << class_tag + << " (" + << (kind == JVMTI_HEAP_REFERENCE_FIELD ? "instance, " : "static, ") + << GetPrimitiveTypeName(value_type) + << ", index=" + << info->field.index + << ") "; + // Be lazy, always print eight bytes. 
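+        // (jvalue is a union, so copying all eight bytes with memcpy below avoids
+        //  reading an inactive union member directly.)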
+ static_assert(sizeof(jvalue) == sizeof(uint64_t), "Unexpected jvalue size"); + uint64_t val; + memcpy(&val, &value, sizeof(uint64_t)); // To avoid undefined behavior. + oss << android::base::StringPrintf("%016" PRIx64, val); + + if (!p->data.empty()) { + p->data += "\n"; + } + p->data += oss.str(); + *tag_ptr = *tag_ptr + 1; + } + return 0; + } + + std::string data; + const jlong tag_to_find; + }; + + jvmtiHeapCallbacks callbacks; + memset(&callbacks, 0, sizeof(jvmtiHeapCallbacks)); + callbacks.heap_iteration_callback = FindFieldCallbacks::HeapIterationCallback; + callbacks.primitive_field_callback = FindFieldCallbacks::PrimitiveFieldValueCallback; + + FindFieldCallbacks ffc(tag); + jvmtiError ret = jvmti_env->IterateThroughHeap(0, nullptr, &callbacks, &ffc); + if (JvmtiErrorToException(env, ret)) { + return nullptr; + } + return env->NewStringUTF(ffc.data.c_str()); +} + } // namespace Test906IterateHeap } // namespace art diff --git a/test/906-iterate-heap/src/Main.java b/test/906-iterate-heap/src/Main.java index d4998865b5..365ce0f214 100644 --- a/test/906-iterate-heap/src/Main.java +++ b/test/906-iterate-heap/src/Main.java @@ -119,6 +119,60 @@ public class Main { setTag(dArray, 1); System.out.println(iterateThroughHeapPrimitiveArray(getTag(dArray))); System.out.println(getTag(dArray)); + + // Force GCs to clean up dirt. + Runtime.getRuntime().gc(); + Runtime.getRuntime().gc(); + + doTestPrimitiveFieldsClasses(); + + doTestPrimitiveFieldsIntegral(); + + // Force GCs to clean up dirt. + Runtime.getRuntime().gc(); + Runtime.getRuntime().gc(); + + doTestPrimitiveFieldsFloat(); + + // Force GCs to clean up dirt. + Runtime.getRuntime().gc(); + Runtime.getRuntime().gc(); + } + + private static void doTestPrimitiveFieldsClasses() { + setTag(IntObject.class, 10000); + System.out.println(iterateThroughHeapPrimitiveFields(10000)); + System.out.println(getTag(IntObject.class)); + setTag(IntObject.class, 0); + + setTag(FloatObject.class, 10000); + System.out.println(iterateThroughHeapPrimitiveFields(10000)); + System.out.println(getTag(FloatObject.class)); + setTag(FloatObject.class, 0); + + setTag(Inf1.class, 10000); + System.out.println(iterateThroughHeapPrimitiveFields(10000)); + System.out.println(getTag(Inf1.class)); + setTag(Inf1.class, 0); + + setTag(Inf2.class, 10000); + System.out.println(iterateThroughHeapPrimitiveFields(10000)); + System.out.println(getTag(Inf2.class)); + setTag(Inf2.class, 0); + } + + private static void doTestPrimitiveFieldsIntegral() { + IntObject intObject = new IntObject(); + setTag(intObject, 10000); + System.out.println(iterateThroughHeapPrimitiveFields(10000)); + System.out.println(getTag(intObject)); + } + + private static void doTestPrimitiveFieldsFloat() { + FloatObject floatObject = new FloatObject(); + setTag(floatObject, 10000); + System.out.println(iterateThroughHeapPrimitiveFields(10000)); + System.out.println(getTag(floatObject)); } static class A { @@ -172,6 +226,31 @@ public class Main { return ret; } + private static interface Inf1 { + public final static int A = 1; + } + + private static interface Inf2 extends Inf1 { + public final static int B = 1; + } + + private static class IntObject implements Inf1 { + byte b = (byte)1; + char c= 'a'; + short s = (short)2; + int i = 3; + long l = 4; + Object o = new Object(); + static int sI = 5; + } + + private static class FloatObject extends IntObject implements Inf2 { + float f = 1.23f; + double d = 1.23; + Object p = new Object(); + static int sI = 6; + } + private static native void setTag(Object o, long 
tag); private static native long getTag(Object o); @@ -188,4 +267,5 @@ public class Main { Class<?> klassFilter); private static native String iterateThroughHeapString(long tag); private static native String iterateThroughHeapPrimitiveArray(long tag); + private static native String iterateThroughHeapPrimitiveFields(long tag); } diff --git a/test/913-heaps/expected.txt b/test/913-heaps/expected.txt index 46805d7272..fc2761e800 100644 --- a/test/913-heaps/expected.txt +++ b/test/913-heaps/expected.txt @@ -8,34 +8,34 @@ root@root --(thread)--> 3000@0 [size=132, length=-1] 1002@0 --(interface)--> 2001@0 [size=124, length=-1] 1002@0 --(superclass)--> 1001@0 [size=123, length=-1] 1@1000 --(class)--> 1000@0 [size=123, length=-1] -1@1000 --(field@12)--> 3@1001 [size=24, length=-1] -1@1000 --(field@8)--> 2@1000 [size=16, length=-1] +1@1000 --(field@2)--> 2@1000 [size=16, length=-1] +1@1000 --(field@3)--> 3@1001 [size=24, length=-1] 2001@0 --(interface)--> 2000@0 [size=124, length=-1] 2@1000 --(class)--> 1000@0 [size=123, length=-1] 3@1001 --(class)--> 1001@0 [size=123, length=-1] -3@1001 --(field@16)--> 4@1000 [size=16, length=-1] -3@1001 --(field@20)--> 5@1002 [size=32, length=-1] +3@1001 --(field@4)--> 4@1000 [size=16, length=-1] +3@1001 --(field@5)--> 5@1002 [size=32, length=-1] 4@1000 --(class)--> 1000@0 [size=123, length=-1] 5@1002 --(class)--> 1002@0 [size=123, length=-1] -5@1002 --(field@24)--> 6@1000 [size=16, length=-1] -5@1002 --(field@28)--> 1@1000 [size=16, length=-1] +5@1002 --(field@8)--> 6@1000 [size=16, length=-1] +5@1002 --(field@9)--> 1@1000 [size=16, length=-1] 6@1000 --(class)--> 1000@0 [size=123, length=-1] --- 1001@0 --(superclass)--> 1000@0 [size=123, length=-1] 1002@0 --(interface)--> 2001@0 [size=124, length=-1] 1002@0 --(superclass)--> 1001@0 [size=123, length=-1] 1@1000 --(class)--> 1000@0 [size=123, length=-1] -1@1000 --(field@12)--> 3@1001 [size=24, length=-1] -1@1000 --(field@8)--> 2@1000 [size=16, length=-1] +1@1000 --(field@2)--> 2@1000 [size=16, length=-1] +1@1000 --(field@3)--> 3@1001 [size=24, length=-1] 2001@0 --(interface)--> 2000@0 [size=124, length=-1] 2@1000 --(class)--> 1000@0 [size=123, length=-1] 3@1001 --(class)--> 1001@0 [size=123, length=-1] -3@1001 --(field@16)--> 4@1000 [size=16, length=-1] -3@1001 --(field@20)--> 5@1002 [size=32, length=-1] +3@1001 --(field@4)--> 4@1000 [size=16, length=-1] +3@1001 --(field@5)--> 5@1002 [size=32, length=-1] 4@1000 --(class)--> 1000@0 [size=123, length=-1] 5@1002 --(class)--> 1002@0 [size=123, length=-1] -5@1002 --(field@24)--> 6@1000 [size=16, length=-1] -5@1002 --(field@28)--> 1@1000 [size=16, length=-1] +5@1002 --(field@8)--> 6@1000 [size=16, length=-1] +5@1002 --(field@9)--> 1@1000 [size=16, length=-1] 6@1000 --(class)--> 1000@0 [size=123, length=-1] --- root@root --(jni-global)--> 1@1000 [size=16, length=-1] @@ -49,38 +49,39 @@ root@root --(thread)--> 3000@0 [size=132, length=-1] 1002@0 --(interface)--> 2001@0 [size=124, length=-1] 1002@0 --(superclass)--> 1001@0 [size=123, length=-1] 1@1000 --(class)--> 1000@0 [size=123, length=-1] -1@1000 --(field@12)--> 3@1001 [size=24, length=-1] -1@1000 --(field@8)--> 2@1000 [size=16, length=-1] +1@1000 --(field@2)--> 2@1000 [size=16, length=-1] +1@1000 --(field@3)--> 3@1001 [size=24, length=-1] 2001@0 --(interface)--> 2000@0 [size=124, length=-1] 2@1000 --(class)--> 1000@0 [size=123, length=-1] 3@1001 --(class)--> 1001@0 [size=123, length=-1] -3@1001 --(field@16)--> 4@1000 [size=16, length=-1] -3@1001 --(field@20)--> 5@1002 [size=32, length=-1] +3@1001 --(field@4)--> 4@1000 
[size=16, length=-1] +3@1001 --(field@5)--> 5@1002 [size=32, length=-1] 4@1000 --(class)--> 1000@0 [size=123, length=-1] 5@1002 --(class)--> 1002@0 [size=123, length=-1] -5@1002 --(field@24)--> 6@1000 [size=16, length=-1] -5@1002 --(field@28)--> 1@1000 [size=16, length=-1] +5@1002 --(field@8)--> 6@1000 [size=16, length=-1] +5@1002 --(field@9)--> 1@1000 [size=16, length=-1] 6@1000 --(class)--> 1000@0 [size=123, length=-1] --- 1001@0 --(superclass)--> 1000@0 [size=123, length=-1] 1002@0 --(interface)--> 2001@0 [size=124, length=-1] 1002@0 --(superclass)--> 1001@0 [size=123, length=-1] 1@1000 --(class)--> 1000@0 [size=123, length=-1] -1@1000 --(field@12)--> 3@1001 [size=24, length=-1] -1@1000 --(field@8)--> 2@1000 [size=16, length=-1] +1@1000 --(field@2)--> 2@1000 [size=16, length=-1] +1@1000 --(field@3)--> 3@1001 [size=24, length=-1] 2001@0 --(interface)--> 2000@0 [size=124, length=-1] 2@1000 --(class)--> 1000@0 [size=123, length=-1] 3@1001 --(class)--> 1001@0 [size=123, length=-1] -3@1001 --(field@16)--> 4@1000 [size=16, length=-1] -3@1001 --(field@20)--> 5@1002 [size=32, length=-1] +3@1001 --(field@4)--> 4@1000 [size=16, length=-1] +3@1001 --(field@5)--> 5@1002 [size=32, length=-1] 4@1000 --(class)--> 1000@0 [size=123, length=-1] 5@1002 --(class)--> 1002@0 [size=123, length=-1] -5@1002 --(field@24)--> 6@1000 [size=16, length=-1] -5@1002 --(field@28)--> 1@1000 [size=16, length=-1] +5@1002 --(field@8)--> 6@1000 [size=16, length=-1] +5@1002 --(field@9)--> 1@1000 [size=16, length=-1] 6@1000 --(class)--> 1000@0 [size=123, length=-1] --- -[1@0 (32, 'HelloWorld')] +[1@0 (32, 'HelloWorld'), 2@0 (16, '')] 2 +3 2@0 (15, 3xB '010203') 3@0 (16, 2xC '41005a00') 8@0 (32, 2xD '0000000000000000000000000000f03f') @@ -90,18 +91,42 @@ root@root --(thread)--> 3000@0 [size=132, length=-1] 4@0 (18, 3xS '010002000300') 1@0 (14, 2xZ '0001') 23456789 +10000@0 (static, int, index=3) 0000000000000000 +10001 +10000@0 (static, int, index=11) 0000000000000000 +10001 +10000@0 (static, int, index=0) 0000000000000000 +10001 +10000@0 (static, int, index=1) 0000000000000000 +10001 +10000@0 (instance, int, index=2) 0000000000000000 +10001@0 (instance, byte, index=4) 0000000000000001 +10002@0 (instance, char, index=5) 0000000000000061 +10003@0 (instance, int, index=6) 0000000000000003 +10004@0 (instance, long, index=7) 0000000000000004 +10005@0 (instance, short, index=9) 0000000000000002 +10006 +10000@0 (instance, int, index=3) 0000000000000000 +10001@0 (instance, byte, index=5) 0000000000000001 +10002@0 (instance, char, index=6) 0000000000000061 +10003@0 (instance, int, index=7) 0000000000000003 +10004@0 (instance, long, index=8) 0000000000000004 +10005@0 (instance, short, index=10) 0000000000000002 +10006@0 (instance, double, index=12) 3ff3ae147ae147ae +10007@0 (instance, float, index=13) 000000003f9d70a4 +10008 --- klass --- root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=13,location= 32])--> 1@1000 [size=16, length=-1] 0@0 --(array-element@0)--> 1@1000 [size=16, length=-1] -1@1000 --(field@8)--> 2@1000 [size=16, length=-1] -3@1001 --(field@16)--> 4@1000 [size=16, length=-1] -5@1002 --(field@24)--> 6@1000 [size=16, length=-1] -5@1002 --(field@28)--> 1@1000 [size=16, length=-1] +1@1000 --(field@2)--> 2@1000 [size=16, length=-1] +3@1001 --(field@4)--> 4@1000 [size=16, length=-1] +5@1002 --(field@8)--> 6@1000 [size=16, length=-1] +5@1002 --(field@9)--> 1@1000 [size=16, length=-1] --- -1@1000 --(field@8)--> 2@1000 [size=16, length=-1] -3@1001 --(field@16)--> 4@1000 [size=16, length=-1] 
-5@1002 --(field@24)--> 6@1000 [size=16, length=-1] -5@1002 --(field@28)--> 1@1000 [size=16, length=-1] +1@1000 --(field@2)--> 2@1000 [size=16, length=-1] +3@1001 --(field@4)--> 4@1000 [size=16, length=-1] +5@1002 --(field@8)--> 6@1000 [size=16, length=-1] +5@1002 --(field@9)--> 1@1000 [size=16, length=-1] --- root@root --(jni-global)--> 1@1000 [size=16, length=-1] root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 1@1000 [size=16, length=-1] @@ -109,15 +134,15 @@ root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=5,location= 10])--> 1@1000 [size=16, length=-1] root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 19])--> 1@1000 [size=16, length=-1] root@root --(thread)--> 1@1000 [size=16, length=-1] -1@1000 --(field@8)--> 2@1000 [size=16, length=-1] -3@1001 --(field@16)--> 4@1000 [size=16, length=-1] -5@1002 --(field@24)--> 6@1000 [size=16, length=-1] -5@1002 --(field@28)--> 1@1000 [size=16, length=-1] +1@1000 --(field@2)--> 2@1000 [size=16, length=-1] +3@1001 --(field@4)--> 4@1000 [size=16, length=-1] +5@1002 --(field@8)--> 6@1000 [size=16, length=-1] +5@1002 --(field@9)--> 1@1000 [size=16, length=-1] --- -1@1000 --(field@8)--> 2@1000 [size=16, length=-1] -3@1001 --(field@16)--> 4@1000 [size=16, length=-1] -5@1002 --(field@24)--> 6@1000 [size=16, length=-1] -5@1002 --(field@28)--> 1@1000 [size=16, length=-1] +1@1000 --(field@2)--> 2@1000 [size=16, length=-1] +3@1001 --(field@4)--> 4@1000 [size=16, length=-1] +5@1002 --(field@8)--> 6@1000 [size=16, length=-1] +5@1002 --(field@9)--> 1@1000 [size=16, length=-1] --- --- heap_filter --- ---- tagged objects @@ -128,41 +153,40 @@ root@root --(thread)--> 1@1000 [size=16, length=-1] ---- untagged objects root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=13,location= 32])--> 1@1000 [size=16, length=-1] root@root --(stack-local[id=1,tag=3000,depth=3,method=doFollowReferencesTest,vreg=1,location= 28])--> 3000@0 [size=132, length=-1] -root@root --(system-class)--> 2@0 [size=32, length=-1] root@root --(thread)--> 3000@0 [size=132, length=-1] 0@0 --(array-element@0)--> 1@1000 [size=16, length=-1] 1001@0 --(superclass)--> 1000@0 [size=123, length=-1] 1002@0 --(interface)--> 2001@0 [size=124, length=-1] 1002@0 --(superclass)--> 1001@0 [size=123, length=-1] 1@1000 --(class)--> 1000@0 [size=123, length=-1] -1@1000 --(field@12)--> 3@1001 [size=24, length=-1] -1@1000 --(field@8)--> 2@1000 [size=16, length=-1] +1@1000 --(field@2)--> 2@1000 [size=16, length=-1] +1@1000 --(field@3)--> 3@1001 [size=24, length=-1] 2001@0 --(interface)--> 2000@0 [size=124, length=-1] 2@1000 --(class)--> 1000@0 [size=123, length=-1] 3@1001 --(class)--> 1001@0 [size=123, length=-1] -3@1001 --(field@16)--> 4@1000 [size=16, length=-1] -3@1001 --(field@20)--> 5@1002 [size=32, length=-1] +3@1001 --(field@4)--> 4@1000 [size=16, length=-1] +3@1001 --(field@5)--> 5@1002 [size=32, length=-1] 4@1000 --(class)--> 1000@0 [size=123, length=-1] 5@1002 --(class)--> 1002@0 [size=123, length=-1] -5@1002 --(field@24)--> 6@1000 [size=16, length=-1] -5@1002 --(field@28)--> 1@1000 [size=16, length=-1] +5@1002 --(field@8)--> 6@1000 [size=16, length=-1] +5@1002 --(field@9)--> 1@1000 [size=16, length=-1] 6@1000 --(class)--> 1000@0 [size=123, length=-1] --- 1001@0 --(superclass)--> 1000@0 [size=123, length=-1] 1002@0 --(interface)--> 2001@0 [size=124, length=-1] 1002@0 --(superclass)--> 
1001@0 [size=123, length=-1] 1@1000 --(class)--> 1000@0 [size=123, length=-1] -1@1000 --(field@12)--> 3@1001 [size=24, length=-1] -1@1000 --(field@8)--> 2@1000 [size=16, length=-1] +1@1000 --(field@2)--> 2@1000 [size=16, length=-1] +1@1000 --(field@3)--> 3@1001 [size=24, length=-1] 2001@0 --(interface)--> 2000@0 [size=124, length=-1] 2@1000 --(class)--> 1000@0 [size=123, length=-1] 3@1001 --(class)--> 1001@0 [size=123, length=-1] -3@1001 --(field@16)--> 4@1000 [size=16, length=-1] -3@1001 --(field@20)--> 5@1002 [size=32, length=-1] +3@1001 --(field@4)--> 4@1000 [size=16, length=-1] +3@1001 --(field@5)--> 5@1002 [size=32, length=-1] 4@1000 --(class)--> 1000@0 [size=123, length=-1] 5@1002 --(class)--> 1002@0 [size=123, length=-1] -5@1002 --(field@24)--> 6@1000 [size=16, length=-1] -5@1002 --(field@28)--> 1@1000 [size=16, length=-1] +5@1002 --(field@8)--> 6@1000 [size=16, length=-1] +5@1002 --(field@9)--> 1@1000 [size=16, length=-1] 6@1000 --(class)--> 1000@0 [size=123, length=-1] --- root@root --(jni-global)--> 1@1000 [size=16, length=-1] @@ -170,46 +194,44 @@ root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 1@1000 root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=13,location= 10])--> 1@1000 [size=16, length=-1] root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=5,location= 10])--> 1@1000 [size=16, length=-1] root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 19])--> 1@1000 [size=16, length=-1] -root@root --(system-class)--> 2@0 [size=32, length=-1] root@root --(thread)--> 1@1000 [size=16, length=-1] root@root --(thread)--> 3000@0 [size=132, length=-1] 1001@0 --(superclass)--> 1000@0 [size=123, length=-1] 1002@0 --(interface)--> 2001@0 [size=124, length=-1] 1002@0 --(superclass)--> 1001@0 [size=123, length=-1] 1@1000 --(class)--> 1000@0 [size=123, length=-1] -1@1000 --(field@12)--> 3@1001 [size=24, length=-1] -1@1000 --(field@8)--> 2@1000 [size=16, length=-1] +1@1000 --(field@2)--> 2@1000 [size=16, length=-1] +1@1000 --(field@3)--> 3@1001 [size=24, length=-1] 2001@0 --(interface)--> 2000@0 [size=124, length=-1] 2@1000 --(class)--> 1000@0 [size=123, length=-1] 3@1001 --(class)--> 1001@0 [size=123, length=-1] -3@1001 --(field@16)--> 4@1000 [size=16, length=-1] -3@1001 --(field@20)--> 5@1002 [size=32, length=-1] +3@1001 --(field@4)--> 4@1000 [size=16, length=-1] +3@1001 --(field@5)--> 5@1002 [size=32, length=-1] 4@1000 --(class)--> 1000@0 [size=123, length=-1] 5@1002 --(class)--> 1002@0 [size=123, length=-1] -5@1002 --(field@24)--> 6@1000 [size=16, length=-1] -5@1002 --(field@28)--> 1@1000 [size=16, length=-1] +5@1002 --(field@8)--> 6@1000 [size=16, length=-1] +5@1002 --(field@9)--> 1@1000 [size=16, length=-1] 6@1000 --(class)--> 1000@0 [size=123, length=-1] --- 1001@0 --(superclass)--> 1000@0 [size=123, length=-1] 1002@0 --(interface)--> 2001@0 [size=124, length=-1] 1002@0 --(superclass)--> 1001@0 [size=123, length=-1] 1@1000 --(class)--> 1000@0 [size=123, length=-1] -1@1000 --(field@12)--> 3@1001 [size=24, length=-1] -1@1000 --(field@8)--> 2@1000 [size=16, length=-1] +1@1000 --(field@2)--> 2@1000 [size=16, length=-1] +1@1000 --(field@3)--> 3@1001 [size=24, length=-1] 2001@0 --(interface)--> 2000@0 [size=124, length=-1] 2@1000 --(class)--> 1000@0 [size=123, length=-1] 3@1001 --(class)--> 1001@0 [size=123, length=-1] -3@1001 --(field@16)--> 4@1000 [size=16, length=-1] -3@1001 --(field@20)--> 5@1002 [size=32, length=-1] +3@1001 --(field@4)--> 4@1000 
[size=16, length=-1] +3@1001 --(field@5)--> 5@1002 [size=32, length=-1] 4@1000 --(class)--> 1000@0 [size=123, length=-1] 5@1002 --(class)--> 1002@0 [size=123, length=-1] -5@1002 --(field@24)--> 6@1000 [size=16, length=-1] -5@1002 --(field@28)--> 1@1000 [size=16, length=-1] +5@1002 --(field@8)--> 6@1000 [size=16, length=-1] +5@1002 --(field@9)--> 1@1000 [size=16, length=-1] 6@1000 --(class)--> 1000@0 [size=123, length=-1] --- ---- tagged classes root@root --(stack-local[id=1,tag=3000,depth=3,method=doFollowReferencesTest,vreg=1,location= 28])--> 3000@0 [size=132, length=-1] -root@root --(system-class)--> 2@0 [size=32, length=-1] root@root --(thread)--> 3000@0 [size=132, length=-1] 1001@0 --(superclass)--> 1000@0 [size=123, length=-1] 1002@0 --(interface)--> 2001@0 [size=124, length=-1] @@ -233,7 +255,6 @@ root@root --(thread)--> 3000@0 [size=132, length=-1] 5@1002 --(class)--> 1002@0 [size=123, length=-1] 6@1000 --(class)--> 1000@0 [size=123, length=-1] --- -root@root --(system-class)--> 2@0 [size=32, length=-1] root@root --(thread)--> 3000@0 [size=132, length=-1] 1001@0 --(superclass)--> 1000@0 [size=123, length=-1] 1002@0 --(interface)--> 2001@0 [size=124, length=-1] @@ -260,19 +281,19 @@ root@root --(thread)--> 3000@0 [size=132, length=-1] ---- untagged classes root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=13,location= 32])--> 1@1000 [size=16, length=-1] 0@0 --(array-element@0)--> 1@1000 [size=16, length=-1] -1@1000 --(field@12)--> 3@1001 [size=24, length=-1] -1@1000 --(field@8)--> 2@1000 [size=16, length=-1] -3@1001 --(field@16)--> 4@1000 [size=16, length=-1] -3@1001 --(field@20)--> 5@1002 [size=32, length=-1] -5@1002 --(field@24)--> 6@1000 [size=16, length=-1] -5@1002 --(field@28)--> 1@1000 [size=16, length=-1] +1@1000 --(field@2)--> 2@1000 [size=16, length=-1] +1@1000 --(field@3)--> 3@1001 [size=24, length=-1] +3@1001 --(field@4)--> 4@1000 [size=16, length=-1] +3@1001 --(field@5)--> 5@1002 [size=32, length=-1] +5@1002 --(field@8)--> 6@1000 [size=16, length=-1] +5@1002 --(field@9)--> 1@1000 [size=16, length=-1] --- -1@1000 --(field@12)--> 3@1001 [size=24, length=-1] -1@1000 --(field@8)--> 2@1000 [size=16, length=-1] -3@1001 --(field@16)--> 4@1000 [size=16, length=-1] -3@1001 --(field@20)--> 5@1002 [size=32, length=-1] -5@1002 --(field@24)--> 6@1000 [size=16, length=-1] -5@1002 --(field@28)--> 1@1000 [size=16, length=-1] +1@1000 --(field@2)--> 2@1000 [size=16, length=-1] +1@1000 --(field@3)--> 3@1001 [size=24, length=-1] +3@1001 --(field@4)--> 4@1000 [size=16, length=-1] +3@1001 --(field@5)--> 5@1002 [size=32, length=-1] +5@1002 --(field@8)--> 6@1000 [size=16, length=-1] +5@1002 --(field@9)--> 1@1000 [size=16, length=-1] --- root@root --(jni-global)--> 1@1000 [size=16, length=-1] root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 1@1000 [size=16, length=-1] @@ -280,17 +301,17 @@ root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=5,location= 10])--> 1@1000 [size=16, length=-1] root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 19])--> 1@1000 [size=16, length=-1] root@root --(thread)--> 1@1000 [size=16, length=-1] -1@1000 --(field@12)--> 3@1001 [size=24, length=-1] -1@1000 --(field@8)--> 2@1000 [size=16, length=-1] -3@1001 --(field@16)--> 4@1000 [size=16, length=-1] -3@1001 --(field@20)--> 5@1002 [size=32, length=-1] -5@1002 --(field@24)--> 6@1000 [size=16, 
length=-1] -5@1002 --(field@28)--> 1@1000 [size=16, length=-1] +1@1000 --(field@2)--> 2@1000 [size=16, length=-1] +1@1000 --(field@3)--> 3@1001 [size=24, length=-1] +3@1001 --(field@4)--> 4@1000 [size=16, length=-1] +3@1001 --(field@5)--> 5@1002 [size=32, length=-1] +5@1002 --(field@8)--> 6@1000 [size=16, length=-1] +5@1002 --(field@9)--> 1@1000 [size=16, length=-1] --- -1@1000 --(field@12)--> 3@1001 [size=24, length=-1] -1@1000 --(field@8)--> 2@1000 [size=16, length=-1] -3@1001 --(field@16)--> 4@1000 [size=16, length=-1] -3@1001 --(field@20)--> 5@1002 [size=32, length=-1] -5@1002 --(field@24)--> 6@1000 [size=16, length=-1] -5@1002 --(field@28)--> 1@1000 [size=16, length=-1] +1@1000 --(field@2)--> 2@1000 [size=16, length=-1] +1@1000 --(field@3)--> 3@1001 [size=24, length=-1] +3@1001 --(field@4)--> 4@1000 [size=16, length=-1] +3@1001 --(field@5)--> 5@1002 [size=32, length=-1] +5@1002 --(field@8)--> 6@1000 [size=16, length=-1] +5@1002 --(field@9)--> 1@1000 [size=16, length=-1] --- diff --git a/test/913-heaps/heaps.cc b/test/913-heaps/heaps.cc index 99bc48eeec..39fa000195 100644 --- a/test/913-heaps/heaps.cc +++ b/test/913-heaps/heaps.cc @@ -654,5 +654,95 @@ extern "C" JNIEXPORT jstring JNICALL Java_Main_followReferencesPrimitiveArray( return env->NewStringUTF(fac.data.c_str()); } +static constexpr const char* GetPrimitiveTypeName(jvmtiPrimitiveType type) { + switch (type) { + case JVMTI_PRIMITIVE_TYPE_BOOLEAN: + return "boolean"; + case JVMTI_PRIMITIVE_TYPE_BYTE: + return "byte"; + case JVMTI_PRIMITIVE_TYPE_CHAR: + return "char"; + case JVMTI_PRIMITIVE_TYPE_SHORT: + return "short"; + case JVMTI_PRIMITIVE_TYPE_INT: + return "int"; + case JVMTI_PRIMITIVE_TYPE_FLOAT: + return "float"; + case JVMTI_PRIMITIVE_TYPE_LONG: + return "long"; + case JVMTI_PRIMITIVE_TYPE_DOUBLE: + return "double"; + } + LOG(FATAL) << "Unknown type " << static_cast<size_t>(type); + UNREACHABLE(); +} + +extern "C" JNIEXPORT jstring JNICALL Java_Main_followReferencesPrimitiveFields( + JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject initial_object) { + struct FindFieldCallbacks { + static jint JNICALL FollowReferencesCallback( + jvmtiHeapReferenceKind reference_kind ATTRIBUTE_UNUSED, + const jvmtiHeapReferenceInfo* reference_info ATTRIBUTE_UNUSED, + jlong class_tag ATTRIBUTE_UNUSED, + jlong referrer_class_tag ATTRIBUTE_UNUSED, + jlong size ATTRIBUTE_UNUSED, + jlong* tag_ptr ATTRIBUTE_UNUSED, + jlong* referrer_tag_ptr ATTRIBUTE_UNUSED, + jint length ATTRIBUTE_UNUSED, + void* user_data ATTRIBUTE_UNUSED) { + return JVMTI_VISIT_OBJECTS; // Continue visiting. + } + + static jint JNICALL PrimitiveFieldValueCallback(jvmtiHeapReferenceKind kind, + const jvmtiHeapReferenceInfo* info, + jlong class_tag, + jlong* tag_ptr, + jvalue value, + jvmtiPrimitiveType value_type, + void* user_data) { + FindFieldCallbacks* p = reinterpret_cast<FindFieldCallbacks*>(user_data); + if (*tag_ptr != 0) { + std::ostringstream oss; + oss << *tag_ptr + << '@' + << class_tag + << " (" + << (kind == JVMTI_HEAP_REFERENCE_FIELD ? "instance, " : "static, ") + << GetPrimitiveTypeName(value_type) + << ", index=" + << info->field.index + << ") "; + // Be lazy, always print eight bytes. + static_assert(sizeof(jvalue) == sizeof(uint64_t), "Unexpected jvalue size"); + uint64_t val; + memcpy(&val, &value, sizeof(uint64_t)); // To avoid undefined behavior. + oss << android::base::StringPrintf("%016" PRIx64, val); + + if (!p->data.empty()) { + p->data += "\n"; + } + p->data += oss.str(); + // Update the tag to test whether that works. 
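+        // (JVMTI allows heap callbacks to retag the visited object by writing through
+        //  tag_ptr; the runtime adopts the new value once the callback returns.)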
+ *tag_ptr = *tag_ptr + 1; + } + return 0; + } + + std::string data; + }; + + jvmtiHeapCallbacks callbacks; + memset(&callbacks, 0, sizeof(jvmtiHeapCallbacks)); + callbacks.heap_reference_callback = FindFieldCallbacks::FollowReferencesCallback; + callbacks.primitive_field_callback = FindFieldCallbacks::PrimitiveFieldValueCallback; + + FindFieldCallbacks ffc; + jvmtiError ret = jvmti_env->FollowReferences(0, nullptr, initial_object, &callbacks, &ffc); + if (JvmtiErrorToException(env, ret)) { + return nullptr; + } + return env->NewStringUTF(ffc.data.c_str()); +} + } // namespace Test913Heaps } // namespace art diff --git a/test/913-heaps/src/Main.java b/test/913-heaps/src/Main.java index df89f347e0..66f68834a1 100644 --- a/test/913-heaps/src/Main.java +++ b/test/913-heaps/src/Main.java @@ -25,8 +25,19 @@ public class Main { doTest(); new TestConfig().doFollowReferencesTest(); + Runtime.getRuntime().gc(); + Runtime.getRuntime().gc(); + doStringTest(); + + Runtime.getRuntime().gc(); + Runtime.getRuntime().gc(); + doPrimitiveArrayTest(); + doPrimitiveFieldTest(); + + Runtime.getRuntime().gc(); + Runtime.getRuntime().gc(); // Test klass filter. System.out.println("--- klass ---"); @@ -53,14 +64,18 @@ public class Main { } public static void doStringTest() throws Exception { - final String str = "HelloWorld"; + final String str = new String("HelloWorld"); + final String str2 = new String(""); Object o = new Object() { String s = str; + String s2 = str2; }; setTag(str, 1); + setTag(str2, 2); System.out.println(Arrays.toString(followReferencesString(o))); System.out.println(getTag(str)); + System.out.println(getTag(str2)); } public static void doPrimitiveArrayTest() throws Exception { @@ -110,6 +125,62 @@ public class Main { System.out.println(getTag(dArray)); } + public static void doPrimitiveFieldTest() throws Exception { + // Force GCs to clean up dirt. + Runtime.getRuntime().gc(); + Runtime.getRuntime().gc(); + + doTestPrimitiveFieldsClasses(); + + doTestPrimitiveFieldsIntegral(); + + // Force GCs to clean up dirt. + Runtime.getRuntime().gc(); + Runtime.getRuntime().gc(); + + doTestPrimitiveFieldsFloat(); + + // Force GCs to clean up dirt. 
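+    // (Running the collector twice gives concurrent collectors a chance to finish
+    //  reclaiming earlier garbage, which keeps the tag-based output deterministic.)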
+    Runtime.getRuntime().gc();
+    Runtime.getRuntime().gc();
+  }
+
+  private static void doTestPrimitiveFieldsClasses() {
+    setTag(IntObject.class, 10000);
+    System.out.println(followReferencesPrimitiveFields(IntObject.class));
+    System.out.println(getTag(IntObject.class));
+    setTag(IntObject.class, 0);
+
+    setTag(FloatObject.class, 10000);
+    System.out.println(followReferencesPrimitiveFields(FloatObject.class));
+    System.out.println(getTag(FloatObject.class));
+    setTag(FloatObject.class, 0);
+
+    setTag(Inf1.class, 10000);
+    System.out.println(followReferencesPrimitiveFields(Inf1.class));
+    System.out.println(getTag(Inf1.class));
+    setTag(Inf1.class, 0);
+
+    setTag(Inf2.class, 10000);
+    System.out.println(followReferencesPrimitiveFields(Inf2.class));
+    System.out.println(getTag(Inf2.class));
+    setTag(Inf2.class, 0);
+  }
+
+  private static void doTestPrimitiveFieldsIntegral() {
+    IntObject intObject = new IntObject();
+    setTag(intObject, 10000);
+    System.out.println(followReferencesPrimitiveFields(intObject));
+    System.out.println(getTag(intObject));
+  }
+
+  private static void doTestPrimitiveFieldsFloat() {
+    FloatObject floatObject = new FloatObject();
+    setTag(floatObject, 10000);
+    System.out.println(followReferencesPrimitiveFields(floatObject));
+    System.out.println(getTag(floatObject));
+  }
+
   private static void run() {
     clearStats();
     forceGarbageCollection();
@@ -301,6 +372,31 @@ public class Main {
     }
   }
+  private static interface Inf1 {
+    public final static int A = 1;
+  }
+
+  private static interface Inf2 extends Inf1 {
+    public final static int B = 1;
+  }
+
+  private static class IntObject implements Inf1 {
+    byte b = (byte)1;
+    char c = 'a';
+    short s = (short)2;
+    int i = 3;
+    long l = 4;
+    Object o = new Object();
+    static int sI = 5;
+  }
+
+  private static class FloatObject extends IntObject implements Inf2 {
+    float f = 1.23f;
+    double d = 1.23;
+    Object p = new Object();
+    static int sI = 6;
+  }
+
   public static class Verifier {
     // Should roots with vreg=-1 be printed?
     public final static boolean PRINT_ROOTS_WITH_UNKNOWN_VREG = false;
@@ -494,4 +590,5 @@ public class Main {
       Object initialObject, int stopAfter, int followSet, Object jniRef);
   public static native String[] followReferencesString(Object initialObject);
   public static native String followReferencesPrimitiveArray(Object initialObject);
+  public static native String followReferencesPrimitiveFields(Object initialObject);
 }
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 95967b527c..bfb04a4fba 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -368,6 +368,7 @@ TEST_ART_BROKEN_INTERPRETER_ACCESS_CHECK_TESTS :=
 # Tests that are broken with GC stress.
 # * 137-cfi needs to unwind a second forked process. We're using a primitive sleep to wait till we
 #   hope the second process got into the expected state. The slowness of gcstress makes this bad.
+# * 152-dead-large-object requires a heap larger than what gcstress uses.
 # * 908-gc-start-finish expects GCs only to be run at clear points. The reduced heap size makes
 #   this non-deterministic. Same for 913.
 # * 961-default-iface-resolution-gen and 964-default-iface-init-gen are very long tests that often
@@ -375,6 +376,7 @@ TEST_ART_BROKEN_INTERPRETER_ACCESS_CHECK_TESTS :=
 #   slows down allocations significantly which these tests do a lot.
TEST_ART_BROKEN_GCSTRESS_RUN_TESTS := \ 137-cfi \ + 152-dead-large-object \ 154-gc-loop \ 908-gc-start-finish \ 913-heaps \ @@ -810,6 +812,12 @@ endif TEST_ART_TARGET_SYNC_DEPS += libopenjdkjvmti TEST_ART_TARGET_SYNC_DEPS += libopenjdkjvmtid +TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/core-libart-testdex.jar +TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/core-oj-testdex.jar +TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/okhttp-testdex.jar +TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/bouncycastle-testdex.jar +TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/conscrypt-testdex.jar + # All tests require the host executables. The tests also depend on the core images, but on # specific version depending on the compiler. ART_TEST_HOST_RUN_TEST_DEPENDENCIES := \ diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar index f3d4332009..161aa2340d 100755 --- a/test/etc/run-test-jar +++ b/test/etc/run-test-jar @@ -371,6 +371,20 @@ fi if [ "$HAVE_IMAGE" = "n" ]; then + if [ "${HOST}" = "y" ]; then + framework="${ANDROID_HOST_OUT}/framework" + bpath_suffix="-hostdex" + else + framework="${ANDROID_ROOT}/framework" + bpath_suffix="-testdex" + fi + bpath="${framework}/core-libart${bpath_suffix}.jar" + bpath="${bpath}:${framework}/core-oj${bpath_suffix}.jar" + bpath="${bpath}:${framework}/conscrypt${bpath_suffix}.jar" + bpath="${bpath}:${framework}/okhttp${bpath_suffix}.jar" + bpath="${bpath}:${framework}/bouncycastle${bpath_suffix}.jar" + # Pass down the bootclasspath + FLAGS="${FLAGS} -Xbootclasspath:${bpath}" # Add 5 minutes to give some time to generate the boot image. TIME_OUT_VALUE=$((${TIME_OUT_VALUE} + 300)) DALVIKVM_BOOT_OPT="-Ximage:/system/non-existant/core.art" diff --git a/test/knownfailures.json b/test/knownfailures.json index 784f49c4b9..6caf7b0f36 100644 --- a/test/knownfailures.json +++ b/test/knownfailures.json @@ -106,6 +106,11 @@ "slowness of gcstress makes this bad."] }, { + "test": "152-dead-large-object", + "variant": "gcstress", + "description": ["152-dead-large-object requires a heap larger than what gcstress uses."] + }, + { "tests": ["908-gc-start-finish", "913-heaps"], "variant": "gcstress", diff --git a/test/run-test b/test/run-test index 6134a14696..7d3d813da0 100755 --- a/test/run-test +++ b/test/run-test @@ -530,22 +530,6 @@ if [ "$have_image" = "no" ]; then err_echo "--no-image is only supported on the art runtime" exit 1 fi - if [ "$target_mode" = "no" ]; then - framework="${ANDROID_HOST_OUT}/framework" - bpath_suffix="-hostdex" - else - framework="${android_root}/framework" - bpath_suffix="" - fi - # TODO If the target was compiled WITH_DEXPREOPT=true then these tests will - # fail since these jar files will be stripped. 
-  bpath="${framework}/core-libart${bpath_suffix}.jar"
-  bpath="${bpath}:${framework}/core-oj${bpath_suffix}.jar"
-  bpath="${bpath}:${framework}/conscrypt${bpath_suffix}.jar"
-  bpath="${bpath}:${framework}/okhttp${bpath_suffix}.jar"
-  bpath="${bpath}:${framework}/bouncycastle${bpath_suffix}.jar"
-  # Pass down the bootclasspath
-  run_args="${run_args} --runtime-option -Xbootclasspath:${bpath}"
   run_args="${run_args} --no-image"
 fi
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index 65296406a1..e0aae46447 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -216,5 +216,55 @@
 modes: [device],
 names: ["libcore.java.lang.ProcessBuilderTest#testRedirectInherit",
         "libcore.java.lang.ProcessBuilderTest#testRedirect_nullStreams"]
+},
+{
+  description: "Linker issues with libjavacoretests",
+  result: EXEC_FAILED,
+  bug: 35417197,
+  modes: [device],
+  names: [
+    "dalvik.system.JniTest#testGetSuperclass",
+    "dalvik.system.JniTest#testPassingBooleans",
+    "dalvik.system.JniTest#testPassingBytes",
+    "dalvik.system.JniTest#testPassingChars",
+    "dalvik.system.JniTest#testPassingClass",
+    "dalvik.system.JniTest#testPassingDoubles",
+    "dalvik.system.JniTest#testPassingFloats",
+    "dalvik.system.JniTest#testPassingInts",
+    "dalvik.system.JniTest#testPassingLongs",
+    "dalvik.system.JniTest#testPassingObjectReferences",
+    "dalvik.system.JniTest#testPassingShorts",
+    "dalvik.system.JniTest#testPassingThis",
+    "libcore.java.lang.OldSystemTest#test_load",
+    "libcore.java.lang.ThreadTest#testContextClassLoaderIsInherited",
+    "libcore.java.lang.ThreadTest#testContextClassLoaderIsNotNull",
+    "libcore.java.lang.ThreadTest#testGetAllStackTracesIncludesAllGroups",
+    "libcore.java.lang.ThreadTest#testGetStackTrace",
+    "libcore.java.lang.ThreadTest#testJavaContextClassLoader",
+    "libcore.java.lang.ThreadTest#testLeakingStartedThreads",
+    "libcore.java.lang.ThreadTest#testLeakingUnstartedThreads",
+    "libcore.java.lang.ThreadTest#testNativeThreadNames",
+    "libcore.java.lang.ThreadTest#testParkUntilWithUnderflowValue",
+    "libcore.java.lang.ThreadTest#testThreadDoubleStart",
+    "libcore.java.lang.ThreadTest#testThreadInterrupted",
+    "libcore.java.lang.ThreadTest#testThreadRestart",
+    "libcore.java.lang.ThreadTest#testThreadSleep",
+    "libcore.java.lang.ThreadTest#testThreadSleepIllegalArguments",
+    "libcore.java.lang.ThreadTest#testThreadWakeup",
+    "libcore.java.lang.ThreadTest#testUncaughtExceptionPreHandler_calledBeforeDefaultHandler",
+    "libcore.java.lang.ThreadTest#testUncaughtExceptionPreHandler_noDefaultHandler",
+    "libcore.java.util.TimeZoneTest#testDisplayNamesWithScript",
+    "libcore.java.util.zip.ZipEntryTest#testCommentAndExtraInSameOrder",
+    "libcore.java.util.zip.ZipEntryTest#testMaxLengthExtra",
+    "libcore.util.NativeAllocationRegistryTest#testBadSize",
+    "libcore.util.NativeAllocationRegistryTest#testEarlyFree",
+    "libcore.util.NativeAllocationRegistryTest#testNativeAllocationAllocatorAndNoSharedRegistry",
+    "libcore.util.NativeAllocationRegistryTest#testNativeAllocationAllocatorAndSharedRegistry",
+    "libcore.util.NativeAllocationRegistryTest#testNativeAllocationNoAllocatorAndNoSharedRegistry",
+    "libcore.util.NativeAllocationRegistryTest#testNativeAllocationNoAllocatorAndSharedRegistry",
+    "libcore.util.NativeAllocationRegistryTest#testNullArguments",
+    "org.apache.harmony.tests.java.text.SimpleDateFormatTest#test_parse_y",
+    "org.apache.harmony.tests.java.text.SimpleDateFormatTest#test_parse_yy"
+  ]
+}
]
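For reference, both new JVMTI tests drive the primitive-field walk in the same tag-then-iterate
shape. A condensed Java sketch, with hypothetical native hooks mirroring the declarations in the
tests above:

    public class PrimitiveFieldWalkDemo {
      // Hypothetical stand-ins for the tests' native methods.
      static native void setTag(Object o, long tag);
      static native long getTag(Object o);
      static native String iterateThroughHeapPrimitiveFields(long tag);

      static void dump(Object target) {
        setTag(target, 10000);  // Tag the object whose primitive fields should be reported.
        System.out.println(iterateThroughHeapPrimitiveFields(10000));
        // The callback bumps the tag once per reported field, so the final tag
        // effectively counts the primitive fields that were visited.
        System.out.println(getTag(target));
      }
    }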