153 files changed, 3373 insertions, 2144 deletions
diff --git a/Android.mk b/Android.mk index cf3a9e7afb..f3ab3c17b0 100644 --- a/Android.mk +++ b/Android.mk @@ -341,56 +341,6 @@ valgrind-test-art-target64: valgrind-test-art-target-gtest64 endif # art_test_bother -######################################################################## -# oat-target and oat-target-sync rules - -OAT_TARGET_RULES := - -# $(1): input jar or apk target location -define declare-oat-target-target -OUT_OAT_FILE := $(PRODUCT_OUT)/$(basename $(1)).odex - -ifeq ($(ONE_SHOT_MAKEFILE),) -# ONE_SHOT_MAKEFILE is empty for a top level build and we don't want -# to define the oat-target-* rules there because they will conflict -# with the build/core/dex_preopt.mk defined rules. -.PHONY: oat-target-$(1) -oat-target-$(1): - -else -.PHONY: oat-target-$(1) -oat-target-$(1): $$(OUT_OAT_FILE) - -$$(OUT_OAT_FILE): $(PRODUCT_OUT)/$(1) $(DEFAULT_DEX_PREOPT_BUILT_IMAGE) $(DEX2OAT_DEPENDENCY) - @mkdir -p $$(dir $$@) - $(DEX2OAT) --runtime-arg -Xms$(DEX2OAT_XMS) --runtime-arg -Xmx$(DEX2OAT_XMX) \ - --boot-image=$(DEFAULT_DEX_PREOPT_BUILT_IMAGE) --dex-file=$(PRODUCT_OUT)/$(1) \ - --dex-location=/$(1) --oat-file=$$@ \ - --instruction-set=$(DEX2OAT_TARGET_ARCH) \ - --instruction-set-variant=$(DEX2OAT_TARGET_CPU_VARIANT) \ - --instruction-set-features=$(DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES) \ - --android-root=$(PRODUCT_OUT)/system --include-patch-information \ - --runtime-arg -Xnorelocate - -endif - -OAT_TARGET_RULES += oat-target-$(1) -endef - -$(foreach file,\ - $(filter-out\ - $(addprefix $(TARGET_OUT_JAVA_LIBRARIES)/,$(addsuffix .jar,$(LIBART_TARGET_BOOT_JARS))),\ - $(wildcard $(TARGET_OUT_APPS)/*.apk) $(wildcard $(TARGET_OUT_JAVA_LIBRARIES)/*.jar)),\ - $(eval $(call declare-oat-target-target,$(subst $(PRODUCT_OUT)/,,$(file))))) - -.PHONY: oat-target -oat-target: $(ART_TARGET_DEPENDENCIES) $(DEFAULT_DEX_PREOPT_INSTALLED_IMAGE) $(OAT_TARGET_RULES) - -.PHONY: oat-target-sync -oat-target-sync: oat-target - $(TEST_ART_ADB_ROOT_AND_REMOUNT) - adb sync - #################################################################################################### # Fake packages to ensure generation of libopenjdkd when one builds with mm/mmm/mmma. 
# diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk index bd7f900965..5bdfbc74eb 100644 --- a/build/Android.gtest.mk +++ b/build/Android.gtest.mk @@ -332,7 +332,7 @@ valgrind-$$(gtest_rule): $(ART_VALGRIND_TARGET_DEPENDENCIES) test-art-target-syn (adb shell "$(GCOV_ENV) LD_LIBRARY_PATH=$(4) ANDROID_ROOT=$(ART_GTEST_TARGET_ANDROID_ROOT) \ valgrind --leak-check=full --error-exitcode=1 --workaround-gcc296-bugs=yes \ --suppressions=$(ART_TARGET_TEST_DIR)/valgrind-target-suppressions.txt \ - --num-callers=50 \ + --num-callers=50 --show-mismatched-frees=no \ $$(PRIVATE_TARGET_EXE) && touch $(ART_TARGET_TEST_DIR)/$(TARGET_$(3)ARCH)/$$@-$$$$PPID" \ && (adb pull $(ART_TARGET_TEST_DIR)/$(TARGET_$(3)ARCH)/$$@-$$$$PPID /tmp/ \ && $$(call ART_TEST_PASSED,$$@)) \ diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc index 86d92ff0b5..4180e0e6c9 100644 --- a/compiler/oat_test.cc +++ b/compiler/oat_test.cc @@ -487,7 +487,7 @@ TEST_F(OatTest, OatHeaderSizeCheck) { EXPECT_EQ(72U, sizeof(OatHeader)); EXPECT_EQ(4U, sizeof(OatMethodOffsets)); EXPECT_EQ(20U, sizeof(OatQuickMethodHeader)); - EXPECT_EQ(164 * static_cast<size_t>(GetInstructionSetPointerSize(kRuntimeISA)), + EXPECT_EQ(163 * static_cast<size_t>(GetInstructionSetPointerSize(kRuntimeISA)), sizeof(QuickEntryPoints)); } diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc index 402eeee65f..f00648f570 100644 --- a/compiler/optimizing/code_generator.cc +++ b/compiler/optimizing/code_generator.cc @@ -1378,28 +1378,21 @@ uint32_t CodeGenerator::GetReferenceDisableFlagOffset() const { void CodeGenerator::EmitJitRoots(uint8_t* code, Handle<mirror::ObjectArray<mirror::Object>> roots, - const uint8_t* roots_data, - Handle<mirror::DexCache> outer_dex_cache) { + const uint8_t* roots_data) { DCHECK_EQ(static_cast<size_t>(roots->GetLength()), GetNumberOfJitRoots()); - StackHandleScope<1> hs(Thread::Current()); - MutableHandle<mirror::DexCache> h_dex_cache(hs.NewHandle<mirror::DexCache>(nullptr)); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); size_t index = 0; for (auto& entry : jit_string_roots_) { - const DexFile& entry_dex_file = *entry.first.dex_file; - // Avoid the expensive FindDexCache call by checking if the string is - // in the compiled method's dex file. - h_dex_cache.Assign(IsSameDexFile(*outer_dex_cache->GetDexFile(), entry_dex_file) - ? outer_dex_cache.Get() - : class_linker->FindDexCache(hs.Self(), entry_dex_file)); - mirror::String* string = class_linker->LookupString( - entry_dex_file, entry.first.string_index, h_dex_cache); - DCHECK(string != nullptr) << "JIT roots require strings to have been loaded"; + // Update the `roots` with the string, and replace the address temporarily + // stored to the index in the table. + uint64_t address = entry.second; + roots->Set(index, reinterpret_cast<StackReference<mirror::String>*>(address)->AsMirrorPtr()); + DCHECK(roots->Get(index) != nullptr); + entry.second = index; // Ensure the string is strongly interned. This is a requirement on how the JIT // handles strings. b/32995596 - class_linker->GetInternTable()->InternStrong(string); - roots->Set(index, string); - entry.second = index; + class_linker->GetInternTable()->InternStrong( + reinterpret_cast<mirror::String*>(roots->Get(index))); ++index; } for (auto& entry : jit_class_roots_) { @@ -1407,6 +1400,7 @@ void CodeGenerator::EmitJitRoots(uint8_t* code, // stored to the index in the table. 
uint64_t address = entry.second; roots->Set(index, reinterpret_cast<StackReference<mirror::Class>*>(address)->AsMirrorPtr()); + DCHECK(roots->Get(index) != nullptr); entry.second = index; ++index; } diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h index 2e2c3c00af..6366b9838f 100644 --- a/compiler/optimizing/code_generator.h +++ b/compiler/optimizing/code_generator.h @@ -351,8 +351,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> { // Also emits literal patches. void EmitJitRoots(uint8_t* code, Handle<mirror::ObjectArray<mirror::Object>> roots, - const uint8_t* roots_data, - Handle<mirror::DexCache> outer_dex_cache) + const uint8_t* roots_data) REQUIRES_SHARED(Locks::mutator_lock_); bool IsLeafMethod() const { @@ -713,9 +712,9 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> { const ArenaVector<HBasicBlock*>* block_order_; // Maps a StringReference (dex_file, string_index) to the index in the literal table. - // Entries are intially added with a 0 index, and `EmitJitRoots` will compute all the - // indices. - ArenaSafeMap<StringReference, uint32_t, StringReferenceValueComparator> jit_string_roots_; + // Entries are intially added with a pointer in the handle zone, and `EmitJitRoots` + // will compute all the indices. + ArenaSafeMap<StringReference, uint64_t, StringReferenceValueComparator> jit_string_roots_; // Maps a ClassReference (dex_file, type_index) to the index in the literal table. // Entries are intially added with a pointer in the handle zone, and `EmitJitRoots` diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc index 8a7f6d3a33..541a1c5b8f 100644 --- a/compiler/optimizing/code_generator_arm.cc +++ b/compiler/optimizing/code_generator_arm.cc @@ -3936,7 +3936,6 @@ void LocationsBuilderARM::VisitNewInstance(HNewInstance* instruction) { } else { InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); - locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); } locations->SetOut(Location::RegisterLocation(R0)); } @@ -3954,7 +3953,7 @@ void InstructionCodeGeneratorARM::VisitNewInstance(HNewInstance* instruction) { codegen_->RecordPcInfo(instruction, instruction->GetDexPc()); } else { codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc()); - CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>(); + CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>(); } } @@ -5937,7 +5936,9 @@ void LocationsBuilderARM::VisitLoadString(HLoadString* load) { } } -void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) { +// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not +// move. +void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) NO_THREAD_SAFETY_ANALYSIS { LocationSummary* locations = load->GetLocations(); Location out_loc = locations->Out(); Register out = out_loc.AsRegister<Register>(); @@ -5962,8 +5963,9 @@ void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) { return; // No dex cache slow path. 
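The EmitJitRoots and jit_string_roots_ changes above replace the old placeholder-index scheme with a two-phase one: while compiling, each map entry records the address of the handle's reference slot as a uint64_t, and only when the roots table is emitted is that slot dereferenced, the object stored into the table, and the map value overwritten with the table index the generated code will load through. A minimal standalone sketch of that flow, using toy stand-ins (StackReference, MirrorString, StringKey) rather than ART's real types:

#include <cstdint>
#include <map>
#include <utility>
#include <vector>

// Toy GC-visible slot holding a reference, standing in for ART's StackReference<T>.
template <typename T>
struct StackReference {
  T* ref = nullptr;
  T* AsMirrorPtr() const { return ref; }
};

struct MirrorString {};                 // stand-in for mirror::String
using StringKey = std::pair<int, int>;  // stand-in for StringReference(dex_file, string_index)

int main() {
  // Phase 1 (DeduplicateJitStringLiteral): only a handle/slot for the string exists,
  // so record the slot's address in the map instead of a final index.
  MirrorString str;
  StackReference<MirrorString> slot;
  slot.ref = &str;

  std::map<StringKey, uint64_t> jit_string_roots;
  jit_string_roots[{0, 42}] = reinterpret_cast<uint64_t>(&slot);

  // Phase 2 (EmitJitRoots): dereference each recorded slot, fill the emitted roots
  // table, and overwrite the map value with the index the compiled code will use.
  std::vector<MirrorString*> roots;
  uint64_t index = 0;
  for (auto& entry : jit_string_roots) {
    uint64_t address = entry.second;
    roots.push_back(reinterpret_cast<StackReference<MirrorString>*>(address)->AsMirrorPtr());
    entry.second = index++;
  }

  return (roots.size() == 1 && roots[0] == &str) ? 0 : 1;
}

The real code additionally interns each string strongly (InternStrong) before publishing it, since the JIT requires its string roots to stay interned (b/32995596).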
} case HLoadString::LoadKind::kBootImageAddress: { - DCHECK_NE(load->GetAddress(), 0u); - uint32_t address = dchecked_integral_cast<uint32_t>(load->GetAddress()); + uint32_t address = dchecked_integral_cast<uint32_t>( + reinterpret_cast<uintptr_t>(load->GetString().Get())); + DCHECK_NE(address, 0u); __ LoadLiteral(out, codegen_->DeduplicateBootImageAddressLiteral(address)); return; // No dex cache slow path. } @@ -5987,7 +5989,8 @@ void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) { } case HLoadString::LoadKind::kJitTableAddress: { __ LoadLiteral(out, codegen_->DeduplicateJitStringLiteral(load->GetDexFile(), - load->GetStringIndex())); + load->GetStringIndex(), + load->GetString())); // /* GcRoot<mirror::String> */ out = *out GenerateGcRootFieldLoad(load, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption); return; @@ -7317,8 +7320,10 @@ Literal* CodeGeneratorARM::DeduplicateBootImageAddressLiteral(uint32_t address) } Literal* CodeGeneratorARM::DeduplicateJitStringLiteral(const DexFile& dex_file, - dex::StringIndex string_index) { - jit_string_roots_.Overwrite(StringReference(&dex_file, string_index), /* placeholder */ 0u); + dex::StringIndex string_index, + Handle<mirror::String> handle) { + jit_string_roots_.Overwrite(StringReference(&dex_file, string_index), + reinterpret_cast64<uint64_t>(handle.GetReference())); return jit_string_patches_.GetOrCreate( StringReference(&dex_file, string_index), [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); }); diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h index 6435851320..d5968e0764 100644 --- a/compiler/optimizing/code_generator_arm.h +++ b/compiler/optimizing/code_generator_arm.h @@ -489,7 +489,9 @@ class CodeGeneratorARM : public CodeGenerator { dex::StringIndex string_index); Literal* DeduplicateBootImageTypeLiteral(const DexFile& dex_file, dex::TypeIndex type_index); Literal* DeduplicateBootImageAddressLiteral(uint32_t address); - Literal* DeduplicateJitStringLiteral(const DexFile& dex_file, dex::StringIndex string_index); + Literal* DeduplicateJitStringLiteral(const DexFile& dex_file, + dex::StringIndex string_index, + Handle<mirror::String> handle); Literal* DeduplicateJitClassLiteral(const DexFile& dex_file, dex::TypeIndex type_index, uint64_t address); diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc index 5c33fe1a7d..9aaeadb44a 100644 --- a/compiler/optimizing/code_generator_arm64.cc +++ b/compiler/optimizing/code_generator_arm64.cc @@ -4137,8 +4137,9 @@ vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateBootImageAddres } vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateJitStringLiteral( - const DexFile& dex_file, dex::StringIndex string_index) { - jit_string_roots_.Overwrite(StringReference(&dex_file, string_index), /* placeholder */ 0u); + const DexFile& dex_file, dex::StringIndex string_index, Handle<mirror::String> handle) { + jit_string_roots_.Overwrite(StringReference(&dex_file, string_index), + reinterpret_cast64<uint64_t>(handle.GetReference())); return jit_string_patches_.GetOrCreate( StringReference(&dex_file, string_index), [this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); }); @@ -4527,7 +4528,9 @@ void LocationsBuilderARM64::VisitLoadString(HLoadString* load) { } } -void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) { +// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know 
does not +// move. +void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD_SAFETY_ANALYSIS { Register out = OutputRegister(load); Location out_loc = load->GetLocations()->Out(); @@ -4550,8 +4553,10 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) { return; // No dex cache slow path. } case HLoadString::LoadKind::kBootImageAddress: { - DCHECK(load->GetAddress() != 0u && IsUint<32>(load->GetAddress())); - __ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(load->GetAddress())); + uint32_t address = dchecked_integral_cast<uint32_t>( + reinterpret_cast<uintptr_t>(load->GetString().Get())); + DCHECK_NE(address, 0u); + __ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(address)); return; // No dex cache slow path. } case HLoadString::LoadKind::kBssEntry: { @@ -4582,7 +4587,8 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) { } case HLoadString::LoadKind::kJitTableAddress: { __ Ldr(out, codegen_->DeduplicateJitStringLiteral(load->GetDexFile(), - load->GetStringIndex())); + load->GetStringIndex(), + load->GetString())); GenerateGcRootFieldLoad(load, out_loc, out.X(), @@ -4738,7 +4744,6 @@ void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) { locations->AddTemp(LocationFrom(kArtMethodRegister)); } else { locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0))); - locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1))); } locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot)); } @@ -4756,7 +4761,7 @@ void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction) codegen_->RecordPcInfo(instruction, instruction->GetDexPc()); } else { codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc()); - CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>(); + CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>(); } } diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h index 8f33b6becf..d6a5f9d1fa 100644 --- a/compiler/optimizing/code_generator_arm64.h +++ b/compiler/optimizing/code_generator_arm64.h @@ -567,7 +567,8 @@ class CodeGeneratorARM64 : public CodeGenerator { dex::TypeIndex type_index); vixl::aarch64::Literal<uint32_t>* DeduplicateBootImageAddressLiteral(uint64_t address); vixl::aarch64::Literal<uint32_t>* DeduplicateJitStringLiteral(const DexFile& dex_file, - dex::StringIndex string_index); + dex::StringIndex string_index, + Handle<mirror::String> handle); vixl::aarch64::Literal<uint32_t>* DeduplicateJitClassLiteral(const DexFile& dex_file, dex::TypeIndex string_index, uint64_t address); diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc index 00ad3e34b7..c769decaa0 100644 --- a/compiler/optimizing/code_generator_arm_vixl.cc +++ b/compiler/optimizing/code_generator_arm_vixl.cc @@ -3948,7 +3948,6 @@ void LocationsBuilderARMVIXL::VisitNewInstance(HNewInstance* instruction) { } else { InvokeRuntimeCallingConventionARMVIXL calling_convention; locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0))); - locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1))); } locations->SetOut(LocationFrom(r0)); } @@ -3970,7 +3969,7 @@ void InstructionCodeGeneratorARMVIXL::VisitNewInstance(HNewInstance* instruction codegen_->RecordPcInfo(instruction, instruction->GetDexPc()); } else { 
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc()); - CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>(); + CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>(); } } @@ -6022,7 +6021,9 @@ void LocationsBuilderARMVIXL::VisitLoadString(HLoadString* load) { } } -void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) { +// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not +// move. +void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THREAD_SAFETY_ANALYSIS { LocationSummary* locations = load->GetLocations(); Location out_loc = locations->Out(); vixl32::Register out = OutputRegister(load); @@ -6042,8 +6043,9 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) { return; // No dex cache slow path. } case HLoadString::LoadKind::kBootImageAddress: { - DCHECK_NE(load->GetAddress(), 0u); - uint32_t address = dchecked_integral_cast<uint32_t>(load->GetAddress()); + uint32_t address = dchecked_integral_cast<uint32_t>( + reinterpret_cast<uintptr_t>(load->GetString().Get())); + DCHECK_NE(address, 0u); __ Ldr(out, codegen_->DeduplicateBootImageAddressLiteral(address)); return; // No dex cache slow path. } @@ -6063,7 +6065,8 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) { } case HLoadString::LoadKind::kJitTableAddress: { __ Ldr(out, codegen_->DeduplicateJitStringLiteral(load->GetDexFile(), - load->GetStringIndex())); + load->GetStringIndex(), + load->GetString())); // /* GcRoot<mirror::String> */ out = *out GenerateGcRootFieldLoad(load, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption); return; @@ -7444,9 +7447,12 @@ VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateDexCacheAddressLiteral(uint3 return DeduplicateUint32Literal(address, &uint32_literals_); } -VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateJitStringLiteral(const DexFile& dex_file, - dex::StringIndex string_index) { - jit_string_roots_.Overwrite(StringReference(&dex_file, string_index), /* placeholder */ 0u); +VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateJitStringLiteral( + const DexFile& dex_file, + dex::StringIndex string_index, + Handle<mirror::String> handle) { + jit_string_roots_.Overwrite(StringReference(&dex_file, string_index), + reinterpret_cast64<uint64_t>(handle.GetReference())); return jit_string_patches_.GetOrCreate( StringReference(&dex_file, string_index), [this]() { diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h index 297d63cefd..200a463c75 100644 --- a/compiler/optimizing/code_generator_arm_vixl.h +++ b/compiler/optimizing/code_generator_arm_vixl.h @@ -573,7 +573,8 @@ class CodeGeneratorARMVIXL : public CodeGenerator { VIXLUInt32Literal* DeduplicateBootImageAddressLiteral(uint32_t address); VIXLUInt32Literal* DeduplicateDexCacheAddressLiteral(uint32_t address); VIXLUInt32Literal* DeduplicateJitStringLiteral(const DexFile& dex_file, - dex::StringIndex string_index); + dex::StringIndex string_index, + Handle<mirror::String> handle); VIXLUInt32Literal* DeduplicateJitClassLiteral(const DexFile& dex_file, dex::TypeIndex type_index, uint64_t address); diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc index 01e0dac33e..bc62854e5d 100644 --- a/compiler/optimizing/code_generator_mips.cc +++ b/compiler/optimizing/code_generator_mips.cc @@ -5625,7 +5625,9 @@ void 
LocationsBuilderMIPS::VisitLoadString(HLoadString* load) { } } -void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) { +// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not +// move. +void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_SAFETY_ANALYSIS { HLoadString::LoadKind load_kind = load->GetLoadKind(); LocationSummary* locations = load->GetLocations(); Location out_loc = locations->Out(); @@ -5660,8 +5662,9 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) { return; // No dex cache slow path. } case HLoadString::LoadKind::kBootImageAddress: { - DCHECK_NE(load->GetAddress(), 0u); - uint32_t address = dchecked_integral_cast<uint32_t>(load->GetAddress()); + uint32_t address = dchecked_integral_cast<uint32_t>( + reinterpret_cast<uintptr_t>(load->GetString().Get())); + DCHECK_NE(address, 0u); __ LoadLiteral(out, base_or_current_method_reg, codegen_->DeduplicateBootImageAddressLiteral(address)); @@ -5900,7 +5903,6 @@ void LocationsBuilderMIPS::VisitNewInstance(HNewInstance* instruction) { locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument)); } else { locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); - locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); } locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot)); } @@ -5917,7 +5919,7 @@ void InstructionCodeGeneratorMIPS::VisitNewInstance(HNewInstance* instruction) { codegen_->RecordPcInfo(instruction, instruction->GetDexPc()); } else { codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc()); - CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>(); + CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>(); } } diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc index 36690c0569..1b9c6da460 100644 --- a/compiler/optimizing/code_generator_mips64.cc +++ b/compiler/optimizing/code_generator_mips64.cc @@ -3628,7 +3628,9 @@ void LocationsBuilderMIPS64::VisitLoadString(HLoadString* load) { } } -void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) { +// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not +// move. +void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) NO_THREAD_SAFETY_ANALYSIS { HLoadString::LoadKind load_kind = load->GetLoadKind(); LocationSummary* locations = load->GetLocations(); Location out_loc = locations->Out(); @@ -3650,8 +3652,9 @@ void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) { return; // No dex cache slow path. 
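Several backends above gain a NO_THREAD_SAFETY_ANALYSIS annotation on VisitLoadString because they now read through a Handle whose underlying string is known not to move. The sketch below shows how such an opt-out interacts with Clang's -Wthread-safety analysis; the macro definitions and the Handle/MutatorLock types are simplified assumptions, not ART's real declarations:

#if defined(__clang__)
#define CAPABILITY(x) __attribute__((capability(x)))
#define REQUIRES_SHARED(x) __attribute__((requires_shared_capability(x)))
#define NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis))
#else
#define CAPABILITY(x)
#define REQUIRES_SHARED(x)
#define NO_THREAD_SAFETY_ANALYSIS
#endif

// Toy stand-ins; ART's real Handle and mutator lock live in the runtime.
struct CAPABILITY("mutex") MutatorLock {};
MutatorLock mutator_lock;

struct String { int length; };

// Dereferencing a handle normally requires the mutator lock, because a moving GC
// could relocate the underlying object at a suspend point.
struct Handle {
  String* ref;
  String* Get() const REQUIRES_SHARED(mutator_lock) { return ref; }
};

// The code generator knows the strings it roots here live in a handle scope that is
// only visited during a pause and will not move under it, so it opts this one
// function out of the analysis, as the annotated VisitLoadString overrides above do.
int LoadLength(Handle h) NO_THREAD_SAFETY_ANALYSIS {
  return h.Get()->length;  // would trigger -Wthread-safety without the annotation
}

int main() {
  String s{7};
  Handle h{&s};
  return LoadLength(h) == 7 ? 0 : 1;
}

With -Wthread-safety enabled, removing NO_THREAD_SAFETY_ANALYSIS from LoadLength makes the h.Get() call warn, which is the warning the annotated overrides are deliberately silencing.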
} case HLoadString::LoadKind::kBootImageAddress: { - DCHECK_NE(load->GetAddress(), 0u); - uint32_t address = dchecked_integral_cast<uint32_t>(load->GetAddress()); + uint32_t address = dchecked_integral_cast<uint32_t>( + reinterpret_cast<uintptr_t>(load->GetString().Get())); + DCHECK_NE(address, 0u); __ LoadLiteral(out, kLoadUnsignedWord, codegen_->DeduplicateBootImageAddressLiteral(address)); @@ -3841,7 +3844,6 @@ void LocationsBuilderMIPS64::VisitNewInstance(HNewInstance* instruction) { locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument)); } else { locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); - locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); } locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot)); } @@ -3859,7 +3861,7 @@ void InstructionCodeGeneratorMIPS64::VisitNewInstance(HNewInstance* instruction) codegen_->RecordPcInfo(instruction, instruction->GetDexPc()); } else { codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc()); - CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>(); + CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>(); } } diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc index 0abe85540c..a9b717db4f 100644 --- a/compiler/optimizing/code_generator_x86.cc +++ b/compiler/optimizing/code_generator_x86.cc @@ -4150,7 +4150,6 @@ void LocationsBuilderX86::VisitNewInstance(HNewInstance* instruction) { } else { InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); - locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); } } @@ -4166,7 +4165,7 @@ void InstructionCodeGeneratorX86::VisitNewInstance(HNewInstance* instruction) { codegen_->RecordPcInfo(instruction, instruction->GetDexPc()); } else { codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc()); - CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>(); + CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>(); DCHECK(!codegen_->IsLeafMethod()); } } @@ -6232,15 +6231,19 @@ void LocationsBuilderX86::VisitLoadString(HLoadString* load) { } Label* CodeGeneratorX86::NewJitRootStringPatch(const DexFile& dex_file, - dex::StringIndex dex_index) { - jit_string_roots_.Overwrite(StringReference(&dex_file, dex_index), /* placeholder */ 0u); + dex::StringIndex dex_index, + Handle<mirror::String> handle) { + jit_string_roots_.Overwrite( + StringReference(&dex_file, dex_index), reinterpret_cast64<uint64_t>(handle.GetReference())); // Add a patch entry and return the label. jit_string_patches_.emplace_back(dex_file, dex_index.index_); PatchInfo<Label>* info = &jit_string_patches_.back(); return &info->label; } -void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) { +// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not +// move. +void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) NO_THREAD_SAFETY_ANALYSIS { LocationSummary* locations = load->GetLocations(); Location out_loc = locations->Out(); Register out = out_loc.AsRegister<Register>(); @@ -6258,8 +6261,9 @@ void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) { return; // No dex cache slow path. 
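Alongside the HNewInstance change (the current-method input and the second register argument are gone), every backend's CheckEntrypointTypes call switches from kQuickAllocObjectWithAccessCheck taking (uint32_t, ArtMethod*) to kQuickAllocObjectWithChecks taking a mirror::Class*. The compile-time shape of that check can be sketched standalone with a static_assert over a declared entrypoint; the entrypoint name and the trailing Thread* parameter here are assumptions for illustration, not ART's exact declaration:

#include <type_traits>

// Assumed stand-ins; ART's real declarations live in the runtime, not here.
namespace mirror { class Class; }
class Thread;

// Hypothetical quick entrypoint: allocate an object of `klass`, with the runtime
// performing the resolution/initialization/access checks itself.
void* art_quick_alloc_object_with_checks(mirror::Class* klass, Thread* self);

// Compile-time check mirroring CheckEntrypointTypes<...>: the code generator's idea
// of the entrypoint signature must match the declaration, or the build fails.
static_assert(
    std::is_same<decltype(art_quick_alloc_object_with_checks),
                 void*(mirror::Class*, Thread*)>::value,
    "allocation entrypoint takes the resolved Class, not (type_index, ArtMethod*)");

int main() { return 0; }

If the runtime-side signature drifts from what the code generator sets up arguments for, the assertion fails at build time instead of miscompiling the call.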
} case HLoadString::LoadKind::kBootImageAddress: { - DCHECK_NE(load->GetAddress(), 0u); - uint32_t address = dchecked_integral_cast<uint32_t>(load->GetAddress()); + uint32_t address = dchecked_integral_cast<uint32_t>( + reinterpret_cast<uintptr_t>(load->GetString().Get())); + DCHECK_NE(address, 0u); __ movl(out, Immediate(address)); codegen_->RecordSimplePatch(); return; // No dex cache slow path. @@ -6280,7 +6284,7 @@ void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) { case HLoadString::LoadKind::kJitTableAddress: { Address address = Address::Absolute(CodeGeneratorX86::kDummy32BitOffset); Label* fixup_label = codegen_->NewJitRootStringPatch( - load->GetDexFile(), load->GetStringIndex()); + load->GetDexFile(), load->GetStringIndex(), load->GetString()); // /* GcRoot<mirror::String> */ out = *address GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption); return; diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h index 1af685087c..dd1628c867 100644 --- a/compiler/optimizing/code_generator_x86.h +++ b/compiler/optimizing/code_generator_x86.h @@ -415,7 +415,9 @@ class CodeGeneratorX86 : public CodeGenerator { void RecordTypePatch(HLoadClass* load_class); Label* NewStringBssEntryPatch(HLoadString* load_string); Label* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file, uint32_t element_offset); - Label* NewJitRootStringPatch(const DexFile& dex_file, dex::StringIndex dex_index); + Label* NewJitRootStringPatch(const DexFile& dex_file, + dex::StringIndex dex_index, + Handle<mirror::String> handle); Label* NewJitRootClassPatch(const DexFile& dex_file, dex::TypeIndex dex_index, uint64_t address); void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE; diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc index 903844fcdb..261473505f 100644 --- a/compiler/optimizing/code_generator_x86_64.cc +++ b/compiler/optimizing/code_generator_x86_64.cc @@ -4038,7 +4038,6 @@ void LocationsBuilderX86_64::VisitNewInstance(HNewInstance* instruction) { locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument)); } else { locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); - locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); } locations->SetOut(Location::RegisterLocation(RAX)); } @@ -4055,7 +4054,7 @@ void InstructionCodeGeneratorX86_64::VisitNewInstance(HNewInstance* instruction) codegen_->RecordPcInfo(instruction, instruction->GetDexPc()); } else { codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc()); - CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>(); + CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>(); DCHECK(!codegen_->IsLeafMethod()); } } @@ -5631,15 +5630,19 @@ void LocationsBuilderX86_64::VisitLoadString(HLoadString* load) { } Label* CodeGeneratorX86_64::NewJitRootStringPatch(const DexFile& dex_file, - dex::StringIndex dex_index) { - jit_string_roots_.Overwrite(StringReference(&dex_file, dex_index), /* placeholder */ 0u); + dex::StringIndex dex_index, + Handle<mirror::String> handle) { + jit_string_roots_.Overwrite( + StringReference(&dex_file, dex_index), reinterpret_cast64<uint64_t>(handle.GetReference())); // Add a patch entry and return the label. 
jit_string_patches_.emplace_back(dex_file, dex_index.index_); PatchInfo<Label>* info = &jit_string_patches_.back(); return &info->label; } -void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) { +// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not +// move. +void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) NO_THREAD_SAFETY_ANALYSIS { LocationSummary* locations = load->GetLocations(); Location out_loc = locations->Out(); CpuRegister out = out_loc.AsRegister<CpuRegister>(); @@ -5651,8 +5654,9 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) { return; // No dex cache slow path. } case HLoadString::LoadKind::kBootImageAddress: { - DCHECK_NE(load->GetAddress(), 0u); - uint32_t address = dchecked_integral_cast<uint32_t>(load->GetAddress()); + uint32_t address = dchecked_integral_cast<uint32_t>( + reinterpret_cast<uintptr_t>(load->GetString().Get())); + DCHECK_NE(address, 0u); __ movl(out, Immediate(address)); // Zero-extended. codegen_->RecordSimplePatch(); return; // No dex cache slow path. @@ -5673,8 +5677,8 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) { case HLoadString::LoadKind::kJitTableAddress: { Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ true); - Label* fixup_label = - codegen_->NewJitRootStringPatch(load->GetDexFile(), load->GetStringIndex()); + Label* fixup_label = codegen_->NewJitRootStringPatch( + load->GetDexFile(), load->GetStringIndex(), load->GetString()); // /* GcRoot<mirror::String> */ out = *address GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption); return; diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h index f827e79a94..32d006c5f3 100644 --- a/compiler/optimizing/code_generator_x86_64.h +++ b/compiler/optimizing/code_generator_x86_64.h @@ -412,7 +412,9 @@ class CodeGeneratorX86_64 : public CodeGenerator { void RecordTypePatch(HLoadClass* load_class); Label* NewStringBssEntryPatch(HLoadString* load_string); Label* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file, uint32_t element_offset); - Label* NewJitRootStringPatch(const DexFile& dex_file, dex::StringIndex dex_index); + Label* NewJitRootStringPatch(const DexFile& dex_file, + dex::StringIndex dex_index, + Handle<mirror::String> handle); Label* NewJitRootClassPatch(const DexFile& dex_file, dex::TypeIndex dex_index, uint64_t address); void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE; diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc index 879b4ce59e..e3f3df0ff5 100644 --- a/compiler/optimizing/codegen_test.cc +++ b/compiler/optimizing/codegen_test.cc @@ -15,6 +15,7 @@ */ #include <functional> +#include <memory> #include "arch/instruction_set.h" #include "arch/arm/instruction_set_features_arm.h" @@ -299,8 +300,8 @@ static void RunCode(CodegenTargetConfig target_config, bool has_result, Expected expected) { CompilerOptions compiler_options; - CodeGenerator* codegen = target_config.CreateCodeGenerator(graph, compiler_options); - RunCode(codegen, graph, hook_before_codegen, has_result, expected); + std::unique_ptr<CodeGenerator> codegen(target_config.CreateCodeGenerator(graph, compiler_options)); + RunCode(codegen.get(), graph, hook_before_codegen, has_result, expected); } #ifdef ART_ENABLE_CODEGEN_arm diff --git a/compiler/optimizing/gvn_test.cc b/compiler/optimizing/gvn_test.cc index 
437d35ccb7..f8d37bd714 100644 --- a/compiler/optimizing/gvn_test.cc +++ b/compiler/optimizing/gvn_test.cc @@ -28,7 +28,6 @@ class GVNTest : public CommonCompilerTest {}; TEST_F(GVNTest, LocalFieldElimination) { ArenaPool pool; ArenaAllocator allocator(&pool); - ScopedNullHandle<mirror::DexCache> dex_cache; HGraph* graph = CreateGraph(&allocator); HBasicBlock* entry = new (&allocator) HBasicBlock(graph); @@ -45,53 +44,53 @@ TEST_F(GVNTest, LocalFieldElimination) { entry->AddSuccessor(block); block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, + nullptr, Primitive::kPrimNot, MemberOffset(42), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0)); block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, + nullptr, Primitive::kPrimNot, MemberOffset(42), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0)); HInstruction* to_remove = block->GetLastInstruction(); block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, + nullptr, Primitive::kPrimNot, MemberOffset(43), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0)); HInstruction* different_offset = block->GetLastInstruction(); // Kill the value. block->AddInstruction(new (&allocator) HInstanceFieldSet(parameter, parameter, + nullptr, Primitive::kPrimNot, MemberOffset(42), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0)); block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, + nullptr, Primitive::kPrimNot, MemberOffset(42), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0)); HInstruction* use_after_kill = block->GetLastInstruction(); block->AddInstruction(new (&allocator) HExit()); @@ -113,7 +112,6 @@ TEST_F(GVNTest, LocalFieldElimination) { TEST_F(GVNTest, GlobalFieldElimination) { ArenaPool pool; ArenaAllocator allocator(&pool); - ScopedNullHandle<mirror::DexCache> dex_cache; HGraph* graph = CreateGraph(&allocator); HBasicBlock* entry = new (&allocator) HBasicBlock(graph); @@ -129,13 +127,13 @@ TEST_F(GVNTest, GlobalFieldElimination) { graph->AddBlock(block); entry->AddSuccessor(block); block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, + nullptr, Primitive::kPrimBoolean, MemberOffset(42), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0)); block->AddInstruction(new (&allocator) HIf(block->GetLastInstruction())); @@ -152,33 +150,33 @@ TEST_F(GVNTest, GlobalFieldElimination) { else_->AddSuccessor(join); then->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, + nullptr, Primitive::kPrimBoolean, MemberOffset(42), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0)); then->AddInstruction(new (&allocator) HGoto()); else_->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, + nullptr, Primitive::kPrimBoolean, MemberOffset(42), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0)); else_->AddInstruction(new (&allocator) HGoto()); join->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, + nullptr, Primitive::kPrimBoolean, MemberOffset(42), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0)); join->AddInstruction(new (&allocator) HExit()); @@ -196,7 +194,6 @@ TEST_F(GVNTest, GlobalFieldElimination) { TEST_F(GVNTest, LoopFieldElimination) { ArenaPool pool; ArenaAllocator allocator(&pool); - ScopedNullHandle<mirror::DexCache> dex_cache; 
HGraph* graph = CreateGraph(&allocator); HBasicBlock* entry = new (&allocator) HBasicBlock(graph); @@ -213,13 +210,13 @@ TEST_F(GVNTest, LoopFieldElimination) { graph->AddBlock(block); entry->AddSuccessor(block); block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, + nullptr, Primitive::kPrimBoolean, MemberOffset(42), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0)); block->AddInstruction(new (&allocator) HGoto()); @@ -236,13 +233,13 @@ TEST_F(GVNTest, LoopFieldElimination) { loop_body->AddSuccessor(loop_header); loop_header->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, + nullptr, Primitive::kPrimBoolean, MemberOffset(42), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0)); HInstruction* field_get_in_loop_header = loop_header->GetLastInstruction(); loop_header->AddInstruction(new (&allocator) HIf(block->GetLastInstruction())); @@ -251,35 +248,35 @@ TEST_F(GVNTest, LoopFieldElimination) { // and the body to be GVN'ed. loop_body->AddInstruction(new (&allocator) HInstanceFieldSet(parameter, parameter, + nullptr, Primitive::kPrimBoolean, MemberOffset(42), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0)); HInstruction* field_set = loop_body->GetLastInstruction(); loop_body->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, + nullptr, Primitive::kPrimBoolean, MemberOffset(42), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0)); HInstruction* field_get_in_loop_body = loop_body->GetLastInstruction(); loop_body->AddInstruction(new (&allocator) HGoto()); exit->AddInstruction(new (&allocator) HInstanceFieldGet(parameter, + nullptr, Primitive::kPrimBoolean, MemberOffset(42), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0)); HInstruction* field_get_in_exit = exit->GetLastInstruction(); exit->AddInstruction(new (&allocator) HExit()); @@ -319,7 +316,6 @@ TEST_F(GVNTest, LoopFieldElimination) { TEST_F(GVNTest, LoopSideEffects) { ArenaPool pool; ArenaAllocator allocator(&pool); - ScopedNullHandle<mirror::DexCache> dex_cache; static const SideEffects kCanTriggerGC = SideEffects::CanTriggerGC(); @@ -376,13 +372,13 @@ TEST_F(GVNTest, LoopSideEffects) { // Make one block with a side effect. 
entry->AddInstruction(new (&allocator) HInstanceFieldSet(parameter, parameter, + nullptr, Primitive::kPrimNot, MemberOffset(42), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0)); SideEffectsAnalysis side_effects(graph); @@ -401,13 +397,13 @@ TEST_F(GVNTest, LoopSideEffects) { outer_loop_body->InsertInstructionBefore( new (&allocator) HInstanceFieldSet(parameter, parameter, + nullptr, Primitive::kPrimNot, MemberOffset(42), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0), outer_loop_body->GetLastInstruction()); @@ -427,13 +423,13 @@ TEST_F(GVNTest, LoopSideEffects) { inner_loop_body->InsertInstructionBefore( new (&allocator) HInstanceFieldSet(parameter, parameter, + nullptr, Primitive::kPrimNot, MemberOffset(42), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0), inner_loop_body->GetLastInstruction()); diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc index 3b83e95071..c970e5cbba 100644 --- a/compiler/optimizing/inliner.cc +++ b/compiler/optimizing/inliner.cc @@ -429,13 +429,13 @@ HInstanceFieldGet* HInliner::BuildGetReceiverClass(ClassLinker* class_linker, DCHECK_EQ(std::string(field->GetName()), "shadow$_klass_"); HInstanceFieldGet* result = new (graph_->GetArena()) HInstanceFieldGet( receiver, + field, Primitive::kPrimNot, field->GetOffset(), field->IsVolatile(), field->GetDexFieldIndex(), field->GetDeclaringClass()->GetDexClassDefIndex(), *field->GetDexFile(), - handles_->NewHandle(field->GetDexCache()), dex_pc); // The class of a field is effectively final, and does not have any memory dependencies. result->SetSideEffects(SideEffects::None()); @@ -618,6 +618,9 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction, } else { one_target_inlined = true; + VLOG(compiler) << "Polymorphic call to " << ArtMethod::PrettyMethod(resolved_method) + << " has inlined " << ArtMethod::PrettyMethod(method); + // If we have inlined all targets before, and this receiver is the last seen, // we deoptimize instead of keeping the original invoke instruction. bool deoptimize = all_targets_inlined && @@ -655,6 +658,7 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction, << " of its targets could be inlined"; return false; } + MaybeRecordStat(kInlinedPolymorphicCall); // Run type propagation to get the guards typed. @@ -1161,13 +1165,13 @@ HInstanceFieldGet* HInliner::CreateInstanceFieldGet(Handle<mirror::DexCache> dex DCHECK(resolved_field != nullptr); HInstanceFieldGet* iget = new (graph_->GetArena()) HInstanceFieldGet( obj, + resolved_field, resolved_field->GetTypeAsPrimitiveType(), resolved_field->GetOffset(), resolved_field->IsVolatile(), field_index, resolved_field->GetDeclaringClass()->GetDexClassDefIndex(), *dex_cache->GetDexFile(), - dex_cache, // Read barrier generates a runtime call in slow path and we need a valid // dex pc for the associated stack map. 0 is bogus but valid. Bug: 26854537. 
/* dex_pc */ 0); @@ -1190,13 +1194,13 @@ HInstanceFieldSet* HInliner::CreateInstanceFieldSet(Handle<mirror::DexCache> dex HInstanceFieldSet* iput = new (graph_->GetArena()) HInstanceFieldSet( obj, value, + resolved_field, resolved_field->GetTypeAsPrimitiveType(), resolved_field->GetOffset(), resolved_field->IsVolatile(), field_index, resolved_field->GetDeclaringClass()->GetDexClassDefIndex(), *dex_cache->GetDexFile(), - dex_cache, // Read barrier generates a runtime call in slow path and we need a valid // dex pc for the associated stack map. 0 is bogus but valid. Bug: 26854537. /* dex_pc */ 0); @@ -1424,15 +1428,6 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction, return false; } - if (current->IsNewInstance() && - (current->AsNewInstance()->GetEntrypoint() == kQuickAllocObjectWithAccessCheck)) { - VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index) - << " could not be inlined because it is using an entrypoint" - << " with access checks"; - // Allocation entrypoint does not handle inlined frames. - return false; - } - if (current->IsNewArray() && (current->AsNewArray()->GetEntrypoint() == kQuickAllocArrayWithAccessCheck)) { VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index) @@ -1579,6 +1574,13 @@ bool HInliner::ReturnTypeMoreSpecific(HInvoke* invoke_instruction, /* declared_can_be_null */ true, return_replacement)) { return true; + } else if (return_replacement->IsInstanceFieldGet()) { + HInstanceFieldGet* field_get = return_replacement->AsInstanceFieldGet(); + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + if (field_get->GetFieldInfo().GetField() == + class_linker->GetClassRoot(ClassLinker::kJavaLangObject)->GetInstanceField(0)) { + return true; + } } } else if (return_replacement->IsInstanceOf()) { // Inlining InstanceOf into an If may put a tighter bound on reference types. diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc index af8e2c8a7c..009d549547 100644 --- a/compiler/optimizing/instruction_builder.cc +++ b/compiler/optimizing/instruction_builder.cc @@ -917,11 +917,11 @@ bool HInstructionBuilder::BuildNewInstance(dex::TypeIndex type_index, uint32_t d bool finalizable; bool needs_access_check = NeedsAccessCheck(type_index, dex_cache, &finalizable); - // Only the non-resolved entrypoint handles the finalizable class case. If we + // Only the access check entrypoint handles the finalizable class case. If we // need access checks, then we haven't resolved the method and the class may // again be finalizable. QuickEntrypointEnum entrypoint = (finalizable || needs_access_check) - ? kQuickAllocObject + ? 
kQuickAllocObjectWithChecks : kQuickAllocObjectInitialized; if (outer_dex_cache.Get() != dex_cache.Get()) { @@ -946,7 +946,6 @@ bool HInstructionBuilder::BuildNewInstance(dex::TypeIndex type_index, uint32_t d AppendInstruction(new (arena_) HNewInstance( cls, - graph_->GetCurrentMethod(), dex_pc, type_index, *dex_compilation_unit_->GetDexFile(), @@ -1235,13 +1234,13 @@ bool HInstructionBuilder::BuildInstanceFieldAccess(const Instruction& instructio uint16_t class_def_index = resolved_field->GetDeclaringClass()->GetDexClassDefIndex(); field_set = new (arena_) HInstanceFieldSet(object, value, + resolved_field, field_type, resolved_field->GetOffset(), resolved_field->IsVolatile(), field_index, class_def_index, *dex_file_, - dex_compilation_unit_->GetDexCache(), dex_pc); } AppendInstruction(field_set); @@ -1256,13 +1255,13 @@ bool HInstructionBuilder::BuildInstanceFieldAccess(const Instruction& instructio } else { uint16_t class_def_index = resolved_field->GetDeclaringClass()->GetDexClassDefIndex(); field_get = new (arena_) HInstanceFieldGet(object, + resolved_field, field_type, resolved_field->GetOffset(), resolved_field->IsVolatile(), field_index, class_def_index, *dex_file_, - dex_compilation_unit_->GetDexCache(), dex_pc); } AppendInstruction(field_get); @@ -1311,9 +1310,9 @@ bool HInstructionBuilder::IsOutermostCompilingClass(dex::TypeIndex type_index) c } void HInstructionBuilder::BuildUnresolvedStaticFieldAccess(const Instruction& instruction, - uint32_t dex_pc, - bool is_put, - Primitive::Type field_type) { + uint32_t dex_pc, + bool is_put, + Primitive::Type field_type) { uint32_t source_or_dest_reg = instruction.VRegA_21c(); uint16_t field_index = instruction.VRegB_21c(); @@ -1400,23 +1399,23 @@ bool HInstructionBuilder::BuildStaticFieldAccess(const Instruction& instruction, DCHECK_EQ(HPhi::ToPhiType(value->GetType()), HPhi::ToPhiType(field_type)); AppendInstruction(new (arena_) HStaticFieldSet(cls, value, + resolved_field, field_type, resolved_field->GetOffset(), resolved_field->IsVolatile(), field_index, class_def_index, *dex_file_, - dex_cache_, dex_pc)); } else { AppendInstruction(new (arena_) HStaticFieldGet(cls, + resolved_field, field_type, resolved_field->GetOffset(), resolved_field->IsVolatile(), field_index, class_def_index, *dex_file_, - dex_cache_, dex_pc)); UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction()); } diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc index 439e3b66db..911bfb9cc6 100644 --- a/compiler/optimizing/instruction_simplifier.cc +++ b/compiler/optimizing/instruction_simplifier.cc @@ -1118,7 +1118,66 @@ void InstructionSimplifierVisitor::VisitAboveOrEqual(HAboveOrEqual* condition) { VisitCondition(condition); } +// Recognize the following pattern: +// obj.getClass() ==/!= Foo.class +// And replace it with a constant value if the type of `obj` is statically known. +static bool RecognizeAndSimplifyClassCheck(HCondition* condition) { + HInstruction* input_one = condition->InputAt(0); + HInstruction* input_two = condition->InputAt(1); + HLoadClass* load_class = input_one->IsLoadClass() + ? input_one->AsLoadClass() + : input_two->AsLoadClass(); + if (load_class == nullptr) { + return false; + } + + ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI(); + if (!class_rti.IsValid()) { + // Unresolved class. + return false; + } + + HInstanceFieldGet* field_get = (load_class == input_one) + ? 
input_two->AsInstanceFieldGet() + : input_one->AsInstanceFieldGet(); + if (field_get == nullptr) { + return false; + } + + HInstruction* receiver = field_get->InputAt(0); + ReferenceTypeInfo receiver_type = receiver->GetReferenceTypeInfo(); + if (!receiver_type.IsExact()) { + return false; + } + + { + ScopedObjectAccess soa(Thread::Current()); + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + ArtField* field = class_linker->GetClassRoot(ClassLinker::kJavaLangObject)->GetInstanceField(0); + DCHECK_EQ(std::string(field->GetName()), "shadow$_klass_"); + if (field_get->GetFieldInfo().GetField() != field) { + return false; + } + + // We can replace the compare. + int value = 0; + if (receiver_type.IsEqual(class_rti)) { + value = condition->IsEqual() ? 1 : 0; + } else { + value = condition->IsNotEqual() ? 1 : 0; + } + condition->ReplaceWith(condition->GetBlock()->GetGraph()->GetIntConstant(value)); + return true; + } +} + void InstructionSimplifierVisitor::VisitCondition(HCondition* condition) { + if (condition->IsEqual() || condition->IsNotEqual()) { + if (RecognizeAndSimplifyClassCheck(condition)) { + return; + } + } + // Reverse condition if left is constant. Our code generators prefer constant // on the right hand side. if (condition->GetLeft()->IsConstant() && !condition->GetRight()->IsConstant()) { @@ -1843,11 +1902,11 @@ void InstructionSimplifierVisitor::SimplifyStringCharAt(HInvoke* invoke) { // so create the HArrayLength, HBoundsCheck and HArrayGet. HArrayLength* length = new (arena) HArrayLength(str, dex_pc, /* is_string_length */ true); invoke->GetBlock()->InsertInstructionBefore(length, invoke); - HBoundsCheck* bounds_check = - new (arena) HBoundsCheck(index, length, dex_pc, invoke->GetDexMethodIndex()); + HBoundsCheck* bounds_check = new (arena) HBoundsCheck( + index, length, dex_pc, invoke->GetDexMethodIndex()); invoke->GetBlock()->InsertInstructionBefore(bounds_check, invoke); - HArrayGet* array_get = - new (arena) HArrayGet(str, index, Primitive::kPrimChar, dex_pc, /* is_string_char_at */ true); + HArrayGet* array_get = new (arena) HArrayGet( + str, bounds_check, Primitive::kPrimChar, dex_pc, /* is_string_char_at */ true); invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, array_get); bounds_check->CopyEnvironmentFrom(invoke->GetEnvironment()); GetGraph()->SetHasBoundsChecks(true); diff --git a/compiler/optimizing/licm_test.cc b/compiler/optimizing/licm_test.cc index 8c34dc6a86..5bcfa4c98b 100644 --- a/compiler/optimizing/licm_test.cc +++ b/compiler/optimizing/licm_test.cc @@ -111,20 +111,19 @@ TEST_F(LICMTest, FieldHoisting) { BuildLoop(); // Populate the loop with instructions: set/get field with different types. 
- ScopedNullHandle<mirror::DexCache> dex_cache; HInstruction* get_field = new (&allocator_) HInstanceFieldGet(parameter_, + nullptr, Primitive::kPrimLong, MemberOffset(10), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph_->GetDexFile(), - dex_cache, 0); loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction()); HInstruction* set_field = new (&allocator_) HInstanceFieldSet( - parameter_, int_constant_, Primitive::kPrimInt, MemberOffset(20), - false, kUnknownFieldIndex, kUnknownClassDefIndex, graph_->GetDexFile(), dex_cache, 0); + parameter_, int_constant_, nullptr, Primitive::kPrimInt, MemberOffset(20), + false, kUnknownFieldIndex, kUnknownClassDefIndex, graph_->GetDexFile(), 0); loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction()); EXPECT_EQ(get_field->GetBlock(), loop_body_); @@ -140,24 +139,24 @@ TEST_F(LICMTest, NoFieldHoisting) { // Populate the loop with instructions: set/get field with same types. ScopedNullHandle<mirror::DexCache> dex_cache; HInstruction* get_field = new (&allocator_) HInstanceFieldGet(parameter_, + nullptr, Primitive::kPrimLong, MemberOffset(10), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph_->GetDexFile(), - dex_cache, 0); loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction()); HInstruction* set_field = new (&allocator_) HInstanceFieldSet(parameter_, get_field, + nullptr, Primitive::kPrimLong, MemberOffset(10), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph_->GetDexFile(), - dex_cache, 0); loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction()); diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc index a599c2aa84..d45fa11534 100644 --- a/compiler/optimizing/nodes.cc +++ b/compiler/optimizing/nodes.cc @@ -2498,6 +2498,17 @@ std::ostream& operator<<(std::ostream& os, HLoadClass::LoadKind rhs) { } } +// Helper for InstructionDataEquals to fetch the mirror String out +// from a kJitTableAddress LoadString kind. +// NO_THREAD_SAFETY_ANALYSIS because even though we're accessing +// mirrors, they are stored in a variable size handle scope which is always +// visited during a pause. Also, the only caller of this helper +// only uses the mirror for pointer comparison. +static inline mirror::String* AsMirrorInternal(Handle<mirror::String> handle) + NO_THREAD_SAFETY_ANALYSIS { + return handle.Get(); +} + bool HLoadString::InstructionDataEquals(const HInstruction* other) const { const HLoadString* other_load_string = other->AsLoadString(); // TODO: To allow GVN for HLoadString from different dex files, we should compare the strings @@ -2506,16 +2517,16 @@ bool HLoadString::InstructionDataEquals(const HInstruction* other) const { GetPackedFields() != other_load_string->GetPackedFields()) { return false; } - LoadKind load_kind = GetLoadKind(); - if (HasAddress(load_kind)) { - return GetAddress() == other_load_string->GetAddress(); - } else { - DCHECK(HasStringReference(load_kind)) << load_kind; - return IsSameDexFile(GetDexFile(), other_load_string->GetDexFile()); + switch (GetLoadKind()) { + case LoadKind::kBootImageAddress: + case LoadKind::kJitTableAddress: + return AsMirrorInternal(GetString()) == AsMirrorInternal(other_load_string->GetString()); + default: + return IsSameDexFile(GetDexFile(), other_load_string->GetDexFile()); } } -void HLoadString::SetLoadKindInternal(LoadKind load_kind) { +void HLoadString::SetLoadKind(LoadKind load_kind) { // Once sharpened, the load kind should not be changed again. 
DCHECK_EQ(GetLoadKind(), LoadKind::kDexCacheViaMethod); SetPackedField<LoadKindField>(load_kind); diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h index 8c64d25aee..ea9a94c420 100644 --- a/compiler/optimizing/nodes.h +++ b/compiler/optimizing/nodes.h @@ -171,6 +171,7 @@ class HInstructionList : public ValueObject { friend class HGraph; friend class HInstruction; friend class HInstructionIterator; + friend class HInstructionIteratorHandleChanges; friend class HBackwardInstructionIterator; DISALLOW_COPY_AND_ASSIGN(HInstructionList); @@ -2312,6 +2313,9 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> { }; std::ostream& operator<<(std::ostream& os, const HInstruction::InstructionKind& rhs); +// Iterates over the instructions, while preserving the next instruction +// in case the current instruction gets removed from the list by the user +// of this iterator. class HInstructionIterator : public ValueObject { public: explicit HInstructionIterator(const HInstructionList& instructions) @@ -2333,6 +2337,28 @@ class HInstructionIterator : public ValueObject { DISALLOW_COPY_AND_ASSIGN(HInstructionIterator); }; +// Iterates over the instructions without saving the next instruction, +// therefore handling changes in the graph potentially made by the user +// of this iterator. +class HInstructionIteratorHandleChanges : public ValueObject { + public: + explicit HInstructionIteratorHandleChanges(const HInstructionList& instructions) + : instruction_(instructions.first_instruction_) { + } + + bool Done() const { return instruction_ == nullptr; } + HInstruction* Current() const { return instruction_; } + void Advance() { + instruction_ = instruction_->GetNext(); + } + + private: + HInstruction* instruction_; + + DISALLOW_COPY_AND_ASSIGN(HInstructionIteratorHandleChanges); +}; + + class HBackwardInstructionIterator : public ValueObject { public: explicit HBackwardInstructionIterator(const HInstructionList& instructions) @@ -3748,10 +3774,9 @@ class HCompare FINAL : public HBinaryOperation { DISALLOW_COPY_AND_ASSIGN(HCompare); }; -class HNewInstance FINAL : public HExpression<2> { +class HNewInstance FINAL : public HExpression<1> { public: HNewInstance(HInstruction* cls, - HCurrentMethod* current_method, uint32_t dex_pc, dex::TypeIndex type_index, const DexFile& dex_file, @@ -3765,7 +3790,6 @@ class HNewInstance FINAL : public HExpression<2> { SetPackedFlag<kFlagNeedsAccessCheck>(needs_access_check); SetPackedFlag<kFlagFinalizable>(finalizable); SetRawInputAt(0, cls); - SetRawInputAt(1, current_method); } dex::TypeIndex GetTypeIndex() const { return type_index_; } @@ -5056,60 +5080,62 @@ class HNullCheck FINAL : public HExpression<1> { DISALLOW_COPY_AND_ASSIGN(HNullCheck); }; +// Embeds an ArtField and all the information required by the compiler. We cache +// that information to avoid requiring the mutator lock every time we need it. 
class FieldInfo : public ValueObject { public: - FieldInfo(MemberOffset field_offset, + FieldInfo(ArtField* field, + MemberOffset field_offset, Primitive::Type field_type, bool is_volatile, uint32_t index, uint16_t declaring_class_def_index, - const DexFile& dex_file, - Handle<mirror::DexCache> dex_cache) - : field_offset_(field_offset), + const DexFile& dex_file) + : field_(field), + field_offset_(field_offset), field_type_(field_type), is_volatile_(is_volatile), index_(index), declaring_class_def_index_(declaring_class_def_index), - dex_file_(dex_file), - dex_cache_(dex_cache) {} + dex_file_(dex_file) {} + ArtField* GetField() const { return field_; } MemberOffset GetFieldOffset() const { return field_offset_; } Primitive::Type GetFieldType() const { return field_type_; } uint32_t GetFieldIndex() const { return index_; } uint16_t GetDeclaringClassDefIndex() const { return declaring_class_def_index_;} const DexFile& GetDexFile() const { return dex_file_; } bool IsVolatile() const { return is_volatile_; } - Handle<mirror::DexCache> GetDexCache() const { return dex_cache_; } private: + ArtField* const field_; const MemberOffset field_offset_; const Primitive::Type field_type_; const bool is_volatile_; const uint32_t index_; const uint16_t declaring_class_def_index_; const DexFile& dex_file_; - const Handle<mirror::DexCache> dex_cache_; }; class HInstanceFieldGet FINAL : public HExpression<1> { public: HInstanceFieldGet(HInstruction* value, + ArtField* field, Primitive::Type field_type, MemberOffset field_offset, bool is_volatile, uint32_t field_idx, uint16_t declaring_class_def_index, const DexFile& dex_file, - Handle<mirror::DexCache> dex_cache, uint32_t dex_pc) : HExpression(field_type, SideEffects::FieldReadOfType(field_type, is_volatile), dex_pc), - field_info_(field_offset, + field_info_(field, + field_offset, field_type, is_volatile, field_idx, declaring_class_def_index, - dex_file, - dex_cache) { + dex_file) { SetRawInputAt(0, value); } @@ -5145,22 +5171,22 @@ class HInstanceFieldSet FINAL : public HTemplateInstruction<2> { public: HInstanceFieldSet(HInstruction* object, HInstruction* value, + ArtField* field, Primitive::Type field_type, MemberOffset field_offset, bool is_volatile, uint32_t field_idx, uint16_t declaring_class_def_index, const DexFile& dex_file, - Handle<mirror::DexCache> dex_cache, uint32_t dex_pc) : HTemplateInstruction(SideEffects::FieldWriteOfType(field_type, is_volatile), dex_pc), - field_info_(field_offset, + field_info_(field, + field_offset, field_type, is_volatile, field_idx, declaring_class_def_index, - dex_file, - dex_cache) { + dex_file) { SetPackedFlag<kFlagValueCanBeNull>(true); SetRawInputAt(0, object); SetRawInputAt(1, value); @@ -5761,39 +5787,31 @@ class HLoadString FINAL : public HInstruction { uint32_t dex_pc) : HInstruction(SideEffectsForArchRuntimeCalls(), dex_pc), special_input_(HUserRecord<HInstruction*>(current_method)), - string_index_(string_index) { + string_index_(string_index), + dex_file_(dex_file) { SetPackedField<LoadKindField>(LoadKind::kDexCacheViaMethod); - load_data_.dex_file_ = &dex_file; } - void SetLoadKindWithAddress(LoadKind load_kind, uint64_t address) { - DCHECK(HasAddress(load_kind)); - load_data_.address = address; - SetLoadKindInternal(load_kind); - } - - void SetLoadKindWithStringReference(LoadKind load_kind, - const DexFile& dex_file, - dex::StringIndex string_index) { - DCHECK(HasStringReference(load_kind)); - load_data_.dex_file_ = &dex_file; - string_index_ = string_index; - SetLoadKindInternal(load_kind); - } + void 
SetLoadKind(LoadKind load_kind); LoadKind GetLoadKind() const { return GetPackedField<LoadKindField>(); } - const DexFile& GetDexFile() const; + const DexFile& GetDexFile() const { + return dex_file_; + } dex::StringIndex GetStringIndex() const { return string_index_; } - uint64_t GetAddress() const { - DCHECK(HasAddress(GetLoadKind())); - return load_data_.address; + Handle<mirror::String> GetString() const { + return string_; + } + + void SetString(Handle<mirror::String> str) { + string_ = str; } bool CanBeMoved() const OVERRIDE { return true; } @@ -5848,18 +5866,6 @@ class HLoadString FINAL : public HInstruction { static_assert(kNumberOfLoadStringPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields."); using LoadKindField = BitField<LoadKind, kFieldLoadKind, kFieldLoadKindSize>; - static bool HasStringReference(LoadKind load_kind) { - return load_kind == LoadKind::kBootImageLinkTimeAddress || - load_kind == LoadKind::kBootImageLinkTimePcRelative || - load_kind == LoadKind::kBssEntry || - load_kind == LoadKind::kDexCacheViaMethod || - load_kind == LoadKind::kJitTableAddress; - } - - static bool HasAddress(LoadKind load_kind) { - return load_kind == LoadKind::kBootImageAddress; - } - void SetLoadKindInternal(LoadKind load_kind); // The special input is the HCurrentMethod for kDexCacheViaMethod. @@ -5867,26 +5873,16 @@ class HLoadString FINAL : public HInstruction { // for PC-relative loads, i.e. kDexCachePcRelative or kBootImageLinkTimePcRelative. HUserRecord<HInstruction*> special_input_; - // String index serves also as the hash code and it's also needed for slow-paths, - // so it must not be overwritten with other load data. dex::StringIndex string_index_; + const DexFile& dex_file_; - union { - const DexFile* dex_file_; // For string reference. - uint64_t address; // Up to 64-bit, needed for kDexCacheAddress on 64-bit targets. - } load_data_; + Handle<mirror::String> string_; DISALLOW_COPY_AND_ASSIGN(HLoadString); }; std::ostream& operator<<(std::ostream& os, HLoadString::LoadKind rhs); // Note: defined outside class to see operator<<(., HLoadString::LoadKind). -inline const DexFile& HLoadString::GetDexFile() const { - DCHECK(HasStringReference(GetLoadKind())) << GetLoadKind(); - return *load_data_.dex_file_; -} - -// Note: defined outside class to see operator<<(., HLoadString::LoadKind). inline void HLoadString::AddSpecialInput(HInstruction* special_input) { // The special input is used for PC-relative loads on some architectures, // including literal pool loads, which are PC-relative too. 
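Aside: the nodes.h hunk above documents two iterator flavours — HInstructionIterator, which caches the next instruction so the current one may safely be removed during the visit, and the new HInstructionIteratorHandleChanges, which re-reads GetNext() on every step so that instructions inserted right after the current one (as reference type propagation now does with HBoundType, see the reference_type_propagation.cc hunks later in this patch) are still visited. The standalone sketch below illustrates that difference; it is not ART code — the Node type, the iterator names and the main() driver are invented for illustration only.

#include <iostream>
#include <string>

// Toy singly-linked node standing in for HInstruction (illustration only).
struct Node {
  std::string name;
  Node* next;
  Node* GetNext() const { return next; }
};

// Caches the next node, like HInstructionIterator: removing the current
// node is safe, but a node inserted right after it is skipped.
class CachingIterator {
 public:
  explicit CachingIterator(Node* first)
      : node_(first), next_(first != nullptr ? first->GetNext() : nullptr) {}
  bool Done() const { return node_ == nullptr; }
  Node* Current() const { return node_; }
  void Advance() {
    node_ = next_;
    next_ = (node_ != nullptr) ? node_->GetNext() : nullptr;
  }
 private:
  Node* node_;
  Node* next_;
};

// Re-reads GetNext() on every step, like HInstructionIteratorHandleChanges:
// nodes inserted after the current one are visited as well.
class HandleChangesIterator {
 public:
  explicit HandleChangesIterator(Node* first) : node_(first) {}
  bool Done() const { return node_ == nullptr; }
  Node* Current() const { return node_; }
  void Advance() { node_ = node_->GetNext(); }
 private:
  Node* node_;
};

int main() {
  Node c{"c", nullptr};
  Node b{"b", &c};
  Node a{"a", &b};

  // Insert "x" right after "a" while iterating with the caching style:
  // "x" is skipped because the next node was cached before the insertion.
  Node x{"x", nullptr};
  for (CachingIterator it(&a); !it.Done(); it.Advance()) {
    std::cout << it.Current()->name << ' ';
    if (it.Current() == &a) {
      x.next = a.next;
      a.next = &x;
    }
  }
  std::cout << '\n';  // prints: a b c

  // Same insertion with the handle-changes style: the new node is visited,
  // which is what RTP needs for the HBoundType nodes it creates.
  a.next = &b;
  Node y{"y", nullptr};
  for (HandleChangesIterator it(&a); !it.Done(); it.Advance()) {
    std::cout << it.Current()->name << ' ';
    if (it.Current() == &a) {
      y.next = a.next;
      a.next = &y;
    }
  }
  std::cout << '\n';  // prints: a y b c
  return 0;
}

With the caching style the freshly inserted node is silently skipped, which is exactly why the RTP walk in this patch switches to the handle-changes iterator.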
@@ -5937,22 +5933,22 @@ class HClinitCheck FINAL : public HExpression<1> { class HStaticFieldGet FINAL : public HExpression<1> { public: HStaticFieldGet(HInstruction* cls, + ArtField* field, Primitive::Type field_type, MemberOffset field_offset, bool is_volatile, uint32_t field_idx, uint16_t declaring_class_def_index, const DexFile& dex_file, - Handle<mirror::DexCache> dex_cache, uint32_t dex_pc) : HExpression(field_type, SideEffects::FieldReadOfType(field_type, is_volatile), dex_pc), - field_info_(field_offset, + field_info_(field, + field_offset, field_type, is_volatile, field_idx, declaring_class_def_index, - dex_file, - dex_cache) { + dex_file) { SetRawInputAt(0, cls); } @@ -5985,22 +5981,22 @@ class HStaticFieldSet FINAL : public HTemplateInstruction<2> { public: HStaticFieldSet(HInstruction* cls, HInstruction* value, + ArtField* field, Primitive::Type field_type, MemberOffset field_offset, bool is_volatile, uint32_t field_idx, uint16_t declaring_class_def_index, const DexFile& dex_file, - Handle<mirror::DexCache> dex_cache, uint32_t dex_pc) : HTemplateInstruction(SideEffects::FieldWriteOfType(field_type, is_volatile), dex_pc), - field_info_(field_offset, + field_info_(field, + field_offset, field_type, is_volatile, field_idx, declaring_class_def_index, - dex_file, - dex_cache) { + dex_file) { SetPackedFlag<kFlagValueCanBeNull>(true); SetRawInputAt(0, cls); SetRawInputAt(1, value); diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc index 4bf5b080a7..297500b12f 100644 --- a/compiler/optimizing/optimizing_compiler.cc +++ b/compiler/optimizing/optimizing_compiler.cc @@ -1205,7 +1205,7 @@ bool OptimizingCompiler::JitCompile(Thread* self, } MaybeRecordStat(MethodCompilationStat::kCompiled); codegen->BuildStackMaps(MemoryRegion(stack_map_data, stack_map_size), *code_item); - codegen->EmitJitRoots(code_allocator.GetData(), roots, roots_data, dex_cache); + codegen->EmitJitRoots(code_allocator.GetData(), roots, roots_data); const void* code = code_cache->CommitCode( self, diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc index f9ac3a0f72..db7c1fbb06 100644 --- a/compiler/optimizing/prepare_for_register_allocation.cc +++ b/compiler/optimizing/prepare_for_register_allocation.cc @@ -134,39 +134,6 @@ void PrepareForRegisterAllocation::VisitClinitCheck(HClinitCheck* check) { } } -void PrepareForRegisterAllocation::VisitNewInstance(HNewInstance* instruction) { - HLoadClass* load_class = instruction->InputAt(0)->AsLoadClass(); - const bool has_only_one_use = load_class->HasOnlyOneNonEnvironmentUse(); - // Change the entrypoint to kQuickAllocObject if either: - // - the class is finalizable (only kQuickAllocObject handles finalizable classes), - // - the class needs access checks (we do not know if it's finalizable), - // - or the load class has only one use. - if (instruction->IsFinalizable() || has_only_one_use || load_class->NeedsAccessCheck()) { - instruction->SetEntrypoint(kQuickAllocObject); - instruction->ReplaceInput(GetGraph()->GetIntConstant(load_class->GetTypeIndex().index_), 0); - if (has_only_one_use) { - // We've just removed the only use of the HLoadClass. Since we don't run DCE after this pass, - // do it manually if possible. - if (!load_class->CanThrow()) { - // If the load class can not throw, it has no side effects and can be removed if there is - // only one use. 
- load_class->GetBlock()->RemoveInstruction(load_class); - } else if (!instruction->GetEnvironment()->IsFromInlinedInvoke() && - CanMoveClinitCheck(load_class, instruction)) { - // The allocation entry point that deals with access checks does not work with inlined - // methods, so we need to check whether this allocation comes from an inlined method. - // We also need to make the same check as for moving clinit check, whether the HLoadClass - // has the clinit check responsibility or not (HLoadClass can throw anyway). - // If it needed access checks, we delegate the access check to the allocation. - if (load_class->NeedsAccessCheck()) { - instruction->SetEntrypoint(kQuickAllocObjectWithAccessCheck); - } - load_class->GetBlock()->RemoveInstruction(load_class); - } - } - } -} - bool PrepareForRegisterAllocation::CanEmitConditionAt(HCondition* condition, HInstruction* user) const { if (condition->GetNext() != user) { diff --git a/compiler/optimizing/prepare_for_register_allocation.h b/compiler/optimizing/prepare_for_register_allocation.h index a6791482a7..c128227654 100644 --- a/compiler/optimizing/prepare_for_register_allocation.h +++ b/compiler/optimizing/prepare_for_register_allocation.h @@ -44,7 +44,6 @@ class PrepareForRegisterAllocation : public HGraphDelegateVisitor { void VisitClinitCheck(HClinitCheck* check) OVERRIDE; void VisitCondition(HCondition* condition) OVERRIDE; void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE; - void VisitNewInstance(HNewInstance* instruction) OVERRIDE; bool CanMoveClinitCheck(HInstruction* input, HInstruction* user) const; bool CanEmitConditionAt(HCondition* condition, HInstruction* user) const; diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc index 33b3875e3b..f8a4469712 100644 --- a/compiler/optimizing/reference_type_propagation.cc +++ b/compiler/optimizing/reference_type_propagation.cc @@ -76,6 +76,7 @@ class ReferenceTypePropagation::RTPVisitor : public HGraphDelegateVisitor { worklist_(worklist), is_first_run_(is_first_run) {} + void VisitDeoptimize(HDeoptimize* deopt) OVERRIDE; void VisitNewInstance(HNewInstance* new_instance) OVERRIDE; void VisitLoadClass(HLoadClass* load_class) OVERRIDE; void VisitClinitCheck(HClinitCheck* clinit_check) OVERRIDE; @@ -151,38 +152,6 @@ void ReferenceTypePropagation::Visit(HInstruction* instruction) { instruction->Accept(&visitor); } -void ReferenceTypePropagation::Run() { - worklist_.reserve(kDefaultWorklistSize); - - // To properly propagate type info we need to visit in the dominator-based order. - // Reverse post order guarantees a node's dominators are visited first. - // We take advantage of this order in `VisitBasicBlock`. - for (HBasicBlock* block : graph_->GetReversePostOrder()) { - VisitBasicBlock(block); - } - - ProcessWorklist(); - ValidateTypes(); -} - -void ReferenceTypePropagation::VisitBasicBlock(HBasicBlock* block) { - RTPVisitor visitor(graph_, hint_dex_cache_, &handle_cache_, &worklist_, is_first_run_); - // Handle Phis first as there might be instructions in the same block who depend on them. - for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) { - VisitPhi(it.Current()->AsPhi()); - } - - // Handle instructions. - for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) { - HInstruction* instr = it.Current(); - instr->Accept(&visitor); - } - - // Add extra nodes to bound types. 
- BoundTypeForIfNotNull(block); - BoundTypeForIfInstanceOf(block); -} - // Check if we should create a bound type for the given object at the specified // position. Because of inlining and the fact we run RTP more than once and we // might have a HBoundType already. If we do, we should not create a new one. @@ -225,6 +194,153 @@ static bool ShouldCreateBoundType(HInstruction* position, return false; } +// Helper method to bound the type of `receiver` for all instructions dominated +// by `start_block`, or `start_instruction` if `start_block` is null. The new +// bound type will have its upper bound be `class_rti`. +static void BoundTypeIn(HInstruction* receiver, + HBasicBlock* start_block, + HInstruction* start_instruction, + const ReferenceTypeInfo& class_rti) { + // We only need to bound the type if we have uses in the relevant block. + // So start with null and create the HBoundType lazily, only if it's needed. + HBoundType* bound_type = nullptr; + DCHECK(!receiver->IsLoadClass()) << "We should not replace HLoadClass instructions"; + const HUseList<HInstruction*>& uses = receiver->GetUses(); + for (auto it = uses.begin(), end = uses.end(); it != end; /* ++it below */) { + HInstruction* user = it->GetUser(); + size_t index = it->GetIndex(); + // Increment `it` now because `*it` may disappear thanks to user->ReplaceInput(). + ++it; + bool dominates = (start_instruction != nullptr) + ? start_instruction->StrictlyDominates(user) + : start_block->Dominates(user->GetBlock()); + if (!dominates) { + continue; + } + if (bound_type == nullptr) { + ScopedObjectAccess soa(Thread::Current()); + HInstruction* insert_point = (start_instruction != nullptr) + ? start_instruction->GetNext() + : start_block->GetFirstInstruction(); + if (ShouldCreateBoundType( + insert_point, receiver, class_rti, start_instruction, start_block)) { + bound_type = new (receiver->GetBlock()->GetGraph()->GetArena()) HBoundType(receiver); + bound_type->SetUpperBound(class_rti, /* bound_can_be_null */ false); + start_block->InsertInstructionBefore(bound_type, insert_point); + // To comply with the RTP algorithm, don't type the bound type just yet, it will + // be handled in RTPVisitor::VisitBoundType. + } else { + // We already have a bound type on the position we would need to insert + // the new one. The existing bound type should dominate all the users + // (dchecked) so there's no need to continue. + break; + } + } + user->ReplaceInput(bound_type, index); + } + // If the receiver is a null check, also bound the type of the actual + // receiver. + if (receiver->IsNullCheck()) { + BoundTypeIn(receiver->InputAt(0), start_block, start_instruction, class_rti); + } +} + +// Recognize the patterns: +// if (obj.shadow$_klass_ == Foo.class) ... +// deoptimize if (obj.shadow$_klass_ == Foo.class) +static void BoundTypeForClassCheck(HInstruction* check) { + if (!check->IsIf() && !check->IsDeoptimize()) { + return; + } + HInstruction* compare = check->InputAt(0); + if (!compare->IsEqual() && !compare->IsNotEqual()) { + return; + } + HInstruction* input_one = compare->InputAt(0); + HInstruction* input_two = compare->InputAt(1); + HLoadClass* load_class = input_one->IsLoadClass() + ? input_one->AsLoadClass() + : input_two->AsLoadClass(); + if (load_class == nullptr) { + return; + } + + ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI(); + if (!class_rti.IsValid()) { + // We have loaded an unresolved class. Don't bother bounding the type. + return; + } + + HInstanceFieldGet* field_get = (load_class == input_one) + ? 
input_two->AsInstanceFieldGet() + : input_one->AsInstanceFieldGet(); + if (field_get == nullptr) { + return; + } + HInstruction* receiver = field_get->InputAt(0); + ReferenceTypeInfo receiver_type = receiver->GetReferenceTypeInfo(); + if (receiver_type.IsExact()) { + // If we already know the receiver type, don't bother updating its users. + return; + } + + { + ScopedObjectAccess soa(Thread::Current()); + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + ArtField* field = class_linker->GetClassRoot(ClassLinker::kJavaLangObject)->GetInstanceField(0); + DCHECK_EQ(std::string(field->GetName()), "shadow$_klass_"); + if (field_get->GetFieldInfo().GetField() != field) { + return; + } + } + + if (check->IsIf()) { + HBasicBlock* trueBlock = check->IsEqual() + ? check->AsIf()->IfTrueSuccessor() + : check->AsIf()->IfFalseSuccessor(); + BoundTypeIn(receiver, trueBlock, /* start_instruction */ nullptr, class_rti); + } else { + DCHECK(check->IsDeoptimize()); + if (check->IsEqual()) { + BoundTypeIn(receiver, check->GetBlock(), check, class_rti); + } + } +} + +void ReferenceTypePropagation::Run() { + worklist_.reserve(kDefaultWorklistSize); + + // To properly propagate type info we need to visit in the dominator-based order. + // Reverse post order guarantees a node's dominators are visited first. + // We take advantage of this order in `VisitBasicBlock`. + for (HBasicBlock* block : graph_->GetReversePostOrder()) { + VisitBasicBlock(block); + } + + ProcessWorklist(); + ValidateTypes(); +} + +void ReferenceTypePropagation::VisitBasicBlock(HBasicBlock* block) { + RTPVisitor visitor(graph_, hint_dex_cache_, &handle_cache_, &worklist_, is_first_run_); + // Handle Phis first as there might be instructions in the same block who depend on them. + for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) { + VisitPhi(it.Current()->AsPhi()); + } + + // Handle instructions. Since RTP may add HBoundType instructions just after the + // last visited instruction, use `HInstructionIteratorHandleChanges` iterator. + for (HInstructionIteratorHandleChanges it(block->GetInstructions()); !it.Done(); it.Advance()) { + HInstruction* instr = it.Current(); + instr->Accept(&visitor); + } + + // Add extra nodes to bound types. + BoundTypeForIfNotNull(block); + BoundTypeForIfInstanceOf(block); + BoundTypeForClassCheck(block->GetLastInstruction()); +} + void ReferenceTypePropagation::BoundTypeForIfNotNull(HBasicBlock* block) { HIf* ifInstruction = block->GetLastInstruction()->AsIf(); if (ifInstruction == nullptr) { @@ -254,40 +370,14 @@ void ReferenceTypePropagation::BoundTypeForIfNotNull(HBasicBlock* block) { // We only need to bound the type if we have uses in the relevant block. // So start with null and create the HBoundType lazily, only if it's needed. - HBoundType* bound_type = nullptr; HBasicBlock* notNullBlock = ifInput->IsNotEqual() ? ifInstruction->IfTrueSuccessor() : ifInstruction->IfFalseSuccessor(); - const HUseList<HInstruction*>& uses = obj->GetUses(); - for (auto it = uses.begin(), end = uses.end(); it != end; /* ++it below */) { - HInstruction* user = it->GetUser(); - size_t index = it->GetIndex(); - // Increment `it` now because `*it` may disappear thanks to user->ReplaceInput(). 
- ++it; - if (notNullBlock->Dominates(user->GetBlock())) { - if (bound_type == nullptr) { - ScopedObjectAccess soa(Thread::Current()); - HInstruction* insert_point = notNullBlock->GetFirstInstruction(); - ReferenceTypeInfo object_rti = ReferenceTypeInfo::Create( - handle_cache_.GetObjectClassHandle(), /* is_exact */ false); - if (ShouldCreateBoundType(insert_point, obj, object_rti, nullptr, notNullBlock)) { - bound_type = new (graph_->GetArena()) HBoundType(obj); - bound_type->SetUpperBound(object_rti, /* bound_can_be_null */ false); - if (obj->GetReferenceTypeInfo().IsValid()) { - bound_type->SetReferenceTypeInfo(obj->GetReferenceTypeInfo()); - } - notNullBlock->InsertInstructionBefore(bound_type, insert_point); - } else { - // We already have a bound type on the position we would need to insert - // the new one. The existing bound type should dominate all the users - // (dchecked) so there's no need to continue. - break; - } - } - user->ReplaceInput(bound_type, index); - } - } + ReferenceTypeInfo object_rti = ReferenceTypeInfo::Create( + handle_cache_.GetObjectClassHandle(), /* is_exact */ false); + + BoundTypeIn(obj, notNullBlock, /* start_instruction */ nullptr, object_rti); } // Returns true if one of the patterns below has been recognized. If so, the @@ -378,15 +468,10 @@ void ReferenceTypePropagation::BoundTypeForIfInstanceOf(HBasicBlock* block) { HLoadClass* load_class = instanceOf->InputAt(1)->AsLoadClass(); ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI(); - { - if (!class_rti.IsValid()) { - // He have loaded an unresolved class. Don't bother bounding the type. - return; - } + if (!class_rti.IsValid()) { + // We have loaded an unresolved class. Don't bother bounding the type. + return; } - // We only need to bound the type if we have uses in the relevant block. - // So start with null and create the HBoundType lazily, only if it's needed. - HBoundType* bound_type = nullptr; HInstruction* obj = instanceOf->InputAt(0); if (obj->GetReferenceTypeInfo().IsExact() && !obj->IsPhi()) { @@ -398,33 +483,14 @@ void ReferenceTypePropagation::BoundTypeForIfInstanceOf(HBasicBlock* block) { // input. return; } - DCHECK(!obj->IsLoadClass()) << "We should not replace HLoadClass instructions"; - const HUseList<HInstruction*>& uses = obj->GetUses(); - for (auto it = uses.begin(), end = uses.end(); it != end; /* ++it below */) { - HInstruction* user = it->GetUser(); - size_t index = it->GetIndex(); - // Increment `it` now because `*it` may disappear thanks to user->ReplaceInput(). - ++it; - if (instanceOfTrueBlock->Dominates(user->GetBlock())) { - if (bound_type == nullptr) { - ScopedObjectAccess soa(Thread::Current()); - HInstruction* insert_point = instanceOfTrueBlock->GetFirstInstruction(); - if (ShouldCreateBoundType(insert_point, obj, class_rti, nullptr, instanceOfTrueBlock)) { - bound_type = new (graph_->GetArena()) HBoundType(obj); - bool is_exact = class_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes(); - bound_type->SetUpperBound(ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), is_exact), - /* InstanceOf fails for null. */ false); - instanceOfTrueBlock->InsertInstructionBefore(bound_type, insert_point); - } else { - // We already have a bound type on the position we would need to insert - // the new one. The existing bound type should dominate all the users - // (dchecked) so there's no need to continue.
- break; - } - } - user->ReplaceInput(bound_type, index); + + { + ScopedObjectAccess soa(Thread::Current()); + if (!class_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes()) { + class_rti = ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ false); } } + BoundTypeIn(obj, instanceOfTrueBlock, /* start_instruction */ nullptr, class_rti); } void ReferenceTypePropagation::RTPVisitor::SetClassAsTypeInfo(HInstruction* instr, @@ -464,6 +530,10 @@ void ReferenceTypePropagation::RTPVisitor::SetClassAsTypeInfo(HInstruction* inst } } +void ReferenceTypePropagation::RTPVisitor::VisitDeoptimize(HDeoptimize* instr) { + BoundTypeForClassCheck(instr); +} + void ReferenceTypePropagation::RTPVisitor::UpdateReferenceTypeInfo(HInstruction* instr, dex::TypeIndex type_idx, const DexFile& dex_file, @@ -515,16 +585,9 @@ void ReferenceTypePropagation::RTPVisitor::UpdateFieldAccessTypeInfo(HInstructio ScopedObjectAccess soa(Thread::Current()); ObjPtr<mirror::Class> klass; - // The field index is unknown only during tests. - if (info.GetFieldIndex() != kUnknownFieldIndex) { - ClassLinker* cl = Runtime::Current()->GetClassLinker(); - ArtField* field = cl->GetResolvedField(info.GetFieldIndex(), - MakeObjPtr(info.GetDexCache().Get())); - // TODO: There are certain cases where we can't resolve the field. - // b/21914925 is open to keep track of a repro case for this issue. - if (field != nullptr) { - klass = field->GetType<false>(); - } + // The field is unknown only during tests. + if (info.GetField() != nullptr) { + klass = info.GetField()->GetType<false>(); } SetClassAsTypeInfo(instr, klass, /* is_exact */ false); diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc index 559f40923b..2227872f76 100644 --- a/compiler/optimizing/register_allocator_test.cc +++ b/compiler/optimizing/register_allocator_test.cc @@ -492,7 +492,6 @@ static HGraph* BuildIfElseWithPhi(ArenaAllocator* allocator, HInstruction** input2) { HGraph* graph = CreateGraph(allocator); HBasicBlock* entry = new (allocator) HBasicBlock(graph); - ScopedNullHandle<mirror::DexCache> dex_cache; graph->AddBlock(entry); graph->SetEntryBlock(entry); HInstruction* parameter = new (allocator) HParameterValue( @@ -504,13 +503,13 @@ static HGraph* BuildIfElseWithPhi(ArenaAllocator* allocator, entry->AddSuccessor(block); HInstruction* test = new (allocator) HInstanceFieldGet(parameter, + nullptr, Primitive::kPrimBoolean, MemberOffset(22), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0); block->AddInstruction(test); block->AddInstruction(new (allocator) HIf(test)); @@ -531,22 +530,22 @@ static HGraph* BuildIfElseWithPhi(ArenaAllocator* allocator, *phi = new (allocator) HPhi(allocator, 0, 0, Primitive::kPrimInt); join->AddPhi(*phi); *input1 = new (allocator) HInstanceFieldGet(parameter, + nullptr, Primitive::kPrimInt, MemberOffset(42), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0); *input2 = new (allocator) HInstanceFieldGet(parameter, + nullptr, Primitive::kPrimInt, MemberOffset(42), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0); then->AddInstruction(*input1); else_->AddInstruction(*input2); @@ -654,7 +653,6 @@ static HGraph* BuildFieldReturn(ArenaAllocator* allocator, HInstruction** field, HInstruction** ret) { HGraph* graph = CreateGraph(allocator); - ScopedNullHandle<mirror::DexCache> dex_cache; HBasicBlock* entry = new (allocator) HBasicBlock(graph); 
graph->AddBlock(entry); graph->SetEntryBlock(entry); @@ -667,13 +665,13 @@ static HGraph* BuildFieldReturn(ArenaAllocator* allocator, entry->AddSuccessor(block); *field = new (allocator) HInstanceFieldGet(parameter, + nullptr, Primitive::kPrimInt, MemberOffset(42), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph->GetDexFile(), - dex_cache, 0); block->AddInstruction(*field); *ret = new (allocator) HReturn(*field); diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc index ca26c30dcf..dc8ee23ba4 100644 --- a/compiler/optimizing/sharpening.cc +++ b/compiler/optimizing/sharpening.cc @@ -275,7 +275,6 @@ void HSharpening::ProcessLoadString(HLoadString* load_string) { dex::StringIndex string_index = load_string->GetStringIndex(); HLoadString::LoadKind desired_load_kind = HLoadString::LoadKind::kDexCacheViaMethod; - uint64_t address = 0u; // String or dex cache element address. { Runtime* runtime = Runtime::Current(); ClassLinker* class_linker = runtime->GetClassLinker(); @@ -284,12 +283,13 @@ void HSharpening::ProcessLoadString(HLoadString* load_string) { Handle<mirror::DexCache> dex_cache = IsSameDexFile(dex_file, *compilation_unit_.GetDexFile()) ? compilation_unit_.GetDexCache() : hs.NewHandle(class_linker->FindDexCache(soa.Self(), dex_file)); + mirror::String* string = nullptr; if (codegen_->GetCompilerOptions().IsBootImage()) { // Compiling boot image. Resolve the string and allocate it if needed, to ensure // the string will be added to the boot image. DCHECK(!runtime->UseJitCompilation()); - mirror::String* string = class_linker->ResolveString(dex_file, string_index, dex_cache); + string = class_linker->ResolveString(dex_file, string_index, dex_cache); CHECK(string != nullptr); if (compiler_driver_->GetSupportBootImageFixup()) { DCHECK(ContainsElement(compiler_driver_->GetDexFilesForOatFile(), &dex_file)); @@ -303,43 +303,32 @@ void HSharpening::ProcessLoadString(HLoadString* load_string) { } else if (runtime->UseJitCompilation()) { // TODO: Make sure we don't set the "compile PIC" flag for JIT as that's bogus. // DCHECK(!codegen_->GetCompilerOptions().GetCompilePic()); - mirror::String* string = class_linker->LookupString(dex_file, string_index, dex_cache); + string = class_linker->LookupString(dex_file, string_index, dex_cache); if (string != nullptr) { if (runtime->GetHeap()->ObjectIsInBootImageSpace(string)) { desired_load_kind = HLoadString::LoadKind::kBootImageAddress; - address = reinterpret_cast64<uint64_t>(string); } else { desired_load_kind = HLoadString::LoadKind::kJitTableAddress; } } } else { // AOT app compilation. Try to lookup the string without allocating if not found. 
- mirror::String* string = class_linker->LookupString(dex_file, string_index, dex_cache); + string = class_linker->LookupString(dex_file, string_index, dex_cache); if (string != nullptr && runtime->GetHeap()->ObjectIsInBootImageSpace(string) && !codegen_->GetCompilerOptions().GetCompilePic()) { desired_load_kind = HLoadString::LoadKind::kBootImageAddress; - address = reinterpret_cast64<uint64_t>(string); } else { desired_load_kind = HLoadString::LoadKind::kBssEntry; } } + if (string != nullptr) { + load_string->SetString(handles_->NewHandle(string)); + } } HLoadString::LoadKind load_kind = codegen_->GetSupportedLoadStringKind(desired_load_kind); - switch (load_kind) { - case HLoadString::LoadKind::kBootImageLinkTimeAddress: - case HLoadString::LoadKind::kBootImageLinkTimePcRelative: - case HLoadString::LoadKind::kBssEntry: - case HLoadString::LoadKind::kDexCacheViaMethod: - case HLoadString::LoadKind::kJitTableAddress: - load_string->SetLoadKindWithStringReference(load_kind, dex_file, string_index); - break; - case HLoadString::LoadKind::kBootImageAddress: - DCHECK_NE(address, 0u); - load_string->SetLoadKindWithAddress(load_kind, address); - break; - } + load_string->SetLoadKind(load_kind); } } // namespace art diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc index ab4f9e944c..a3fce02970 100644 --- a/compiler/utils/assembler_thumb_test_expected.cc.inc +++ b/compiler/utils/assembler_thumb_test_expected.cc.inc @@ -5610,7 +5610,7 @@ const char* const VixlJniHelpersResults[] = { " 214: ecbd 8a10 vpop {s16-s31}\n", " 218: e8bd 8de0 ldmia.w sp!, {r5, r6, r7, r8, sl, fp, pc}\n", " 21c: 4660 mov r0, ip\n", - " 21e: f8d9 c2b0 ldr.w ip, [r9, #688] ; 0x2b0\n", + " 21e: f8d9 c2ac ldr.w ip, [r9, #684] ; 0x2ac\n", " 222: 47e0 blx ip\n", nullptr }; diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc index ece81e3919..21b03eb8ba 100644 --- a/dex2oat/dex2oat.cc +++ b/dex2oat/dex2oat.cc @@ -1095,9 +1095,6 @@ class Dex2Oat FINAL { compiler_options_->GetNativeDebuggable() ? OatHeader::kTrueValue : OatHeader::kFalseValue); key_value_store_->Put(OatHeader::kCompilerFilter, CompilerFilter::NameOfFilter(compiler_options_->GetCompilerFilter())); - key_value_store_->Put(OatHeader::kHasPatchInfoKey, - compiler_options_->GetIncludePatchInformation() ? OatHeader::kTrueValue - : OatHeader::kFalseValue); } // Parse the arguments from the command line. In case of an unrecognized option or impossible diff --git a/runtime/arch/arm/instruction_set_features_arm.cc b/runtime/arch/arm/instruction_set_features_arm.cc index 181b2ed723..6c2c81576e 100644 --- a/runtime/arch/arm/instruction_set_features_arm.cc +++ b/runtime/arch/arm/instruction_set_features_arm.cc @@ -39,80 +39,69 @@ using android::base::StringPrintf; ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromVariant( const std::string& variant, std::string* error_msg) { - // Assume all ARM processors are SMP. - // TODO: set the SMP support based on variant. - const bool smp = true; - // Look for variants that have divide support. 
static const char* arm_variants_with_div[] = { - "cortex-a7", "cortex-a12", "cortex-a15", "cortex-a17", "cortex-a53", "cortex-a57", - "cortex-a53.a57", "cortex-m3", "cortex-m4", "cortex-r4", "cortex-r5", - "cyclone", "denver", "krait", "swift" }; + "cortex-a7", + "cortex-a12", + "cortex-a15", + "cortex-a17", + "cortex-a53", + "cortex-a53.a57", + "cortex-a57", + "denver", + "krait", + }; - bool has_div = FindVariantInArray(arm_variants_with_div, arraysize(arm_variants_with_div), + bool has_div = FindVariantInArray(arm_variants_with_div, + arraysize(arm_variants_with_div), variant); // Look for variants that have LPAE support. static const char* arm_variants_with_lpae[] = { - "cortex-a7", "cortex-a15", "krait", "denver", "cortex-a53", "cortex-a57", "cortex-a53.a57" + "cortex-a7", + "cortex-a12", + "cortex-a15", + "cortex-a17", + "cortex-a53", + "cortex-a53.a57", + "cortex-a57", + "denver", + "krait", }; - bool has_lpae = FindVariantInArray(arm_variants_with_lpae, arraysize(arm_variants_with_lpae), + bool has_lpae = FindVariantInArray(arm_variants_with_lpae, + arraysize(arm_variants_with_lpae), variant); if (has_div == false && has_lpae == false) { - // Avoid unsupported variants. - static const char* unsupported_arm_variants[] = { - // ARM processors that aren't ARMv7 compatible aren't supported. - "arm2", "arm250", "arm3", "arm6", "arm60", "arm600", "arm610", "arm620", - "cortex-m0", "cortex-m0plus", "cortex-m1", - "fa526", "fa626", "fa606te", "fa626te", "fmp626", "fa726te", - "iwmmxt", "iwmmxt2", - "strongarm", "strongarm110", "strongarm1100", "strongarm1110", - "xscale" - }; - if (FindVariantInArray(unsupported_arm_variants, arraysize(unsupported_arm_variants), - variant)) { - *error_msg = StringPrintf("Attempt to use unsupported ARM variant: %s", variant.c_str()); - return ArmFeaturesUniquePtr(); - } - // Warn if the variant is unknown. - // TODO: some of the variants below may have feature support, but that support is currently - // unknown so we'll choose conservative (sub-optimal) defaults without warning. - // TODO: some of the architectures may not support all features required by ART and should be - // moved to unsupported_arm_variants[] above. - static const char* arm_variants_without_known_features[] = { + static const char* arm_variants_with_default_features[] = { + "cortex-a5", + "cortex-a8", + "cortex-a9", + "cortex-a9-mp", "default", - "arm7", "arm7m", "arm7d", "arm7dm", "arm7di", "arm7dmi", "arm70", "arm700", "arm700i", - "arm710", "arm710c", "arm7100", "arm720", "arm7500", "arm7500fe", "arm7tdmi", "arm7tdmi-s", - "arm710t", "arm720t", "arm740t", - "arm8", "arm810", - "arm9", "arm9e", "arm920", "arm920t", "arm922t", "arm946e-s", "arm966e-s", "arm968e-s", - "arm926ej-s", "arm940t", "arm9tdmi", - "arm10tdmi", "arm1020t", "arm1026ej-s", "arm10e", "arm1020e", "arm1022e", - "arm1136j-s", "arm1136jf-s", - "arm1156t2-s", "arm1156t2f-s", "arm1176jz-s", "arm1176jzf-s", - "cortex-a5", "cortex-a8", "cortex-a9", "cortex-a9-mp", "cortex-r4f", - "marvell-pj4", "mpcore", "mpcorenovfp" + "generic" }; - if (!FindVariantInArray(arm_variants_without_known_features, - arraysize(arm_variants_without_known_features), + if (!FindVariantInArray(arm_variants_with_default_features, + arraysize(arm_variants_with_default_features), variant)) { - LOG(WARNING) << "Unknown instruction set features for ARM CPU variant (" << variant + *error_msg = StringPrintf("Attempt to use unsupported ARM variant: %s", variant.c_str()); + return nullptr; + } else { + // Warn if we use the default features. 
+ LOG(WARNING) << "Using default instruction set features for ARM CPU variant (" << variant << ") using conservative defaults"; } } - return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(smp, has_div, has_lpae)); + return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(has_div, has_lpae)); } ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromBitmap(uint32_t bitmap) { - bool smp = (bitmap & kSmpBitfield) != 0; bool has_div = (bitmap & kDivBitfield) != 0; bool has_atomic_ldrd_strd = (bitmap & kAtomicLdrdStrdBitfield) != 0; - return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(smp, has_div, has_atomic_ldrd_strd)); + return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(has_div, has_atomic_ldrd_strd)); } ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromCppDefines() { - const bool smp = true; #if defined(__ARM_ARCH_EXT_IDIV__) const bool has_div = true; #else @@ -123,13 +112,12 @@ ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromCppDefines() { #else const bool has_lpae = false; #endif - return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(smp, has_div, has_lpae)); + return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(has_div, has_lpae)); } ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromCpuInfo() { // Look in /proc/cpuinfo for features we need. Only use this when we can guarantee that // the kernel puts the appropriate feature flags in here. Sometimes it doesn't. - bool smp = false; bool has_lpae = false; bool has_div = false; @@ -151,9 +139,6 @@ ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromCpuInfo() { if (line.find("lpae") != std::string::npos) { has_lpae = true; } - } else if (line.find("processor") != std::string::npos && - line.find(": 1") != std::string::npos) { - smp = true; } } } @@ -161,12 +146,10 @@ ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromCpuInfo() { } else { LOG(ERROR) << "Failed to open /proc/cpuinfo"; } - return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(smp, has_div, has_lpae)); + return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(has_div, has_lpae)); } ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromHwcap() { - bool smp = sysconf(_SC_NPROCESSORS_CONF) > 1; - bool has_div = false; bool has_lpae = false; @@ -184,7 +167,7 @@ ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromHwcap() { } #endif - return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(smp, has_div, has_lpae)); + return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(has_div, has_lpae)); } // A signal handler called by a fault for an illegal instruction. We record the fact in r0 @@ -203,8 +186,6 @@ static void bad_divide_inst_handle(int signo ATTRIBUTE_UNUSED, siginfo_t* si ATT } ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromAssembly() { - const bool smp = true; - // See if have a sdiv instruction. Register a signal handler and try to execute an sdiv // instruction. If we get a SIGILL then it's not supported. 
struct sigaction sa, osa; @@ -230,7 +211,7 @@ ArmFeaturesUniquePtr ArmInstructionSetFeatures::FromAssembly() { #else const bool has_lpae = false; #endif - return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(smp, has_div, has_lpae)); + return ArmFeaturesUniquePtr(new ArmInstructionSetFeatures(has_div, has_lpae)); } bool ArmInstructionSetFeatures::Equals(const InstructionSetFeatures* other) const { @@ -238,28 +219,21 @@ bool ArmInstructionSetFeatures::Equals(const InstructionSetFeatures* other) cons return false; } const ArmInstructionSetFeatures* other_as_arm = other->AsArmInstructionSetFeatures(); - return IsSmp() == other_as_arm->IsSmp() && - has_div_ == other_as_arm->has_div_ && + return has_div_ == other_as_arm->has_div_ && has_atomic_ldrd_strd_ == other_as_arm->has_atomic_ldrd_strd_; } uint32_t ArmInstructionSetFeatures::AsBitmap() const { - return (IsSmp() ? kSmpBitfield : 0) | - (has_div_ ? kDivBitfield : 0) | + return (has_div_ ? kDivBitfield : 0) | (has_atomic_ldrd_strd_ ? kAtomicLdrdStrdBitfield : 0); } std::string ArmInstructionSetFeatures::GetFeatureString() const { std::string result; - if (IsSmp()) { - result += "smp"; - } else { - result += "-smp"; - } if (has_div_) { - result += ",div"; + result += "div"; } else { - result += ",-div"; + result += "-div"; } if (has_atomic_ldrd_strd_) { result += ",atomic_ldrd_strd"; @@ -271,7 +245,7 @@ std::string ArmInstructionSetFeatures::GetFeatureString() const { std::unique_ptr<const InstructionSetFeatures> ArmInstructionSetFeatures::AddFeaturesFromSplitString( - const bool smp, const std::vector<std::string>& features, std::string* error_msg) const { + const std::vector<std::string>& features, std::string* error_msg) const { bool has_atomic_ldrd_strd = has_atomic_ldrd_strd_; bool has_div = has_div_; for (auto i = features.begin(); i != features.end(); i++) { @@ -290,7 +264,7 @@ ArmInstructionSetFeatures::AddFeaturesFromSplitString( } } return std::unique_ptr<const InstructionSetFeatures>( - new ArmInstructionSetFeatures(smp, has_div, has_atomic_ldrd_strd)); + new ArmInstructionSetFeatures(has_div, has_atomic_ldrd_strd)); } } // namespace art diff --git a/runtime/arch/arm/instruction_set_features_arm.h b/runtime/arch/arm/instruction_set_features_arm.h index 204d1d76cc..11f8bf0117 100644 --- a/runtime/arch/arm/instruction_set_features_arm.h +++ b/runtime/arch/arm/instruction_set_features_arm.h @@ -74,20 +74,19 @@ class ArmInstructionSetFeatures FINAL : public InstructionSetFeatures { protected: // Parse a vector of the form "div", "lpae" adding these to a new ArmInstructionSetFeatures. std::unique_ptr<const InstructionSetFeatures> - AddFeaturesFromSplitString(const bool smp, const std::vector<std::string>& features, + AddFeaturesFromSplitString(const std::vector<std::string>& features, std::string* error_msg) const OVERRIDE; private: - ArmInstructionSetFeatures(bool smp, bool has_div, bool has_atomic_ldrd_strd) - : InstructionSetFeatures(smp), + ArmInstructionSetFeatures(bool has_div, bool has_atomic_ldrd_strd) + : InstructionSetFeatures(), has_div_(has_div), has_atomic_ldrd_strd_(has_atomic_ldrd_strd) { } // Bitmap positions for encoding features as a bitmap. 
enum { - kSmpBitfield = 1, - kDivBitfield = 2, - kAtomicLdrdStrdBitfield = 4, + kDivBitfield = 1 << 0, + kAtomicLdrdStrdBitfield = 1 << 1, }; const bool has_div_; diff --git a/runtime/arch/arm/instruction_set_features_arm_test.cc b/runtime/arch/arm/instruction_set_features_arm_test.cc index 44b1640f04..697ca9015e 100644 --- a/runtime/arch/arm/instruction_set_features_arm_test.cc +++ b/runtime/arch/arm/instruction_set_features_arm_test.cc @@ -31,8 +31,8 @@ TEST(ArmInstructionSetFeaturesTest, ArmFeaturesFromVariant) { EXPECT_TRUE(krait_features->Equals(krait_features.get())); EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasDivideInstruction()); EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd()); - EXPECT_STREQ("smp,div,atomic_ldrd_strd", krait_features->GetFeatureString().c_str()); - EXPECT_EQ(krait_features->AsBitmap(), 7U); + EXPECT_STREQ("div,atomic_ldrd_strd", krait_features->GetFeatureString().c_str()); + EXPECT_EQ(krait_features->AsBitmap(), 3U); // Build features for a 32-bit ARM denver processor. std::unique_ptr<const InstructionSetFeatures> denver_features( @@ -44,21 +44,21 @@ TEST(ArmInstructionSetFeaturesTest, ArmFeaturesFromVariant) { EXPECT_TRUE(krait_features->Equals(denver_features.get())); EXPECT_TRUE(denver_features->AsArmInstructionSetFeatures()->HasDivideInstruction()); EXPECT_TRUE(denver_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd()); - EXPECT_STREQ("smp,div,atomic_ldrd_strd", denver_features->GetFeatureString().c_str()); - EXPECT_EQ(denver_features->AsBitmap(), 7U); + EXPECT_STREQ("div,atomic_ldrd_strd", denver_features->GetFeatureString().c_str()); + EXPECT_EQ(denver_features->AsBitmap(), 3U); // Build features for a 32-bit ARMv7 processor. - std::unique_ptr<const InstructionSetFeatures> arm7_features( - InstructionSetFeatures::FromVariant(kArm, "arm7", &error_msg)); - ASSERT_TRUE(arm7_features.get() != nullptr) << error_msg; - - EXPECT_TRUE(arm7_features->Equals(arm7_features.get())); - EXPECT_FALSE(arm7_features->Equals(krait_features.get())); - EXPECT_FALSE(krait_features->Equals(arm7_features.get())); - EXPECT_FALSE(arm7_features->AsArmInstructionSetFeatures()->HasDivideInstruction()); - EXPECT_FALSE(arm7_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd()); - EXPECT_STREQ("smp,-div,-atomic_ldrd_strd", arm7_features->GetFeatureString().c_str()); - EXPECT_EQ(arm7_features->AsBitmap(), 1U); + std::unique_ptr<const InstructionSetFeatures> generic_features( + InstructionSetFeatures::FromVariant(kArm, "generic", &error_msg)); + ASSERT_TRUE(generic_features.get() != nullptr) << error_msg; + + EXPECT_TRUE(generic_features->Equals(generic_features.get())); + EXPECT_FALSE(generic_features->Equals(krait_features.get())); + EXPECT_FALSE(krait_features->Equals(generic_features.get())); + EXPECT_FALSE(generic_features->AsArmInstructionSetFeatures()->HasDivideInstruction()); + EXPECT_FALSE(generic_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd()); + EXPECT_STREQ("-div,-atomic_ldrd_strd", generic_features->GetFeatureString().c_str()); + EXPECT_EQ(generic_features->AsBitmap(), 0U); // ARM6 is not a supported architecture variant. 
std::unique_ptr<const InstructionSetFeatures> arm6_features( @@ -70,7 +70,7 @@ TEST(ArmInstructionSetFeaturesTest, ArmFeaturesFromVariant) { TEST(ArmInstructionSetFeaturesTest, ArmAddFeaturesFromString) { std::string error_msg; std::unique_ptr<const InstructionSetFeatures> base_features( - InstructionSetFeatures::FromVariant(kArm, "arm7", &error_msg)); + InstructionSetFeatures::FromVariant(kArm, "generic", &error_msg)); ASSERT_TRUE(base_features.get() != nullptr) << error_msg; // Build features for a 32-bit ARM with LPAE and div processor. @@ -82,8 +82,8 @@ TEST(ArmInstructionSetFeaturesTest, ArmAddFeaturesFromString) { EXPECT_TRUE(krait_features->Equals(krait_features.get())); EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasDivideInstruction()); EXPECT_TRUE(krait_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd()); - EXPECT_STREQ("smp,div,atomic_ldrd_strd", krait_features->GetFeatureString().c_str()); - EXPECT_EQ(krait_features->AsBitmap(), 7U); + EXPECT_STREQ("div,atomic_ldrd_strd", krait_features->GetFeatureString().c_str()); + EXPECT_EQ(krait_features->AsBitmap(), 3U); // Build features for a 32-bit ARM processor with LPAE and div flipped. std::unique_ptr<const InstructionSetFeatures> denver_features( @@ -95,21 +95,21 @@ TEST(ArmInstructionSetFeaturesTest, ArmAddFeaturesFromString) { EXPECT_TRUE(krait_features->Equals(denver_features.get())); EXPECT_TRUE(denver_features->AsArmInstructionSetFeatures()->HasDivideInstruction()); EXPECT_TRUE(denver_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd()); - EXPECT_STREQ("smp,div,atomic_ldrd_strd", denver_features->GetFeatureString().c_str()); - EXPECT_EQ(denver_features->AsBitmap(), 7U); + EXPECT_STREQ("div,atomic_ldrd_strd", denver_features->GetFeatureString().c_str()); + EXPECT_EQ(denver_features->AsBitmap(), 3U); // Build features for a 32-bit default ARM processor. 
- std::unique_ptr<const InstructionSetFeatures> arm7_features( + std::unique_ptr<const InstructionSetFeatures> generic_features( base_features->AddFeaturesFromString("default", &error_msg)); - ASSERT_TRUE(arm7_features.get() != nullptr) << error_msg; - - EXPECT_TRUE(arm7_features->Equals(arm7_features.get())); - EXPECT_FALSE(arm7_features->Equals(krait_features.get())); - EXPECT_FALSE(krait_features->Equals(arm7_features.get())); - EXPECT_FALSE(arm7_features->AsArmInstructionSetFeatures()->HasDivideInstruction()); - EXPECT_FALSE(arm7_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd()); - EXPECT_STREQ("smp,-div,-atomic_ldrd_strd", arm7_features->GetFeatureString().c_str()); - EXPECT_EQ(arm7_features->AsBitmap(), 1U); + ASSERT_TRUE(generic_features.get() != nullptr) << error_msg; + + EXPECT_TRUE(generic_features->Equals(generic_features.get())); + EXPECT_FALSE(generic_features->Equals(krait_features.get())); + EXPECT_FALSE(krait_features->Equals(generic_features.get())); + EXPECT_FALSE(generic_features->AsArmInstructionSetFeatures()->HasDivideInstruction()); + EXPECT_FALSE(generic_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd()); + EXPECT_STREQ("-div,-atomic_ldrd_strd", generic_features->GetFeatureString().c_str()); + EXPECT_EQ(generic_features->AsBitmap(), 0U); } } // namespace art diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S index a71ab4b53c..4d4ebdcad8 100644 --- a/runtime/arch/arm/quick_entrypoints_arm.S +++ b/runtime/arch/arm/quick_entrypoints_arm.S @@ -1124,28 +1124,23 @@ END art_quick_resolve_string // Generate the allocation entrypoints for each allocator. GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR -// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc). -ENTRY art_quick_alloc_object_rosalloc +// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_RESOLVED_OBJECT(_rosalloc, RosAlloc). +ENTRY art_quick_alloc_object_resolved_rosalloc // Fast path rosalloc allocation. - // r0: type_idx/return value, r1: ArtMethod*, r9: Thread::Current - // r2, r3, r12: free. - ldr r2, [r1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_32] // Load dex cache resolved types array - // Load the class (r2) - ldr r2, [r2, r0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT] - cbz r2, .Lart_quick_alloc_object_rosalloc_slow_path // Check null class - + // r0: type/return value, r9: Thread::Current + // r1, r2, r3, r12: free. ldr r3, [r9, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET] // Check if the thread local // allocation stack has room. // TODO: consider using ldrd. ldr r12, [r9, #THREAD_LOCAL_ALLOC_STACK_END_OFFSET] cmp r3, r12 - bhs .Lart_quick_alloc_object_rosalloc_slow_path + bhs .Lart_quick_alloc_object_resolved_rosalloc_slow_path - ldr r3, [r2, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (r3) + ldr r3, [r0, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (r3) cmp r3, #ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE // Check if the size is for a thread // local allocation. Also does the // initialized and finalizable checks. - bhs .Lart_quick_alloc_object_rosalloc_slow_path + bhs .Lart_quick_alloc_object_resolved_rosalloc_slow_path // Compute the rosalloc bracket index // from the size. Since the size is // already aligned we can combine the @@ -1159,7 +1154,7 @@ ENTRY art_quick_alloc_object_rosalloc // Load the free list head (r3). This // will be the return val. 
ldr r3, [r12, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)] - cbz r3, .Lart_quick_alloc_object_rosalloc_slow_path + cbz r3, .Lart_quick_alloc_object_resolved_rosalloc_slow_path // "Point of no slow path". Won't go to the slow path from here on. OK to clobber r0 and r1. ldr r1, [r3, #ROSALLOC_SLOT_NEXT_OFFSET] // Load the next pointer of the head // and update the list head with the @@ -1172,8 +1167,8 @@ ENTRY art_quick_alloc_object_rosalloc #if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET #error "Class pointer needs to overwrite next pointer." #endif - POISON_HEAP_REF r2 - str r2, [r3, #MIRROR_OBJECT_CLASS_OFFSET] + POISON_HEAP_REF r0 + str r0, [r3, #MIRROR_OBJECT_CLASS_OFFSET] // Fence. This is "ish" not "ishst" so // that it also ensures ordering of // the class status load with respect @@ -1204,20 +1199,20 @@ ENTRY art_quick_alloc_object_rosalloc mov r0, r3 // Set the return value and return. bx lr -.Lart_quick_alloc_object_rosalloc_slow_path: +.Lart_quick_alloc_object_resolved_rosalloc_slow_path: SETUP_SAVE_REFS_ONLY_FRAME r2 @ save callee saves in case of GC - mov r2, r9 @ pass Thread::Current - bl artAllocObjectFromCodeRosAlloc @ (uint32_t type_idx, Method* method, Thread*) + mov r1, r9 @ pass Thread::Current + bl artAllocObjectFromCodeResolvedRosAlloc @ (mirror::Class* cls, Thread*) RESTORE_SAVE_REFS_ONLY_FRAME RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER -END art_quick_alloc_object_rosalloc +END art_quick_alloc_object_resolved_rosalloc -// The common fast path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab. +// The common fast path code for art_quick_alloc_object_resolved_tlab +// and art_quick_alloc_object_resolved_region_tlab. // -// r0: type_idx/return value, r1: ArtMethod*, r2: class, r9: Thread::Current, r3, r12: free. -// Need to preserve r0 and r1 to the slow path. -.macro ALLOC_OBJECT_TLAB_FAST_PATH slowPathLabel - cbz r2, \slowPathLabel // Check null class +// r0: type r9: Thread::Current, r1, r2, r3, r12: free. +// Need to preserve r0 to the slow path. +.macro ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH slowPathLabel // Load thread_local_pos (r12) and // thread_local_end (r3) with ldrd. // Check constraints for ldrd. @@ -1232,14 +1227,14 @@ END art_quick_alloc_object_rosalloc // "Point of no slow path". Won't go to the slow path from here on. OK to clobber r0 and r1. // Reload old thread_local_pos (r0) // for the return value. - ldr r0, [r9, #THREAD_LOCAL_POS_OFFSET] - add r1, r0, r3 + ldr r2, [r9, #THREAD_LOCAL_POS_OFFSET] + add r1, r2, r3 str r1, [r9, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos. ldr r1, [r9, #THREAD_LOCAL_OBJECTS_OFFSET] // Increment thread_local_objects. add r1, r1, #1 str r1, [r9, #THREAD_LOCAL_OBJECTS_OFFSET] - POISON_HEAP_REF r2 - str r2, [r0, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer. + POISON_HEAP_REF r0 + str r0, [r2, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer. // Fence. This is "ish" not "ishst" so // that the code after this allocation // site will see the right values in @@ -1247,71 +1242,46 @@ END art_quick_alloc_object_rosalloc // Alternatively we could use "ishst" // if we use load-acquire for the // object size load.) + mov r0, r2 dmb ish bx lr .endm -// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB). -ENTRY art_quick_alloc_object_tlab +// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_RESOLVED_OBJECT(_tlab, TLAB). +ENTRY art_quick_alloc_object_resolved_tlab // Fast path tlab allocation. 
- // r0: type_idx/return value, r1: ArtMethod*, r9: Thread::Current - // r2, r3, r12: free. + // r0: type, r9: Thread::Current + // r1, r2, r3, r12: free. #if defined(USE_READ_BARRIER) mvn r0, #0 // Read barrier not supported here. bx lr // Return -1. #endif - ldr r2, [r1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_32] // Load dex cache resolved types array - // Load the class (r2) - ldr r2, [r2, r0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT] - ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_tlab_slow_path -.Lart_quick_alloc_object_tlab_slow_path: + ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_tlab_slow_path +.Lart_quick_alloc_object_resolved_tlab_slow_path: SETUP_SAVE_REFS_ONLY_FRAME r2 // Save callee saves in case of GC. - mov r2, r9 // Pass Thread::Current. - bl artAllocObjectFromCodeTLAB // (uint32_t type_idx, Method* method, Thread*) + mov r1, r9 // Pass Thread::Current. + bl artAllocObjectFromCodeResolvedTLAB // (mirror::Class* klass, Thread*) RESTORE_SAVE_REFS_ONLY_FRAME RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER -END art_quick_alloc_object_tlab +END art_quick_alloc_object_resolved_tlab -// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB) -ENTRY art_quick_alloc_object_region_tlab +// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB) +ENTRY art_quick_alloc_object_resolved_region_tlab // Fast path tlab allocation. - // r0: type_idx/return value, r1: ArtMethod*, r9: Thread::Current, r2, r3, r12: free. + // r0: type, r9: Thread::Current, r1, r2, r3, r12: free. #if !defined(USE_READ_BARRIER) eor r0, r0, r0 // Read barrier must be enabled here. sub r0, r0, #1 // Return -1. bx lr #endif - ldr r2, [r1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_32] // Load dex cache resolved types array - // Load the class (r2) - ldr r2, [r2, r0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT] - // Read barrier for class load. - ldr r3, [r9, #THREAD_IS_GC_MARKING_OFFSET] - cbnz r3, .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_marking -.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit: - ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_region_tlab_slow_path -.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_marking: - cbz r2, .Lart_quick_alloc_object_region_tlab_slow_path // Null check for loading lock word. - // Check lock word for mark bit, if marked do the allocation. - ldr r3, [r2, MIRROR_OBJECT_LOCK_WORD_OFFSET] - ands r3, #LOCK_WORD_MARK_BIT_MASK_SHIFTED - bne .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit -.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path: - // The read barrier slow path. Mark - // the class. - push {r0, r1, r3, lr} // Save registers. r3 is pushed only - // to align sp by 16 bytes. - mov r0, r2 // Pass the class as the first param. - bl artReadBarrierMark - mov r2, r0 // Get the (marked) class back. - pop {r0, r1, r3, lr} - b .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit -.Lart_quick_alloc_object_region_tlab_slow_path: + ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_region_tlab_slow_path +.Lart_quick_alloc_object_resolved_region_tlab_slow_path: SETUP_SAVE_REFS_ONLY_FRAME r2 // Save callee saves in case of GC. - mov r2, r9 // Pass Thread::Current. - bl artAllocObjectFromCodeRegionTLAB // (uint32_t type_idx, Method* method, Thread*) + mov r1, r9 // Pass Thread::Current. 
+ bl artAllocObjectFromCodeResolvedRegionTLAB // (mirror::Class* klass, Thread*) RESTORE_SAVE_REFS_ONLY_FRAME RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER -END art_quick_alloc_object_region_tlab +END art_quick_alloc_object_resolved_region_tlab /* * Called by managed code when the value in rSUSPEND has been decremented to 0. diff --git a/runtime/arch/arm64/instruction_set_features_arm64.cc b/runtime/arch/arm64/instruction_set_features_arm64.cc index 52d8b3e367..c59874332f 100644 --- a/runtime/arch/arm64/instruction_set_features_arm64.cc +++ b/runtime/arch/arm64/instruction_set_features_arm64.cc @@ -31,8 +31,6 @@ using android::base::StringPrintf; Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromVariant( const std::string& variant, std::string* error_msg) { - const bool smp = true; // Conservative default. - // Look for variants that need a fix for a53 erratum 835769. static const char* arm64_variants_with_a53_835769_bug[] = { "default", "generic", "cortex-a53" // Pessimistically assume all generic ARM64s are A53s. @@ -58,50 +56,27 @@ Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromVariant( bool needs_a53_843419_fix = needs_a53_835769_fix; return Arm64FeaturesUniquePtr( - new Arm64InstructionSetFeatures(smp, needs_a53_835769_fix, needs_a53_843419_fix)); + new Arm64InstructionSetFeatures(needs_a53_835769_fix, needs_a53_843419_fix)); } Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromBitmap(uint32_t bitmap) { - bool smp = (bitmap & kSmpBitfield) != 0; bool is_a53 = (bitmap & kA53Bitfield) != 0; - return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(smp, is_a53, is_a53)); + return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(is_a53, is_a53)); } Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromCppDefines() { - const bool smp = true; const bool is_a53 = true; // Pessimistically assume all ARM64s are A53s. - return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(smp, is_a53, is_a53)); + return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(is_a53, is_a53)); } Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromCpuInfo() { - // Look in /proc/cpuinfo for features we need. Only use this when we can guarantee that - // the kernel puts the appropriate feature flags in here. Sometimes it doesn't. - bool smp = false; const bool is_a53 = true; // Conservative default. - - std::ifstream in("/proc/cpuinfo"); - if (!in.fail()) { - while (!in.eof()) { - std::string line; - std::getline(in, line); - if (!in.eof()) { - LOG(INFO) << "cpuinfo line: " << line; - if (line.find("processor") != std::string::npos && line.find(": 1") != std::string::npos) { - smp = true; - } - } - } - in.close(); - } else { - LOG(ERROR) << "Failed to open /proc/cpuinfo"; - } - return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(smp, is_a53, is_a53)); + return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(is_a53, is_a53)); } Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromHwcap() { - bool smp = sysconf(_SC_NPROCESSORS_CONF) > 1; const bool is_a53 = true; // Pessimistically assume all ARM64s are A53s. 
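With the class already resolved and passed in r0/x0, the TLAB fast path is a plain bump-pointer allocation against the thread-local buffer: read the instance size from the class, check that it fits between thread_local_pos and thread_local_end, bump the position, count the object, store the class pointer at the start of the new object, and fence. The old region-TLAB entrypoint's extra read-barrier marking of the class is gone because the resolved and initialized entrypoints leave that to the caller (the to-space invariant, as the deleted macro comment notes below). A rough, self-contained C++ model of the fast path; FakeClass, FakeThread and AllocObjectResolvedTlab are invented names for this sketch, not runtime APIs:

#include <cstddef>
#include <cstdint>
#include <cstring>

// Invented stand-ins for the sketch; they do not match the runtime's layout.
struct FakeClass {
  uint32_t object_size;  // Plays the role of the "object size for alloc fast path" field.
};

struct FakeThread {
  uint8_t* tlab_pos;         // thread_local_pos
  uint8_t* tlab_end;         // thread_local_end
  std::size_t tlab_objects;  // thread_local_objects
};

// Returns the new object, or nullptr where the assembly branches to the slow path.
void* AllocObjectResolvedTlab(FakeThread* self, FakeClass* klass) {
  uint32_t size = klass->object_size;                        // Load the object size.
  uint8_t* obj = self->tlab_pos;
  if (size > static_cast<std::size_t>(self->tlab_end - obj)) {
    return nullptr;                                          // Does not fit: slow path.
  }
  self->tlab_pos = obj + size;                               // Store new thread_local_pos.
  self->tlab_objects++;                                      // Increment thread_local_objects.
  std::memcpy(obj, &klass, sizeof(klass));                   // Store the class pointer first.
  // The real code issues a "dmb ish" fence here before returning the object.
  return obj;
}

int main() {
  uint8_t buffer[64] = {};
  FakeThread self{buffer, buffer + sizeof(buffer), 0};
  FakeClass klass{16};
  return AllocObjectResolvedTlab(&self, &klass) != nullptr ? 0 : 1;
}

When the size check fails, the assembly falls through to the slow-path call shown above, passing the class and Thread::Current to the C++ entrypoint.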
- return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(smp, is_a53, is_a53)); + return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(is_a53, is_a53)); } Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromAssembly() { @@ -113,32 +88,28 @@ bool Arm64InstructionSetFeatures::Equals(const InstructionSetFeatures* other) co if (kArm64 != other->GetInstructionSet()) { return false; } - const Arm64InstructionSetFeatures* other_as_arm = other->AsArm64InstructionSetFeatures(); - return fix_cortex_a53_835769_ == other_as_arm->fix_cortex_a53_835769_; + const Arm64InstructionSetFeatures* other_as_arm64 = other->AsArm64InstructionSetFeatures(); + return fix_cortex_a53_835769_ == other_as_arm64->fix_cortex_a53_835769_ && + fix_cortex_a53_843419_ == other_as_arm64->fix_cortex_a53_843419_; } uint32_t Arm64InstructionSetFeatures::AsBitmap() const { - return (IsSmp() ? kSmpBitfield : 0) | (fix_cortex_a53_835769_ ? kA53Bitfield : 0); + return (fix_cortex_a53_835769_ ? kA53Bitfield : 0); } std::string Arm64InstructionSetFeatures::GetFeatureString() const { std::string result; - if (IsSmp()) { - result += "smp"; - } else { - result += "-smp"; - } if (fix_cortex_a53_835769_) { - result += ",a53"; + result += "a53"; } else { - result += ",-a53"; + result += "-a53"; } return result; } std::unique_ptr<const InstructionSetFeatures> Arm64InstructionSetFeatures::AddFeaturesFromSplitString( - const bool smp, const std::vector<std::string>& features, std::string* error_msg) const { + const std::vector<std::string>& features, std::string* error_msg) const { bool is_a53 = fix_cortex_a53_835769_; for (auto i = features.begin(); i != features.end(); i++) { std::string feature = android::base::Trim(*i); @@ -152,7 +123,7 @@ Arm64InstructionSetFeatures::AddFeaturesFromSplitString( } } return std::unique_ptr<const InstructionSetFeatures>( - new Arm64InstructionSetFeatures(smp, is_a53, is_a53)); + new Arm64InstructionSetFeatures(is_a53, is_a53)); } } // namespace art diff --git a/runtime/arch/arm64/instruction_set_features_arm64.h b/runtime/arch/arm64/instruction_set_features_arm64.h index e51aa1c43d..4243d32968 100644 --- a/runtime/arch/arm64/instruction_set_features_arm64.h +++ b/runtime/arch/arm64/instruction_set_features_arm64.h @@ -73,20 +73,19 @@ class Arm64InstructionSetFeatures FINAL : public InstructionSetFeatures { protected: // Parse a vector of the form "a53" adding these to a new ArmInstructionSetFeatures. std::unique_ptr<const InstructionSetFeatures> - AddFeaturesFromSplitString(const bool smp, const std::vector<std::string>& features, + AddFeaturesFromSplitString(const std::vector<std::string>& features, std::string* error_msg) const OVERRIDE; private: - Arm64InstructionSetFeatures(bool smp, bool needs_a53_835769_fix, bool needs_a53_843419_fix) - : InstructionSetFeatures(smp), + Arm64InstructionSetFeatures(bool needs_a53_835769_fix, bool needs_a53_843419_fix) + : InstructionSetFeatures(), fix_cortex_a53_835769_(needs_a53_835769_fix), fix_cortex_a53_843419_(needs_a53_843419_fix) { } // Bitmap positions for encoding features as a bitmap. 
enum { - kSmpBitfield = 1, - kA53Bitfield = 2, + kA53Bitfield = 1 << 0, }; const bool fix_cortex_a53_835769_; diff --git a/runtime/arch/arm64/instruction_set_features_arm64_test.cc b/runtime/arch/arm64/instruction_set_features_arm64_test.cc index 027e59c57a..cefa4993c8 100644 --- a/runtime/arch/arm64/instruction_set_features_arm64_test.cc +++ b/runtime/arch/arm64/instruction_set_features_arm64_test.cc @@ -28,8 +28,8 @@ TEST(Arm64InstructionSetFeaturesTest, Arm64Features) { ASSERT_TRUE(arm64_features.get() != nullptr) << error_msg; EXPECT_EQ(arm64_features->GetInstructionSet(), kArm64); EXPECT_TRUE(arm64_features->Equals(arm64_features.get())); - EXPECT_STREQ("smp,a53", arm64_features->GetFeatureString().c_str()); - EXPECT_EQ(arm64_features->AsBitmap(), 3U); + EXPECT_STREQ("a53", arm64_features->GetFeatureString().c_str()); + EXPECT_EQ(arm64_features->AsBitmap(), 1U); } } // namespace art diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S index b88515f21f..8b1e0388c6 100644 --- a/runtime/arch/arm64/quick_entrypoints_arm64.S +++ b/runtime/arch/arm64/quick_entrypoints_arm64.S @@ -1669,7 +1669,6 @@ END art_quick_resolve_string // Generate the allocation entrypoints for each allocator. GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_REGION_TLAB_ALLOCATORS // Comment out allocators that have arm64 specific asm. -// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB) implemented in asm // GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB) // GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB) @@ -1682,27 +1681,23 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB) -// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc). -ENTRY art_quick_alloc_object_rosalloc +// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc). +ENTRY art_quick_alloc_object_resolved_rosalloc // Fast path rosalloc allocation. - // x0: type_idx/return value, x1: ArtMethod*, xSELF(x19): Thread::Current - // x2-x7: free. - ldr x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64] // Load dex cache resolved types array - // Load the class (x2) - ldr w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT] - cbz x2, .Lart_quick_alloc_object_rosalloc_slow_path // Check null class + // x0: type, xSELF(x19): Thread::Current + // x1-x7: free. ldr x3, [xSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET] // Check if the thread local // allocation stack has room. // ldp won't work due to large offset. ldr x4, [xSELF, #THREAD_LOCAL_ALLOC_STACK_END_OFFSET] cmp x3, x4 - bhs .Lart_quick_alloc_object_rosalloc_slow_path - ldr w3, [x2, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (x3) + bhs .Lart_quick_alloc_object_resolved_rosalloc_slow_path + ldr w3, [x0, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (x3) cmp x3, #ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE // Check if the size is for a thread // local allocation. Also does the // finalizable and initialization // checks. - bhs .Lart_quick_alloc_object_rosalloc_slow_path + bhs .Lart_quick_alloc_object_resolved_rosalloc_slow_path // Compute the rosalloc bracket index // from the size. 
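The arm64 test change here reflects the re-based feature bitmap: with the smp flag gone, the A53-erratum bit moves down to bit 0 and the feature string no longer carries a leading "smp". A minimal sketch of the new encoding, written as free functions instead of the real Arm64InstructionSetFeatures methods:

#include <cassert>
#include <cstdint>
#include <string>

// Sketch of the re-based arm64 encoding: only the A53-erratum bit remains and
// it moves to bit 0 (the old layout kept bit 0 for "smp" and bit 1 for a53).
enum : uint32_t { kA53Bitfield = 1u << 0 };

uint32_t AsBitmap(bool fix_a53) { return fix_a53 ? kA53Bitfield : 0u; }

std::string GetFeatureString(bool fix_a53) { return fix_a53 ? "a53" : "-a53"; }

int main() {
  assert(AsBitmap(true) == 1u);             // Previously 3u ("smp" bit | "a53" bit).
  assert(GetFeatureString(true) == "a53");  // Previously "smp,a53".
  return 0;
}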
Since the size is // already aligned we can combine the @@ -1715,7 +1710,7 @@ ENTRY art_quick_alloc_object_rosalloc // Load the free list head (x3). This // will be the return val. ldr x3, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)] - cbz x3, .Lart_quick_alloc_object_rosalloc_slow_path + cbz x3, .Lart_quick_alloc_object_resolved_rosalloc_slow_path // "Point of no slow path". Won't go to the slow path from here on. OK to clobber x0 and x1. ldr x1, [x3, #ROSALLOC_SLOT_NEXT_OFFSET] // Load the next pointer of the head // and update the list head with the @@ -1728,8 +1723,8 @@ ENTRY art_quick_alloc_object_rosalloc #if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET #error "Class pointer needs to overwrite next pointer." #endif - POISON_HEAP_REF w2 - str w2, [x3, #MIRROR_OBJECT_CLASS_OFFSET] + POISON_HEAP_REF w0 + str w0, [x3, #MIRROR_OBJECT_CLASS_OFFSET] // Fence. This is "ish" not "ishst" so // that it also ensures ordering of // the object size load with respect @@ -1759,13 +1754,13 @@ ENTRY art_quick_alloc_object_rosalloc mov x0, x3 // Set the return value and return. ret -.Lart_quick_alloc_object_rosalloc_slow_path: - SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC - mov x2, xSELF // pass Thread::Current - bl artAllocObjectFromCodeRosAlloc // (uint32_t type_idx, Method* method, Thread*) +.Lart_quick_alloc_object_resolved_rosalloc_slow_path: + SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC + mov x1, xSELF // pass Thread::Current + bl artAllocObjectFromCodeResolvedRosAlloc // (mirror::Class* klass, Thread*) RESTORE_SAVE_REFS_ONLY_FRAME RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER -END art_quick_alloc_object_rosalloc +END art_quick_alloc_object_resolved_rosalloc // The common fast path code for art_quick_alloc_array_region_tlab. @@ -1834,16 +1829,6 @@ END art_quick_alloc_object_rosalloc ret .endm -// The common fast path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab. -// -// x0: type_idx/return value, x1: ArtMethod*, x2: Class*, xSELF(x19): Thread::Current -// x3-x7: free. -// Need to preserve x0 and x1 to the slow path. -.macro ALLOC_OBJECT_TLAB_FAST_PATH slowPathLabel - cbz x2, \slowPathLabel // Check null class - ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED \slowPathLabel -.endm - // TODO: delete ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED since it is the same as // ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED. .macro ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED slowPathLabel @@ -1853,20 +1838,18 @@ END art_quick_alloc_object_rosalloc .macro ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED slowPathLabel ldr x4, [xSELF, #THREAD_LOCAL_POS_OFFSET] ldr x5, [xSELF, #THREAD_LOCAL_END_OFFSET] - ldr w7, [x2, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (x7). + ldr w7, [x0, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (x7). add x6, x4, x7 // Add object size to tlab pos. cmp x6, x5 // Check if it fits, overflow works // since the tlab pos and end are 32 // bit values. bhi \slowPathLabel - // "Point of no slow path". Won't go to the slow path from here on. OK to clobber x0 and x1. - mov x0, x4 str x6, [xSELF, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos. ldr x5, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET] // Increment thread_local_objects. add x5, x5, #1 str x5, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET] - POISON_HEAP_REF w2 - str w2, [x0, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer. + POISON_HEAP_REF w0 + str w0, [x4, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer. 
// Fence. This is "ish" not "ishst" so // that the code after this allocation // site will see the right values in @@ -1874,91 +1857,52 @@ END art_quick_alloc_object_rosalloc // Alternatively we could use "ishst" // if we use load-acquire for the // object size load.) + mov x0, x4 dmb ish ret .endm -// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB). -ENTRY art_quick_alloc_object_tlab +// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB). +ENTRY art_quick_alloc_object_resolved_tlab // Fast path tlab allocation. - // x0: type_idx/return value, x1: ArtMethod*, xSELF(x19): Thread::Current - // x2-x7: free. + // x0: type, xSELF(x19): Thread::Current + // x1-x7: free. #if defined(USE_READ_BARRIER) mvn x0, xzr // Read barrier not supported here. ret // Return -1. #endif - ldr x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64] // Load dex cache resolved types array - // Load the class (x2) - ldr w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT] - ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_tlab_slow_path -.Lart_quick_alloc_object_tlab_slow_path: - SETUP_SAVE_REFS_ONLY_FRAME // Save callee saves in case of GC. - mov x2, xSELF // Pass Thread::Current. - bl artAllocObjectFromCodeTLAB // (uint32_t type_idx, Method* method, Thread*) + ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED .Lart_quick_alloc_object_resolved_tlab_slow_path +.Lart_quick_alloc_object_resolved_tlab_slow_path: + SETUP_SAVE_REFS_ONLY_FRAME // Save callee saves in case of GC. + mov x1, xSELF // Pass Thread::Current. + bl artAllocObjectFromCodeResolvedTLAB // (mirror::Class*, Thread*) RESTORE_SAVE_REFS_ONLY_FRAME RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER -END art_quick_alloc_object_tlab +END art_quick_alloc_object_resolved_tlab // The common code for art_quick_alloc_object_*region_tlab -.macro GENERATE_ALLOC_OBJECT_REGION_TLAB name, entrypoint, fast_path, is_resolved, read_barrier +.macro GENERATE_ALLOC_OBJECT_RESOLVED_REGION_TLAB name, entrypoint, fast_path ENTRY \name // Fast path region tlab allocation. - // x0: type_idx/resolved class/return value, x1: ArtMethod*, xSELF(x19): Thread::Current - // If is_resolved is 1 then x0 is the resolved type, otherwise it is the index. - // x2-x7: free. + // x0: type, xSELF(x19): Thread::Current + // x1-x7: free. #if !defined(USE_READ_BARRIER) mvn x0, xzr // Read barrier must be enabled here. ret // Return -1. #endif -.if \is_resolved - mov x2, x0 // class is actually stored in x0 already -.else - ldr x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64] // Load dex cache resolved types array - // Load the class (x2) - ldr w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT] - // If the class is null, go slow path. The check is required to read the lock word. - cbz w2, .Lslow_path\name -.endif -.if \read_barrier - // Most common case: GC is not marking. - ldr w3, [xSELF, #THREAD_IS_GC_MARKING_OFFSET] - cbnz x3, .Lmarking\name -.endif .Ldo_allocation\name: \fast_path .Lslow_path\name -.Lmarking\name: -.if \read_barrier - // GC is marking, check the lock word of the class for the mark bit. - // Class is not null, check mark bit in lock word. - ldr w3, [x2, #MIRROR_OBJECT_LOCK_WORD_OFFSET] - // If the bit is not zero, do the allocation. - tbnz w3, #LOCK_WORD_MARK_BIT_SHIFT, .Ldo_allocation\name - // The read barrier slow path. Mark - // the class. - SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 32 // Save registers (x0, x1, lr). - SAVE_REG xLR, 24 // Align sp by 16 bytes. - mov x0, x2 // Pass the class as the first param. 
- bl artReadBarrierMark - mov x2, x0 // Get the (marked) class back. - RESTORE_REG xLR, 24 - RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32 // Restore registers. - b .Ldo_allocation\name -.endif .Lslow_path\name: SETUP_SAVE_REFS_ONLY_FRAME // Save callee saves in case of GC. - mov x2, xSELF // Pass Thread::Current. - bl \entrypoint // (uint32_t type_idx, Method* method, Thread*) + mov x1, xSELF // Pass Thread::Current. + bl \entrypoint // (mirror::Class*, Thread*) RESTORE_SAVE_REFS_ONLY_FRAME RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER END \name .endm -// Use ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED since the null check is already done in GENERATE_ALLOC_OBJECT_TLAB. -GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_region_tlab, artAllocObjectFromCodeRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED, 0, 1 -// No read barrier for the resolved or initialized cases since the caller is responsible for the -// read barrier due to the to-space invariant. -GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED, 1, 0 -GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED, 1, 0 +GENERATE_ALLOC_OBJECT_RESOLVED_REGION_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED +GENERATE_ALLOC_OBJECT_RESOLVED_REGION_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED // TODO: We could use this macro for the normal tlab allocator too. diff --git a/runtime/arch/instruction_set_features.cc b/runtime/arch/instruction_set_features.cc index db004e7495..00d22c4f1f 100644 --- a/runtime/arch/instruction_set_features.cc +++ b/runtime/arch/instruction_set_features.cc @@ -218,7 +218,6 @@ std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::AddFeature } std::vector<std::string> features; Split(feature_list, ',', &features); - bool smp = smp_; bool use_default = false; // Have we seen the 'default' feature? bool first = false; // Is this first feature? for (auto it = features.begin(); it != features.end();) { @@ -236,14 +235,7 @@ std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::AddFeature *error_msg = "Unexpected instruction set features before 'default'"; return std::unique_ptr<const InstructionSetFeatures>(); } - } else if (feature == "smp") { - smp = true; - erase = true; - } else if (feature == "-smp") { - smp = false; - erase = true; } - // Erase the smp feature once processed. if (!erase) { ++it; } else { @@ -252,11 +244,11 @@ std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::AddFeature first = true; } // Expectation: "default" is standalone, no other flags. But an empty features vector after - // processing can also come along if the handled flags (at the moment only smp) are the only - // ones in the list. So logically, we check "default -> features.empty." + // processing can also come along if the handled flags are the only ones in the list. So + // logically, we check "default -> features.empty." 
DCHECK(!use_default || features.empty()); - return AddFeaturesFromSplitString(smp, features, error_msg); + return AddFeaturesFromSplitString(features, error_msg); } const ArmInstructionSetFeatures* InstructionSetFeatures::AsArmInstructionSetFeatures() const { diff --git a/runtime/arch/instruction_set_features.h b/runtime/arch/instruction_set_features.h index d84bc02495..b6c5c71818 100644 --- a/runtime/arch/instruction_set_features.h +++ b/runtime/arch/instruction_set_features.h @@ -76,11 +76,6 @@ class InstructionSetFeatures { // Return a string of the form "div,lpae" or "none". virtual std::string GetFeatureString() const = 0; - // Does the instruction set variant require instructions for correctness with SMP? - bool IsSmp() const { - return smp_; - } - // Down cast this ArmInstructionFeatures. const ArmInstructionSetFeatures* AsArmInstructionSetFeatures() const; @@ -102,7 +97,7 @@ class InstructionSetFeatures { virtual ~InstructionSetFeatures() {} protected: - explicit InstructionSetFeatures(bool smp) : smp_(smp) {} + InstructionSetFeatures() {} // Returns true if variant appears in the array variants. static bool FindVariantInArray(const char* const variants[], size_t num_variants, @@ -110,12 +105,10 @@ class InstructionSetFeatures { // Add architecture specific features in sub-classes. virtual std::unique_ptr<const InstructionSetFeatures> - AddFeaturesFromSplitString(bool smp, const std::vector<std::string>& features, + AddFeaturesFromSplitString(const std::vector<std::string>& features, std::string* error_msg) const = 0; private: - const bool smp_; - DISALLOW_COPY_AND_ASSIGN(InstructionSetFeatures); }; std::ostream& operator<<(std::ostream& os, const InstructionSetFeatures& rhs); diff --git a/runtime/arch/mips/instruction_set_features_mips.cc b/runtime/arch/mips/instruction_set_features_mips.cc index 5b50573695..3c5afc28a3 100644 --- a/runtime/arch/mips/instruction_set_features_mips.cc +++ b/runtime/arch/mips/instruction_set_features_mips.cc @@ -71,8 +71,6 @@ static void GetFlagsFromCppDefined(bool* mips_isa_gte2, bool* r6, bool* fpu_32bi MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromVariant( const std::string& variant, std::string* error_msg ATTRIBUTE_UNUSED) { - bool smp = true; // Conservative default. - // Override defaults based on compiler flags. // This is needed when running ART test where the variant is not defined. bool fpu_32bit; @@ -90,7 +88,7 @@ MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromVariant( fpu_32bit = (variant[kPrefixLength] < '5'); mips_isa_gte2 = (variant[kPrefixLength] >= '2'); } else if (variant == "default") { - // Default variant is: smp = true, has FPU, is gte2. This is the traditional setting. + // Default variant has FPU, is gte2. This is the traditional setting. 
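Dropping smp also narrows the shared parsing hook: AddFeaturesFromSplitString no longer receives a pre-parsed smp flag, so each backend only handles its own tokens. A compressed sketch of the new override shape, using stand-in classes (Features, Arm64Features) rather than the real headers:

#include <memory>
#include <string>
#include <vector>

// Stand-ins for the sketch; the real classes live in
// runtime/arch/instruction_set_features.h and the per-architecture headers.
class Features {
 public:
  virtual ~Features() = default;

 protected:
  // New shape of the hook: no leading "bool smp" parameter.
  virtual std::unique_ptr<const Features> AddFeaturesFromSplitString(
      const std::vector<std::string>& features, std::string* error_msg) const = 0;
};

class Arm64Features : public Features {
 protected:
  std::unique_ptr<const Features> AddFeaturesFromSplitString(
      const std::vector<std::string>& features, std::string* error_msg) const override {
    bool is_a53 = false;
    for (const std::string& feature : features) {
      if (feature == "a53") {
        is_a53 = true;
      } else if (feature == "-a53") {
        is_a53 = false;
      } else {
        *error_msg = "Unknown instruction set feature: '" + feature + "'";
        return nullptr;
      }
    }
    (void)is_a53;  // A real implementation would hand this to the new feature object.
    return std::unique_ptr<const Features>(new Arm64Features());
  }
};

int main() { return 0; }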
// // Note, we get FPU bitness and R6-ness from the build (using cpp defines, see above) // and don't override them because many things depend on the "default" variant being @@ -102,58 +100,32 @@ MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromVariant( LOG(WARNING) << "Unexpected CPU variant for Mips32 using defaults: " << variant; } - return MipsFeaturesUniquePtr(new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2, r6)); + return MipsFeaturesUniquePtr(new MipsInstructionSetFeatures(fpu_32bit, mips_isa_gte2, r6)); } -MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromBitmap( - uint32_t bitmap) { - bool smp = (bitmap & kSmpBitfield) != 0; +MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromBitmap(uint32_t bitmap) { bool fpu_32bit = (bitmap & kFpu32Bitfield) != 0; bool mips_isa_gte2 = (bitmap & kIsaRevGte2Bitfield) != 0; bool r6 = (bitmap & kR6) != 0; - return MipsFeaturesUniquePtr(new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2, r6)); + return MipsFeaturesUniquePtr(new MipsInstructionSetFeatures(fpu_32bit, mips_isa_gte2, r6)); } MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromCppDefines() { - // Assume conservative defaults. - const bool smp = true; - bool fpu_32bit; bool mips_isa_gte2; bool r6; GetFlagsFromCppDefined(&mips_isa_gte2, &r6, &fpu_32bit); - return MipsFeaturesUniquePtr(new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2, r6)); + return MipsFeaturesUniquePtr(new MipsInstructionSetFeatures(fpu_32bit, mips_isa_gte2, r6)); } MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromCpuInfo() { - // Look in /proc/cpuinfo for features we need. Only use this when we can guarantee that - // the kernel puts the appropriate feature flags in here. Sometimes it doesn't. - // Assume conservative defaults. - bool smp = false; - bool fpu_32bit; bool mips_isa_gte2; bool r6; GetFlagsFromCppDefined(&mips_isa_gte2, &r6, &fpu_32bit); - std::ifstream in("/proc/cpuinfo"); - if (!in.fail()) { - while (!in.eof()) { - std::string line; - std::getline(in, line); - if (!in.eof()) { - LOG(INFO) << "cpuinfo line: " << line; - if (line.find("processor") != std::string::npos && line.find(": 1") != std::string::npos) { - smp = true; - } - } - } - in.close(); - } else { - LOG(ERROR) << "Failed to open /proc/cpuinfo"; - } - return MipsFeaturesUniquePtr(new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2, r6)); + return MipsFeaturesUniquePtr(new MipsInstructionSetFeatures(fpu_32bit, mips_isa_gte2, r6)); } MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromHwcap() { @@ -171,30 +143,23 @@ bool MipsInstructionSetFeatures::Equals(const InstructionSetFeatures* other) con return false; } const MipsInstructionSetFeatures* other_as_mips = other->AsMipsInstructionSetFeatures(); - return (IsSmp() == other->IsSmp()) && - (fpu_32bit_ == other_as_mips->fpu_32bit_) && + return (fpu_32bit_ == other_as_mips->fpu_32bit_) && (mips_isa_gte2_ == other_as_mips->mips_isa_gte2_) && (r6_ == other_as_mips->r6_); } uint32_t MipsInstructionSetFeatures::AsBitmap() const { - return (IsSmp() ? kSmpBitfield : 0) | - (fpu_32bit_ ? kFpu32Bitfield : 0) | + return (fpu_32bit_ ? kFpu32Bitfield : 0) | (mips_isa_gte2_ ? kIsaRevGte2Bitfield : 0) | (r6_ ? 
kR6 : 0); } std::string MipsInstructionSetFeatures::GetFeatureString() const { std::string result; - if (IsSmp()) { - result += "smp"; - } else { - result += "-smp"; - } if (fpu_32bit_) { - result += ",fpu32"; + result += "fpu32"; } else { - result += ",-fpu32"; + result += "-fpu32"; } if (mips_isa_gte2_) { result += ",mips2"; @@ -209,7 +174,7 @@ std::string MipsInstructionSetFeatures::GetFeatureString() const { std::unique_ptr<const InstructionSetFeatures> MipsInstructionSetFeatures::AddFeaturesFromSplitString( - const bool smp, const std::vector<std::string>& features, std::string* error_msg) const { + const std::vector<std::string>& features, std::string* error_msg) const { bool fpu_32bit = fpu_32bit_; bool mips_isa_gte2 = mips_isa_gte2_; bool r6 = r6_; @@ -233,7 +198,7 @@ MipsInstructionSetFeatures::AddFeaturesFromSplitString( } } return std::unique_ptr<const InstructionSetFeatures>( - new MipsInstructionSetFeatures(smp, fpu_32bit, mips_isa_gte2, r6)); + new MipsInstructionSetFeatures(fpu_32bit, mips_isa_gte2, r6)); } } // namespace art diff --git a/runtime/arch/mips/instruction_set_features_mips.h b/runtime/arch/mips/instruction_set_features_mips.h index c2a28dc7fa..1aec99fa73 100644 --- a/runtime/arch/mips/instruction_set_features_mips.h +++ b/runtime/arch/mips/instruction_set_features_mips.h @@ -80,12 +80,12 @@ class MipsInstructionSetFeatures FINAL : public InstructionSetFeatures { protected: // Parse a vector of the form "fpu32", "mips2" adding these to a new MipsInstructionSetFeatures. std::unique_ptr<const InstructionSetFeatures> - AddFeaturesFromSplitString(const bool smp, const std::vector<std::string>& features, + AddFeaturesFromSplitString(const std::vector<std::string>& features, std::string* error_msg) const OVERRIDE; private: - MipsInstructionSetFeatures(bool smp, bool fpu_32bit, bool mips_isa_gte2, bool r6) - : InstructionSetFeatures(smp), + MipsInstructionSetFeatures(bool fpu_32bit, bool mips_isa_gte2, bool r6) + : InstructionSetFeatures(), fpu_32bit_(fpu_32bit), mips_isa_gte2_(mips_isa_gte2), r6_(r6) { @@ -101,10 +101,9 @@ class MipsInstructionSetFeatures FINAL : public InstructionSetFeatures { // Bitmap positions for encoding features as a bitmap. enum { - kSmpBitfield = 1, - kFpu32Bitfield = 2, - kIsaRevGte2Bitfield = 4, - kR6 = 8, + kFpu32Bitfield = 1 << 0, + kIsaRevGte2Bitfield = 1 << 1, + kR6 = 1 << 2, }; const bool fpu_32bit_; diff --git a/runtime/arch/mips/instruction_set_features_mips_test.cc b/runtime/arch/mips/instruction_set_features_mips_test.cc index 9b81ce2582..6613b84365 100644 --- a/runtime/arch/mips/instruction_set_features_mips_test.cc +++ b/runtime/arch/mips/instruction_set_features_mips_test.cc @@ -27,8 +27,8 @@ TEST(MipsInstructionSetFeaturesTest, MipsFeatures) { ASSERT_TRUE(mips_features.get() != nullptr) << error_msg; EXPECT_EQ(mips_features->GetInstructionSet(), kMips); EXPECT_TRUE(mips_features->Equals(mips_features.get())); - EXPECT_STREQ("smp,fpu32,mips2", mips_features->GetFeatureString().c_str()); - EXPECT_EQ(mips_features->AsBitmap(), 7U); + EXPECT_STREQ("fpu32,mips2", mips_features->GetFeatureString().c_str()); + EXPECT_EQ(mips_features->AsBitmap(), 3U); } } // namespace art diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S index 3e8cdc9374..964ea563b0 100644 --- a/runtime/arch/mips/quick_entrypoints_mips.S +++ b/runtime/arch/mips/quick_entrypoints_mips.S @@ -1831,116 +1831,10 @@ END \name // Generate the allocation entrypoints for each allocator. 
GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR -// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc). -ENTRY art_quick_alloc_object_rosalloc - - # Fast path rosalloc allocation - # a0: type_idx - # a1: ArtMethod* - # s1: Thread::Current - # ----------------------------- - # t0: class - # t1: object size - # t2: rosalloc run - # t3: thread stack top offset - # t4: thread stack bottom offset - # v0: free list head - # - # t5, t6 : temps - - lw $t0, ART_METHOD_DEX_CACHE_TYPES_OFFSET_32($a1) # Load dex cache resolved types - # array. - - sll $t5, $a0, COMPRESSED_REFERENCE_SIZE_SHIFT # Shift the value. - addu $t5, $t0, $t5 # Compute the index. - lw $t0, 0($t5) # Load class (t0). - beqz $t0, .Lart_quick_alloc_object_rosalloc_slow_path - - li $t6, MIRROR_CLASS_STATUS_INITIALIZED - lw $t5, MIRROR_CLASS_STATUS_OFFSET($t0) # Check class status. - bne $t5, $t6, .Lart_quick_alloc_object_rosalloc_slow_path - - # Add a fake dependence from the following access flag and size loads to the status load. This - # is to prevent those loads from being reordered above the status load and reading wrong values. - xor $t5, $t5, $t5 - addu $t0, $t0, $t5 - - lw $t5, MIRROR_CLASS_ACCESS_FLAGS_OFFSET($t0) # Check if access flags has - li $t6, ACCESS_FLAGS_CLASS_IS_FINALIZABLE # kAccClassIsFinalizable. - and $t6, $t5, $t6 - bnez $t6, .Lart_quick_alloc_object_rosalloc_slow_path - - lw $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1) # Check if thread local allocation - lw $t4, THREAD_LOCAL_ALLOC_STACK_END_OFFSET($s1) # stack has any room left. - bgeu $t3, $t4, .Lart_quick_alloc_object_rosalloc_slow_path - - lw $t1, MIRROR_CLASS_OBJECT_SIZE_OFFSET($t0) # Load object size (t1). - li $t5, ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE # Check if size is for a thread local - # allocation. - bgtu $t1, $t5, .Lart_quick_alloc_object_rosalloc_slow_path - - # Compute the rosalloc bracket index from the size. Allign up the size by the rosalloc bracket - # quantum size and divide by the quantum size and subtract by 1. - - addiu $t1, $t1, -1 # Decrease obj size and shift right - srl $t1, $t1, ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT # by quantum. - - sll $t2, $t1, POINTER_SIZE_SHIFT - addu $t2, $t2, $s1 - lw $t2, THREAD_ROSALLOC_RUNS_OFFSET($t2) # Load rosalloc run (t2). - - # Load the free list head (v0). - # NOTE: this will be the return val. - - lw $v0, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2) - beqz $v0, .Lart_quick_alloc_object_rosalloc_slow_path - nop - - # Load the next pointer of the head and update the list head with the next pointer. - - lw $t5, ROSALLOC_SLOT_NEXT_OFFSET($v0) - sw $t5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2) - - # Store the class pointer in the header. This also overwrites the first pointer. The offsets are - # asserted to match. - -#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET -#error "Class pointer needs to overwrite next pointer." -#endif - - POISON_HEAP_REF $t0 - sw $t0, MIRROR_OBJECT_CLASS_OFFSET($v0) - - # Push the new object onto the thread local allocation stack and increment the thread local - # allocation stack top. - - sw $v0, 0($t3) - addiu $t3, $t3, COMPRESSED_REFERENCE_SIZE - sw $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1) - - # Decrement the size of the free list. - - lw $t5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2) - addiu $t5, $t5, -1 - sw $t5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2) - - sync # Fence. 
- - jalr $zero, $ra - nop - - .Lart_quick_alloc_object_rosalloc_slow_path: - - SETUP_SAVE_REFS_ONLY_FRAME - la $t9, artAllocObjectFromCodeRosAlloc - jalr $t9 - move $a2, $s1 # Pass self as argument. - RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER - -END art_quick_alloc_object_rosalloc -GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB) -GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB) +GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc) +GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB) +GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB) /* * Entry from managed code to resolve a string, this stub will allocate a String and deliver an diff --git a/runtime/arch/mips64/instruction_set_features_mips64.cc b/runtime/arch/mips64/instruction_set_features_mips64.cc index 92c44e85de..5606c1d858 100644 --- a/runtime/arch/mips64/instruction_set_features_mips64.cc +++ b/runtime/arch/mips64/instruction_set_features_mips64.cc @@ -33,43 +33,19 @@ Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromVariant( if (variant != "default" && variant != "mips64r6") { LOG(WARNING) << "Unexpected CPU variant for Mips64 using defaults: " << variant; } - bool smp = true; // Conservative default. - return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures(smp)); + return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures()); } -Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromBitmap(uint32_t bitmap) { - bool smp = (bitmap & kSmpBitfield) != 0; - return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures(smp)); +Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromBitmap(uint32_t bitmap ATTRIBUTE_UNUSED) { + return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures()); } Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromCppDefines() { - const bool smp = true; - - return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures(smp)); + return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures()); } Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromCpuInfo() { - // Look in /proc/cpuinfo for features we need. Only use this when we can guarantee that - // the kernel puts the appropriate feature flags in here. Sometimes it doesn't. - bool smp = false; - - std::ifstream in("/proc/cpuinfo"); - if (!in.fail()) { - while (!in.eof()) { - std::string line; - std::getline(in, line); - if (!in.eof()) { - LOG(INFO) << "cpuinfo line: " << line; - if (line.find("processor") != std::string::npos && line.find(": 1") != std::string::npos) { - smp = true; - } - } - } - in.close(); - } else { - LOG(ERROR) << "Failed to open /proc/cpuinfo"; - } - return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures(smp)); + return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures()); } Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromHwcap() { @@ -86,26 +62,20 @@ bool Mips64InstructionSetFeatures::Equals(const InstructionSetFeatures* other) c if (kMips64 != other->GetInstructionSet()) { return false; } - return (IsSmp() == other->IsSmp()); + return true; } uint32_t Mips64InstructionSetFeatures::AsBitmap() const { - return (IsSmp() ? 
kSmpBitfield : 0); + return 0; } std::string Mips64InstructionSetFeatures::GetFeatureString() const { - std::string result; - if (IsSmp()) { - result += "smp"; - } else { - result += "-smp"; - } - return result; + return ""; } std::unique_ptr<const InstructionSetFeatures> Mips64InstructionSetFeatures::AddFeaturesFromSplitString( - const bool smp, const std::vector<std::string>& features, std::string* error_msg) const { + const std::vector<std::string>& features, std::string* error_msg) const { auto i = features.begin(); if (i != features.end()) { // We don't have any features. @@ -113,7 +83,7 @@ Mips64InstructionSetFeatures::AddFeaturesFromSplitString( *error_msg = StringPrintf("Unknown instruction set feature: '%s'", feature.c_str()); return nullptr; } - return std::unique_ptr<const InstructionSetFeatures>(new Mips64InstructionSetFeatures(smp)); + return std::unique_ptr<const InstructionSetFeatures>(new Mips64InstructionSetFeatures()); } } // namespace art diff --git a/runtime/arch/mips64/instruction_set_features_mips64.h b/runtime/arch/mips64/instruction_set_features_mips64.h index 2e66235506..c80c466dfc 100644 --- a/runtime/arch/mips64/instruction_set_features_mips64.h +++ b/runtime/arch/mips64/instruction_set_features_mips64.h @@ -29,7 +29,7 @@ class Mips64InstructionSetFeatures FINAL : public InstructionSetFeatures { public: // Process a CPU variant string like "r4000" and create InstructionSetFeatures. static Mips64FeaturesUniquePtr FromVariant(const std::string& variant, - std::string* error_msg); + std::string* error_msg); // Parse a bitmap and create an InstructionSetFeatures. static Mips64FeaturesUniquePtr FromBitmap(uint32_t bitmap); @@ -63,19 +63,13 @@ class Mips64InstructionSetFeatures FINAL : public InstructionSetFeatures { protected: // Parse a vector of the form "fpu32", "mips2" adding these to a new Mips64InstructionSetFeatures. std::unique_ptr<const InstructionSetFeatures> - AddFeaturesFromSplitString(const bool smp, - const std::vector<std::string>& features, + AddFeaturesFromSplitString(const std::vector<std::string>& features, std::string* error_msg) const OVERRIDE; private: - explicit Mips64InstructionSetFeatures(bool smp) : InstructionSetFeatures(smp) { + Mips64InstructionSetFeatures() : InstructionSetFeatures() { } - // Bitmap positions for encoding features as a bitmap. 
- enum { - kSmpBitfield = 1, - }; - DISALLOW_COPY_AND_ASSIGN(Mips64InstructionSetFeatures); }; diff --git a/runtime/arch/mips64/instruction_set_features_mips64_test.cc b/runtime/arch/mips64/instruction_set_features_mips64_test.cc index dc3450677b..1d037947fa 100644 --- a/runtime/arch/mips64/instruction_set_features_mips64_test.cc +++ b/runtime/arch/mips64/instruction_set_features_mips64_test.cc @@ -27,8 +27,8 @@ TEST(Mips64InstructionSetFeaturesTest, Mips64Features) { ASSERT_TRUE(mips64_features.get() != nullptr) << error_msg; EXPECT_EQ(mips64_features->GetInstructionSet(), kMips64); EXPECT_TRUE(mips64_features->Equals(mips64_features.get())); - EXPECT_STREQ("smp", mips64_features->GetFeatureString().c_str()); - EXPECT_EQ(mips64_features->AsBitmap(), 1U); + EXPECT_STREQ("", mips64_features->GetFeatureString().c_str()); + EXPECT_EQ(mips64_features->AsBitmap(), 0U); } } // namespace art diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S index 0861d2d73e..2a18d53aea 100644 --- a/runtime/arch/mips64/quick_entrypoints_mips64.S +++ b/runtime/arch/mips64/quick_entrypoints_mips64.S @@ -1775,107 +1775,9 @@ END \name // Generate the allocation entrypoints for each allocator. GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR -// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc). -ENTRY art_quick_alloc_object_rosalloc - - # Fast path rosalloc allocation - # a0: type_idx - # a1: ArtMethod* - # s1: Thread::Current - # ----------------------------- - # t0: class - # t1: object size - # t2: rosalloc run - # t3: thread stack top offset - # a4: thread stack bottom offset - # v0: free list head - # - # a5, a6 : temps - - ld $t0, ART_METHOD_DEX_CACHE_TYPES_OFFSET_64($a1) # Load dex cache resolved types array. - - dsll $a5, $a0, COMPRESSED_REFERENCE_SIZE_SHIFT # Shift the value. - daddu $a5, $t0, $a5 # Compute the index. - lwu $t0, 0($a5) # Load class (t0). - beqzc $t0, .Lart_quick_alloc_object_rosalloc_slow_path - - li $a6, MIRROR_CLASS_STATUS_INITIALIZED - lwu $a5, MIRROR_CLASS_STATUS_OFFSET($t0) # Check class status. - bnec $a5, $a6, .Lart_quick_alloc_object_rosalloc_slow_path - - # Add a fake dependence from the following access flag and size loads to the status load. This - # is to prevent those loads from being reordered above the status load and reading wrong values. - xor $a5, $a5, $a5 - daddu $t0, $t0, $a5 - - lwu $a5, MIRROR_CLASS_ACCESS_FLAGS_OFFSET($t0) # Check if access flags has - li $a6, ACCESS_FLAGS_CLASS_IS_FINALIZABLE # kAccClassIsFinalizable. - and $a6, $a5, $a6 - bnezc $a6, .Lart_quick_alloc_object_rosalloc_slow_path - - ld $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1) # Check if thread local allocation stack - ld $a4, THREAD_LOCAL_ALLOC_STACK_END_OFFSET($s1) # has any room left. - bgeuc $t3, $a4, .Lart_quick_alloc_object_rosalloc_slow_path - - lwu $t1, MIRROR_CLASS_OBJECT_SIZE_OFFSET($t0) # Load object size (t1). - li $a5, ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE # Check if size is for a thread local - # allocation. - bltuc $a5, $t1, .Lart_quick_alloc_object_rosalloc_slow_path - - # Compute the rosalloc bracket index from the size. Allign up the size by the rosalloc bracket - # quantum size and divide by the quantum size and subtract by 1. - daddiu $t1, $t1, -1 # Decrease obj size and shift right by - dsrl $t1, $t1, ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT # quantum. - - dsll $t2, $t1, POINTER_SIZE_SHIFT - daddu $t2, $t2, $s1 - ld $t2, THREAD_ROSALLOC_RUNS_OFFSET($t2) # Load rosalloc run (t2). 
- - # Load the free list head (v0). - # NOTE: this will be the return val. - ld $v0, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2) - beqzc $v0, .Lart_quick_alloc_object_rosalloc_slow_path - - # Load the next pointer of the head and update the list head with the next pointer. - ld $a5, ROSALLOC_SLOT_NEXT_OFFSET($v0) - sd $a5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2) - - # Store the class pointer in the header. This also overwrites the first pointer. The offsets are - # asserted to match. - -#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET -#error "Class pointer needs to overwrite next pointer." -#endif - - POISON_HEAP_REF $t0 - sw $t0, MIRROR_OBJECT_CLASS_OFFSET($v0) - - # Push the new object onto the thread local allocation stack and increment the thread local - # allocation stack top. - sd $v0, 0($t3) - daddiu $t3, $t3, COMPRESSED_REFERENCE_SIZE - sd $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1) - - # Decrement the size of the free list. - lw $a5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2) - addiu $a5, $a5, -1 - sw $a5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2) - - sync # Fence. - - jalr $zero, $ra - .cpreturn # Restore gp from t8 in branch delay slot. - -.Lart_quick_alloc_object_rosalloc_slow_path: - SETUP_SAVE_REFS_ONLY_FRAME - jal artAllocObjectFromCodeRosAlloc - move $a2 ,$s1 # Pass self as argument. - RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER - -END art_quick_alloc_object_rosalloc - -GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB) -GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB) +GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc) +GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB) +GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB) /* * Entry from managed code to resolve a string, this stub will allocate a String and deliver an diff --git a/runtime/arch/quick_alloc_entrypoints.S b/runtime/arch/quick_alloc_entrypoints.S index db2fdcabea..abd9046174 100644 --- a/runtime/arch/quick_alloc_entrypoints.S +++ b/runtime/arch/quick_alloc_entrypoints.S @@ -15,15 +15,13 @@ */ .macro GENERATE_ALLOC_ENTRYPOINTS c_suffix, cxx_suffix -// Called by managed code to allocate an object. -TWO_ARG_DOWNCALL art_quick_alloc_object\c_suffix, artAllocObjectFromCode\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // Called by managed code to allocate an object of a resolved class. -TWO_ARG_DOWNCALL art_quick_alloc_object_resolved\c_suffix, artAllocObjectFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER +ONE_ARG_DOWNCALL art_quick_alloc_object_resolved\c_suffix, artAllocObjectFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // Called by managed code to allocate an object of an initialized class. -TWO_ARG_DOWNCALL art_quick_alloc_object_initialized\c_suffix, artAllocObjectFromCodeInitialized\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER +ONE_ARG_DOWNCALL art_quick_alloc_object_initialized\c_suffix, artAllocObjectFromCodeInitialized\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // Called by managed code to allocate an object when the caller doesn't know whether it has access // to the created type. 
-TWO_ARG_DOWNCALL art_quick_alloc_object_with_access_check\c_suffix, artAllocObjectFromCodeWithAccessCheck\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER +ONE_ARG_DOWNCALL art_quick_alloc_object_with_checks\c_suffix, artAllocObjectFromCodeWithChecks\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // Called by managed code to allocate an array. THREE_ARG_DOWNCALL art_quick_alloc_array\c_suffix, artAllocArrayFromCode\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // Called by managed code to allocate an array of a resolve class. @@ -61,14 +59,12 @@ GENERATE_ALLOC_ENTRYPOINTS _region_tlab_instrumented, RegionTLABInstrumented // Generate the allocation entrypoints for each allocator. This is used as an alternative to // GNERATE_ALL_ALLOC_ENTRYPOINTS for selectively implementing allocation fast paths in // hand-written assembly. -#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(c_suffix, cxx_suffix) \ - TWO_ARG_DOWNCALL art_quick_alloc_object ## c_suffix, artAllocObjectFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER #define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(c_suffix, cxx_suffix) \ - TWO_ARG_DOWNCALL art_quick_alloc_object_resolved ## c_suffix, artAllocObjectFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER + ONE_ARG_DOWNCALL art_quick_alloc_object_resolved ## c_suffix, artAllocObjectFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER #define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(c_suffix, cxx_suffix) \ - TWO_ARG_DOWNCALL art_quick_alloc_object_initialized ## c_suffix, artAllocObjectFromCodeInitialized ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER + ONE_ARG_DOWNCALL art_quick_alloc_object_initialized ## c_suffix, artAllocObjectFromCodeInitialized ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER #define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(c_suffix, cxx_suffix) \ - TWO_ARG_DOWNCALL art_quick_alloc_object_with_access_check ## c_suffix, artAllocObjectFromCodeWithAccessCheck ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER + ONE_ARG_DOWNCALL art_quick_alloc_object_with_checks ## c_suffix, artAllocObjectFromCodeWithChecks ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER #define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(c_suffix, cxx_suffix) \ THREE_ARG_DOWNCALL art_quick_alloc_array ## c_suffix, artAllocArrayFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER #define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(c_suffix, cxx_suffix) \ @@ -93,8 +89,7 @@ GENERATE_ALLOC_ENTRYPOINTS_FOR_REGION_TLAB_ALLOCATOR .macro GENERATE_ALLOC_ENTRYPOINTS_FOR_REGION_TLAB_ALLOCATOR // This is to be separately defined for each architecture to allow a hand-written assembly fast path. -// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB) -GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB) +// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_region_tlab, RegionTLAB) @@ -109,8 +104,7 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB) .macro GENERATE_ALLOC_ENTRYPOINTS_FOR_TLAB_ALLOCATOR // This is to be separately defined for each architecture to allow a hand-written assembly fast path. 
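Since the ArtMethod*/type_idx argument is gone, the generated object-allocation stubs drop from TWO_ARG_DOWNCALL to ONE_ARG_DOWNCALL and the access-check flavor becomes "_with_checks". Each invocation of the macros above pastes a C suffix and a C++ suffix to pair a quick stub with its runtime entrypoint; a small host-side illustration of that pasting (the macro here only prints the generated name pairs and is not runtime code):

#include <cstdio>

// Host-side illustration of the suffix pasting done by the assembly macros.
#define STRINGIFY_IMPL(x) #x
#define STRINGIFY(x) STRINGIFY_IMPL(x)
#define ALLOC_OBJECT_RESOLVED_NAMES(c_suffix, cxx_suffix)  \
  { STRINGIFY(art_quick_alloc_object_resolved##c_suffix),  \
    STRINGIFY(artAllocObjectFromCodeResolved##cxx_suffix) }

int main() {
  const char* names[][2] = {
    ALLOC_OBJECT_RESOLVED_NAMES(_rosalloc, RosAlloc),
    ALLOC_OBJECT_RESOLVED_NAMES(_tlab, TLAB),
    ALLOC_OBJECT_RESOLVED_NAMES(_region_tlab, RegionTLAB),
  };
  for (auto& pair : names) {
    std::printf("%s -> %s\n", pair[0], pair[1]);
  }
  return 0;
}

For example, the _rosalloc/RosAlloc pair yields art_quick_alloc_object_resolved_rosalloc calling artAllocObjectFromCodeResolvedRosAlloc, matching the hand-written arm and arm64 overrides earlier in the patch.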
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB) -GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB) +// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab, TLAB) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_tlab, TLAB) @@ -129,7 +123,6 @@ GENERATE_ALLOC_ENTRYPOINTS_FOR_TLAB_ALLOCATOR .endm .macro GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_TLAB_ALLOCATORS -GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_dlmalloc, DlMalloc) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_dlmalloc, DlMalloc) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_dlmalloc, DlMalloc) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_dlmalloc, DlMalloc) @@ -142,7 +135,6 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_dlmalloc, DlMalloc) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_dlmalloc, DlMalloc) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_dlmalloc, DlMalloc) -GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_dlmalloc_instrumented, DlMallocInstrumented) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_dlmalloc_instrumented, DlMallocInstrumented) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_dlmalloc_instrumented, DlMallocInstrumented) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_dlmalloc_instrumented, DlMallocInstrumented) @@ -156,8 +148,7 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_dlmalloc_instrumented, DlMal GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_dlmalloc_instrumented, DlMallocInstrumented) // This is to be separately defined for each architecture to allow a hand-written assembly fast path. -// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc) -GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc) +// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc, RosAlloc) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_rosalloc, RosAlloc) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_rosalloc, RosAlloc) @@ -169,7 +160,6 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_rosalloc, RosAlloc) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_rosalloc, RosAlloc) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_rosalloc, RosAlloc) -GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc_instrumented, RosAllocInstrumented) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc_instrumented, RosAllocInstrumented) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc_instrumented, RosAllocInstrumented) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_rosalloc_instrumented, RosAllocInstrumented) @@ -182,7 +172,6 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_rosalloc_instrumented, RosAl GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_rosalloc_instrumented, RosAllocInstrumented) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_rosalloc_instrumented, RosAllocInstrumented) -GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_bump_pointer, BumpPointer) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_bump_pointer, BumpPointer) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_bump_pointer, BumpPointer) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_bump_pointer, BumpPointer) @@ -195,7 +184,6 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_bump_pointer, BumpPointer) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_bump_pointer, BumpPointer) 
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_bump_pointer, BumpPointer) -GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_bump_pointer_instrumented, BumpPointerInstrumented) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_bump_pointer_instrumented, BumpPointerInstrumented) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_bump_pointer_instrumented, BumpPointerInstrumented) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_bump_pointer_instrumented, BumpPointerInstrumented) @@ -208,7 +196,6 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_bump_pointer_instrumented, B GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_bump_pointer_instrumented, BumpPointerInstrumented) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_bump_pointer_instrumented, BumpPointerInstrumented) -GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab_instrumented, TLABInstrumented) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab_instrumented, TLABInstrumented) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab_instrumented, TLABInstrumented) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab_instrumented, TLABInstrumented) @@ -221,7 +208,6 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab_instrumented, TLABInstr GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab_instrumented, TLABInstrumented) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab_instrumented, TLABInstrumented) -GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region, Region) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region, Region) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region, Region) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region, Region) @@ -234,7 +220,6 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region, Region) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region, Region) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region, Region) -GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_instrumented, RegionInstrumented) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_instrumented, RegionInstrumented) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_instrumented, RegionInstrumented) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_instrumented, RegionInstrumented) @@ -247,7 +232,6 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_instrumented, RegionI GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_instrumented, RegionInstrumented) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_instrumented, RegionInstrumented) -GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab_instrumented, RegionTLABInstrumented) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab_instrumented, RegionTLABInstrumented) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab_instrumented, RegionTLABInstrumented) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab_instrumented, RegionTLABInstrumented) diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc index 9e385f839f..ee65fa8ab0 100644 --- a/runtime/arch/stub_test.cc +++ b/runtime/arch/stub_test.cc @@ -1062,12 +1062,8 @@ TEST_F(StubTest, AllocObject) { EXPECT_FALSE(self->IsExceptionPending()); { - // Use an arbitrary method from c to use as referrer - size_t result = Invoke3(static_cast<size_t>(c->GetDexTypeIndex().index_), // type_idx - // arbitrary - reinterpret_cast<size_t>(c->GetVirtualMethod(0, kRuntimePointerSize)), - 0U, - StubTest::GetEntrypoint(self, kQuickAllocObject), + size_t 
result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U, + StubTest::GetEntrypoint(self, kQuickAllocObjectWithChecks), self); EXPECT_FALSE(self->IsExceptionPending()); @@ -1078,8 +1074,6 @@ TEST_F(StubTest, AllocObject) { } { - // We can use null in the second argument as we do not need a method here (not used in - // resolved/initialized cases) size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U, StubTest::GetEntrypoint(self, kQuickAllocObjectResolved), self); @@ -1092,8 +1086,6 @@ TEST_F(StubTest, AllocObject) { } { - // We can use null in the second argument as we do not need a method here (not used in - // resolved/initialized cases) size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U, StubTest::GetEntrypoint(self, kQuickAllocObjectInitialized), self); diff --git a/runtime/arch/x86/instruction_set_features_x86.cc b/runtime/arch/x86/instruction_set_features_x86.cc index c520d63cf3..578812297a 100644 --- a/runtime/arch/x86/instruction_set_features_x86.cc +++ b/runtime/arch/x86/instruction_set_features_x86.cc @@ -54,7 +54,6 @@ static constexpr const char* x86_variants_with_popcnt[] = { }; X86FeaturesUniquePtr X86InstructionSetFeatures::Create(bool x86_64, - bool smp, bool has_SSSE3, bool has_SSE4_1, bool has_SSE4_2, @@ -62,16 +61,14 @@ X86FeaturesUniquePtr X86InstructionSetFeatures::Create(bool x86_64, bool has_AVX2, bool has_POPCNT) { if (x86_64) { - return X86FeaturesUniquePtr(new X86_64InstructionSetFeatures(smp, - has_SSSE3, + return X86FeaturesUniquePtr(new X86_64InstructionSetFeatures(has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, has_AVX2, has_POPCNT)); } else { - return X86FeaturesUniquePtr(new X86InstructionSetFeatures(smp, - has_SSSE3, + return X86FeaturesUniquePtr(new X86InstructionSetFeatures(has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, @@ -83,7 +80,6 @@ X86FeaturesUniquePtr X86InstructionSetFeatures::Create(bool x86_64, X86FeaturesUniquePtr X86InstructionSetFeatures::FromVariant( const std::string& variant, std::string* error_msg ATTRIBUTE_UNUSED, bool x86_64) { - bool smp = true; // Conservative default. 
bool has_SSSE3 = FindVariantInArray(x86_variants_with_ssse3, arraysize(x86_variants_with_ssse3), variant); bool has_SSE4_1 = FindVariantInArray(x86_variants_with_sse4_1, @@ -106,23 +102,20 @@ X86FeaturesUniquePtr X86InstructionSetFeatures::FromVariant( LOG(WARNING) << "Unexpected CPU variant for X86 using defaults: " << variant; } - return Create(x86_64, smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, has_AVX2, has_POPCNT); + return Create(x86_64, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, has_AVX2, has_POPCNT); } X86FeaturesUniquePtr X86InstructionSetFeatures::FromBitmap(uint32_t bitmap, bool x86_64) { - bool smp = (bitmap & kSmpBitfield) != 0; bool has_SSSE3 = (bitmap & kSsse3Bitfield) != 0; bool has_SSE4_1 = (bitmap & kSse4_1Bitfield) != 0; bool has_SSE4_2 = (bitmap & kSse4_2Bitfield) != 0; bool has_AVX = (bitmap & kAvxBitfield) != 0; bool has_AVX2 = (bitmap & kAvxBitfield) != 0; bool has_POPCNT = (bitmap & kPopCntBitfield) != 0; - return Create(x86_64, smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, has_AVX2, has_POPCNT); + return Create(x86_64, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, has_AVX2, has_POPCNT); } X86FeaturesUniquePtr X86InstructionSetFeatures::FromCppDefines(bool x86_64) { - const bool smp = true; - #ifndef __SSSE3__ const bool has_SSSE3 = false; #else @@ -159,13 +152,12 @@ X86FeaturesUniquePtr X86InstructionSetFeatures::FromCppDefines(bool x86_64) { const bool has_POPCNT = true; #endif - return Create(x86_64, smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, has_AVX2, has_POPCNT); + return Create(x86_64, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, has_AVX2, has_POPCNT); } X86FeaturesUniquePtr X86InstructionSetFeatures::FromCpuInfo(bool x86_64) { // Look in /proc/cpuinfo for features we need. Only use this when we can guarantee that // the kernel puts the appropriate feature flags in here. Sometimes it doesn't. - bool smp = false; bool has_SSSE3 = false; bool has_SSE4_1 = false; bool has_SSE4_2 = false; @@ -200,9 +192,6 @@ X86FeaturesUniquePtr X86InstructionSetFeatures::FromCpuInfo(bool x86_64) { if (line.find("popcnt") != std::string::npos) { has_POPCNT = true; } - } else if (line.find("processor") != std::string::npos && - line.find(": 1") != std::string::npos) { - smp = true; } } } @@ -210,7 +199,7 @@ X86FeaturesUniquePtr X86InstructionSetFeatures::FromCpuInfo(bool x86_64) { } else { LOG(ERROR) << "Failed to open /proc/cpuinfo"; } - return Create(x86_64, smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, has_AVX2, has_POPCNT); + return Create(x86_64, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, has_AVX2, has_POPCNT); } X86FeaturesUniquePtr X86InstructionSetFeatures::FromHwcap(bool x86_64) { @@ -228,8 +217,7 @@ bool X86InstructionSetFeatures::Equals(const InstructionSetFeatures* other) cons return false; } const X86InstructionSetFeatures* other_as_x86 = other->AsX86InstructionSetFeatures(); - return (IsSmp() == other->IsSmp()) && - (has_SSSE3_ == other_as_x86->has_SSSE3_) && + return (has_SSSE3_ == other_as_x86->has_SSSE3_) && (has_SSE4_1_ == other_as_x86->has_SSE4_1_) && (has_SSE4_2_ == other_as_x86->has_SSE4_2_) && (has_AVX_ == other_as_x86->has_AVX_) && @@ -238,8 +226,7 @@ bool X86InstructionSetFeatures::Equals(const InstructionSetFeatures* other) cons } uint32_t X86InstructionSetFeatures::AsBitmap() const { - return (IsSmp() ? kSmpBitfield : 0) | - (has_SSSE3_ ? kSsse3Bitfield : 0) | + return (has_SSSE3_ ? kSsse3Bitfield : 0) | (has_SSE4_1_ ? kSse4_1Bitfield : 0) | (has_SSE4_2_ ? kSse4_2Bitfield : 0) | (has_AVX_ ? 
kAvxBitfield : 0) | @@ -249,15 +236,10 @@ uint32_t X86InstructionSetFeatures::AsBitmap() const { std::string X86InstructionSetFeatures::GetFeatureString() const { std::string result; - if (IsSmp()) { - result += "smp"; - } else { - result += "-smp"; - } if (has_SSSE3_) { - result += ",ssse3"; + result += "ssse3"; } else { - result += ",-ssse3"; + result += "-ssse3"; } if (has_SSE4_1_) { result += ",sse4.1"; @@ -288,7 +270,7 @@ std::string X86InstructionSetFeatures::GetFeatureString() const { } std::unique_ptr<const InstructionSetFeatures> X86InstructionSetFeatures::AddFeaturesFromSplitString( - const bool smp, const std::vector<std::string>& features, bool x86_64, + const std::vector<std::string>& features, bool x86_64, std::string* error_msg) const { bool has_SSSE3 = has_SSSE3_; bool has_SSE4_1 = has_SSE4_1_; @@ -327,7 +309,7 @@ std::unique_ptr<const InstructionSetFeatures> X86InstructionSetFeatures::AddFeat return nullptr; } } - return Create(x86_64, smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, has_AVX2, has_POPCNT); + return Create(x86_64, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, has_AVX2, has_POPCNT); } } // namespace art diff --git a/runtime/arch/x86/instruction_set_features_x86.h b/runtime/arch/x86/instruction_set_features_x86.h index 672892e5a5..eb8a710e37 100644 --- a/runtime/arch/x86/instruction_set_features_x86.h +++ b/runtime/arch/x86/instruction_set_features_x86.h @@ -69,18 +69,23 @@ class X86InstructionSetFeatures : public InstructionSetFeatures { protected: // Parse a string of the form "ssse3" adding these to a new InstructionSetFeatures. virtual std::unique_ptr<const InstructionSetFeatures> - AddFeaturesFromSplitString(const bool smp, const std::vector<std::string>& features, + AddFeaturesFromSplitString(const std::vector<std::string>& features, std::string* error_msg) const OVERRIDE { - return AddFeaturesFromSplitString(smp, features, false, error_msg); + return AddFeaturesFromSplitString(features, false, error_msg); } std::unique_ptr<const InstructionSetFeatures> - AddFeaturesFromSplitString(const bool smp, const std::vector<std::string>& features, - bool x86_64, std::string* error_msg) const; - - X86InstructionSetFeatures(bool smp, bool has_SSSE3, bool has_SSE4_1, bool has_SSE4_2, - bool has_AVX, bool has_AVX2, bool has_POPCNT) - : InstructionSetFeatures(smp), + AddFeaturesFromSplitString(const std::vector<std::string>& features, + bool x86_64, + std::string* error_msg) const; + + X86InstructionSetFeatures(bool has_SSSE3, + bool has_SSE4_1, + bool has_SSE4_2, + bool has_AVX, + bool has_AVX2, + bool has_POPCNT) + : InstructionSetFeatures(), has_SSSE3_(has_SSSE3), has_SSE4_1_(has_SSE4_1), has_SSE4_2_(has_SSE4_2), @@ -90,7 +95,6 @@ class X86InstructionSetFeatures : public InstructionSetFeatures { } static X86FeaturesUniquePtr Create(bool x86_64, - bool smp, bool has_SSSE3, bool has_SSE4_1, bool has_SSE4_2, @@ -101,13 +105,12 @@ class X86InstructionSetFeatures : public InstructionSetFeatures { private: // Bitmap positions for encoding features as a bitmap. enum { - kSmpBitfield = 1, - kSsse3Bitfield = 2, - kSse4_1Bitfield = 4, - kSse4_2Bitfield = 8, - kAvxBitfield = 16, - kAvx2Bitfield = 32, - kPopCntBitfield = 64, + kSsse3Bitfield = 1 << 0, + kSse4_1Bitfield = 1 << 1, + kSse4_2Bitfield = 1 << 2, + kAvxBitfield = 1 << 3, + kAvx2Bitfield = 1 << 4, + kPopCntBitfield = 1 << 5, }; const bool has_SSSE3_; // x86 128bit SIMD - Supplemental SSE. 
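As a rough standalone illustration of the repacked x86 feature bitmap above (this is a hand-written sketch, not ART's X86InstructionSetFeatures class; the names X86Features and FeatureString are invented for the example), each feature now occupies one contiguous bit starting at 1 << 0 once the smp bit is dropped, which is why the test expectations below change from 79U/3U/1U to 39U/1U/0U and the feature strings lose their leading "smp," element:

#include <cstdint>
#include <iostream>
#include <string>

namespace {

// Same bit layout as the updated enum in instruction_set_features_x86.h.
enum X86FeatureBits : uint32_t {
  kSsse3Bitfield  = 1u << 0,
  kSse4_1Bitfield = 1u << 1,
  kSse4_2Bitfield = 1u << 2,
  kAvxBitfield    = 1u << 3,
  kAvx2Bitfield   = 1u << 4,
  kPopCntBitfield = 1u << 5,
};

struct X86Features {
  bool has_ssse3, has_sse4_1, has_sse4_2, has_avx, has_avx2, has_popcnt;

  // Mirrors the shape of the new AsBitmap(): no smp bit, so an all-false
  // default variant encodes to 0.
  uint32_t AsBitmap() const {
    return (has_ssse3  ? kSsse3Bitfield  : 0) |
           (has_sse4_1 ? kSse4_1Bitfield : 0) |
           (has_sse4_2 ? kSse4_2Bitfield : 0) |
           (has_avx    ? kAvxBitfield    : 0) |
           (has_avx2   ? kAvx2Bitfield   : 0) |
           (has_popcnt ? kPopCntBitfield : 0);
  }

  // Mirrors the shape of the new GetFeatureString(): no leading "smp," element.
  std::string FeatureString() const {
    auto flag = [](bool b, const char* name) {
      return std::string(b ? "" : "-") + name;
    };
    return flag(has_ssse3, "ssse3") + "," + flag(has_sse4_1, "sse4.1") + "," +
           flag(has_sse4_2, "sse4.2") + "," + flag(has_avx, "avx") + "," +
           flag(has_avx2, "avx2") + "," + flag(has_popcnt, "popcnt");
  }
};

}  // namespace

int main() {
  // A "silvermont"-like configuration: ssse3, sse4.1, sse4.2 and popcnt set.
  X86Features silvermont{true, true, true, false, false, true};
  // Prints: ssse3,sse4.1,sse4.2,-avx,-avx2,popcnt -> 39   (1 + 2 + 4 + 32)
  std::cout << silvermont.FeatureString() << " -> " << silvermont.AsBitmap() << "\n";
  return 0;
}

The design point is that removing the always-true smp flag both simplifies every Create/FromVariant/FromBitmap/FromCpuInfo path and shifts all remaining feature bits down by one, so any externally stored bitmaps or feature strings from before this change are not comparable to the new encoding.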
diff --git a/runtime/arch/x86/instruction_set_features_x86_test.cc b/runtime/arch/x86/instruction_set_features_x86_test.cc index 9e154c6ecf..7e6ad3ecbf 100644 --- a/runtime/arch/x86/instruction_set_features_x86_test.cc +++ b/runtime/arch/x86/instruction_set_features_x86_test.cc @@ -27,9 +27,9 @@ TEST(X86InstructionSetFeaturesTest, X86FeaturesFromDefaultVariant) { ASSERT_TRUE(x86_features.get() != nullptr) << error_msg; EXPECT_EQ(x86_features->GetInstructionSet(), kX86); EXPECT_TRUE(x86_features->Equals(x86_features.get())); - EXPECT_STREQ("smp,-ssse3,-sse4.1,-sse4.2,-avx,-avx2,-popcnt", + EXPECT_STREQ("-ssse3,-sse4.1,-sse4.2,-avx,-avx2,-popcnt", x86_features->GetFeatureString().c_str()); - EXPECT_EQ(x86_features->AsBitmap(), 1U); + EXPECT_EQ(x86_features->AsBitmap(), 0U); } TEST(X86InstructionSetFeaturesTest, X86FeaturesFromAtomVariant) { @@ -40,9 +40,9 @@ TEST(X86InstructionSetFeaturesTest, X86FeaturesFromAtomVariant) { ASSERT_TRUE(x86_features.get() != nullptr) << error_msg; EXPECT_EQ(x86_features->GetInstructionSet(), kX86); EXPECT_TRUE(x86_features->Equals(x86_features.get())); - EXPECT_STREQ("smp,ssse3,-sse4.1,-sse4.2,-avx,-avx2,-popcnt", + EXPECT_STREQ("ssse3,-sse4.1,-sse4.2,-avx,-avx2,-popcnt", x86_features->GetFeatureString().c_str()); - EXPECT_EQ(x86_features->AsBitmap(), 3U); + EXPECT_EQ(x86_features->AsBitmap(), 1U); // Build features for a 32-bit x86 default processor. std::unique_ptr<const InstructionSetFeatures> x86_default_features( @@ -50,9 +50,9 @@ TEST(X86InstructionSetFeaturesTest, X86FeaturesFromAtomVariant) { ASSERT_TRUE(x86_default_features.get() != nullptr) << error_msg; EXPECT_EQ(x86_default_features->GetInstructionSet(), kX86); EXPECT_TRUE(x86_default_features->Equals(x86_default_features.get())); - EXPECT_STREQ("smp,-ssse3,-sse4.1,-sse4.2,-avx,-avx2,-popcnt", + EXPECT_STREQ("-ssse3,-sse4.1,-sse4.2,-avx,-avx2,-popcnt", x86_default_features->GetFeatureString().c_str()); - EXPECT_EQ(x86_default_features->AsBitmap(), 1U); + EXPECT_EQ(x86_default_features->AsBitmap(), 0U); // Build features for a 64-bit x86-64 atom processor. std::unique_ptr<const InstructionSetFeatures> x86_64_features( @@ -60,9 +60,9 @@ TEST(X86InstructionSetFeaturesTest, X86FeaturesFromAtomVariant) { ASSERT_TRUE(x86_64_features.get() != nullptr) << error_msg; EXPECT_EQ(x86_64_features->GetInstructionSet(), kX86_64); EXPECT_TRUE(x86_64_features->Equals(x86_64_features.get())); - EXPECT_STREQ("smp,ssse3,-sse4.1,-sse4.2,-avx,-avx2,-popcnt", + EXPECT_STREQ("ssse3,-sse4.1,-sse4.2,-avx,-avx2,-popcnt", x86_64_features->GetFeatureString().c_str()); - EXPECT_EQ(x86_64_features->AsBitmap(), 3U); + EXPECT_EQ(x86_64_features->AsBitmap(), 1U); EXPECT_FALSE(x86_64_features->Equals(x86_features.get())); EXPECT_FALSE(x86_64_features->Equals(x86_default_features.get())); @@ -77,9 +77,9 @@ TEST(X86InstructionSetFeaturesTest, X86FeaturesFromSilvermontVariant) { ASSERT_TRUE(x86_features.get() != nullptr) << error_msg; EXPECT_EQ(x86_features->GetInstructionSet(), kX86); EXPECT_TRUE(x86_features->Equals(x86_features.get())); - EXPECT_STREQ("smp,ssse3,sse4.1,sse4.2,-avx,-avx2,popcnt", + EXPECT_STREQ("ssse3,sse4.1,sse4.2,-avx,-avx2,popcnt", x86_features->GetFeatureString().c_str()); - EXPECT_EQ(x86_features->AsBitmap(), 79U); + EXPECT_EQ(x86_features->AsBitmap(), 39U); // Build features for a 32-bit x86 default processor. 
std::unique_ptr<const InstructionSetFeatures> x86_default_features( @@ -87,9 +87,9 @@ TEST(X86InstructionSetFeaturesTest, X86FeaturesFromSilvermontVariant) { ASSERT_TRUE(x86_default_features.get() != nullptr) << error_msg; EXPECT_EQ(x86_default_features->GetInstructionSet(), kX86); EXPECT_TRUE(x86_default_features->Equals(x86_default_features.get())); - EXPECT_STREQ("smp,-ssse3,-sse4.1,-sse4.2,-avx,-avx2,-popcnt", + EXPECT_STREQ("-ssse3,-sse4.1,-sse4.2,-avx,-avx2,-popcnt", x86_default_features->GetFeatureString().c_str()); - EXPECT_EQ(x86_default_features->AsBitmap(), 1U); + EXPECT_EQ(x86_default_features->AsBitmap(), 0U); // Build features for a 64-bit x86-64 silvermont processor. std::unique_ptr<const InstructionSetFeatures> x86_64_features( @@ -97,9 +97,9 @@ TEST(X86InstructionSetFeaturesTest, X86FeaturesFromSilvermontVariant) { ASSERT_TRUE(x86_64_features.get() != nullptr) << error_msg; EXPECT_EQ(x86_64_features->GetInstructionSet(), kX86_64); EXPECT_TRUE(x86_64_features->Equals(x86_64_features.get())); - EXPECT_STREQ("smp,ssse3,sse4.1,sse4.2,-avx,-avx2,popcnt", + EXPECT_STREQ("ssse3,sse4.1,sse4.2,-avx,-avx2,popcnt", x86_64_features->GetFeatureString().c_str()); - EXPECT_EQ(x86_64_features->AsBitmap(), 79U); + EXPECT_EQ(x86_64_features->AsBitmap(), 39U); EXPECT_FALSE(x86_64_features->Equals(x86_features.get())); EXPECT_FALSE(x86_64_features->Equals(x86_default_features.get())); diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S index c6f4c0346f..62c29cf268 100644 --- a/runtime/arch/x86/quick_entrypoints_x86.S +++ b/runtime/arch/x86/quick_entrypoints_x86.S @@ -956,52 +956,42 @@ END_MACRO // Generate the allocation entrypoints for each allocator. GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR -// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc). -DEFINE_FUNCTION art_quick_alloc_object_rosalloc +// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc). +DEFINE_FUNCTION art_quick_alloc_object_resolved_rosalloc // Fast path rosalloc allocation. - // eax: uint32_t type_idx/return value, ecx: ArtMethod* - // ebx, edx: free - PUSH edi - movl ART_METHOD_DEX_CACHE_TYPES_OFFSET_32(%ecx), %edx // Load dex cache resolved types array - // Load the class (edx) - movl 0(%edx, %eax, COMPRESSED_REFERENCE_SIZE), %edx - testl %edx, %edx // Check null class - jz .Lart_quick_alloc_object_rosalloc_slow_path - + // eax: type/return value + // ecx, ebx, edx: free movl %fs:THREAD_SELF_OFFSET, %ebx // ebx = thread // Check if the thread local allocation // stack has room - movl THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%ebx), %edi - cmpl THREAD_LOCAL_ALLOC_STACK_END_OFFSET(%ebx), %edi - jae .Lart_quick_alloc_object_rosalloc_slow_path + movl THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%ebx), %ecx + cmpl THREAD_LOCAL_ALLOC_STACK_END_OFFSET(%ebx), %ecx + jae .Lart_quick_alloc_object_resolved_rosalloc_slow_path - movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%edx), %edi // Load the object size (edi) + movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%eax), %ecx // Load the object size (ecx) // Check if the size is for a thread // local allocation. Also does the // finalizable and initialization check. 
- cmpl LITERAL(ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE), %edi - ja .Lart_quick_alloc_object_rosalloc_slow_path - shrl LITERAL(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT), %edi // Calculate the rosalloc bracket index + cmpl LITERAL(ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE), %ecx + ja .Lart_quick_alloc_object_resolved_rosalloc_slow_path + shrl LITERAL(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT), %ecx // Calculate the rosalloc bracket index // from object size. // Load thread local rosalloc run (ebx) // Subtract __SIZEOF_POINTER__ to subtract // one from edi as there is no 0 byte run // and the size is already aligned. - movl (THREAD_ROSALLOC_RUNS_OFFSET - __SIZEOF_POINTER__)(%ebx, %edi, __SIZEOF_POINTER__), %ebx + movl (THREAD_ROSALLOC_RUNS_OFFSET - __SIZEOF_POINTER__)(%ebx, %ecx, __SIZEOF_POINTER__), %ebx // Load free_list head (edi), // this will be the return value. - movl (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)(%ebx), %edi - test %edi, %edi - jz .Lart_quick_alloc_object_rosalloc_slow_path + movl (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)(%ebx), %ecx + jecxz .Lart_quick_alloc_object_resolved_rosalloc_slow_path // Point of no slow path. Won't go to - // the slow path from here on. Ok to - // clobber eax and ecx. - movl %edi, %eax + // the slow path from here on. // Load the next pointer of the head // and update head of free list with // next pointer - movl ROSALLOC_SLOT_NEXT_OFFSET(%eax), %edi - movl %edi, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)(%ebx) + movl ROSALLOC_SLOT_NEXT_OFFSET(%ecx), %edx + movl %edx, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)(%ebx) // Decrement size of free list by 1 decl (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)(%ebx) // Store the class pointer in the @@ -1011,141 +1001,104 @@ DEFINE_FUNCTION art_quick_alloc_object_rosalloc #if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET #error "Class pointer needs to overwrite next pointer." #endif - POISON_HEAP_REF edx - movl %edx, MIRROR_OBJECT_CLASS_OFFSET(%eax) + POISON_HEAP_REF eax + movl %eax, MIRROR_OBJECT_CLASS_OFFSET(%ecx) movl %fs:THREAD_SELF_OFFSET, %ebx // ebx = thread // Push the new object onto the thread // local allocation stack and // increment the thread local // allocation stack top. - movl THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%ebx), %edi - movl %eax, (%edi) - addl LITERAL(COMPRESSED_REFERENCE_SIZE), %edi - movl %edi, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%ebx) + movl THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%ebx), %eax + movl %ecx, (%eax) + addl LITERAL(COMPRESSED_REFERENCE_SIZE), %eax + movl %eax, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%ebx) // No fence needed for x86. 
- POP edi + movl %ecx, %eax // Move object to return register ret -.Lart_quick_alloc_object_rosalloc_slow_path: - POP edi +.Lart_quick_alloc_object_resolved_rosalloc_slow_path: SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx // save ref containing registers for GC // Outgoing argument set up - PUSH eax // alignment padding + subl LITERAL(8), %esp // alignment padding pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() CFI_ADJUST_CFA_OFFSET(4) - PUSH ecx PUSH eax - call SYMBOL(artAllocObjectFromCodeRosAlloc) // cxx_name(arg0, arg1, Thread*) + call SYMBOL(artAllocObjectFromCodeResolvedRosAlloc) // cxx_name(arg0, Thread*) addl LITERAL(16), %esp // pop arguments CFI_ADJUST_CFA_OFFSET(-16) RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception -END_FUNCTION art_quick_alloc_object_rosalloc +END_FUNCTION art_quick_alloc_object_resolved_rosalloc -// The common fast path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab. +// The common fast path code for art_quick_alloc_object_resolved_tlab +// and art_quick_alloc_object_resolved_region_tlab. // -// EAX: type_idx/return_value, ECX: ArtMethod*, EDX: the class. -MACRO1(ALLOC_OBJECT_TLAB_FAST_PATH, slowPathLabel) - testl %edx, %edx // Check null class - jz VAR(slowPathLabel) +// EAX: type/return_value +MACRO1(ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH, slowPathLabel) movl %fs:THREAD_SELF_OFFSET, %ebx // ebx = thread movl THREAD_LOCAL_END_OFFSET(%ebx), %edi // Load thread_local_end. subl THREAD_LOCAL_POS_OFFSET(%ebx), %edi // Compute the remaining buffer size. - movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%edx), %esi // Load the object size. - cmpl %edi, %esi // Check if it fits. + movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%eax), %ecx // Load the object size. + cmpl %edi, %ecx // Check if it fits. ja VAR(slowPathLabel) - movl THREAD_LOCAL_POS_OFFSET(%ebx), %eax // Load thread_local_pos + movl THREAD_LOCAL_POS_OFFSET(%ebx), %edx // Load thread_local_pos // as allocated object. - addl %eax, %esi // Add the object size. - movl %esi, THREAD_LOCAL_POS_OFFSET(%ebx) // Update thread_local_pos. + addl %edx, %ecx // Add the object size. + movl %ecx, THREAD_LOCAL_POS_OFFSET(%ebx) // Update thread_local_pos. incl THREAD_LOCAL_OBJECTS_OFFSET(%ebx) // Increase thread_local_objects. // Store the class pointer in the header. // No fence needed for x86. - POISON_HEAP_REF edx - movl %edx, MIRROR_OBJECT_CLASS_OFFSET(%eax) + POISON_HEAP_REF eax + movl %eax, MIRROR_OBJECT_CLASS_OFFSET(%edx) + movl %edx, %eax POP edi - POP esi ret // Fast path succeeded. END_MACRO -// The common slow path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab. -MACRO1(ALLOC_OBJECT_TLAB_SLOW_PATH, cxx_name) +// The common slow path code for art_quick_alloc_object_resolved_tlab +// and art_quick_alloc_object_resolved_region_tlab. 
+MACRO1(ALLOC_OBJECT_RESOLVED_TLAB_SLOW_PATH, cxx_name) POP edi - POP esi SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx // save ref containing registers for GC // Outgoing argument set up - PUSH eax // alignment padding + subl LITERAL(8), %esp // alignment padding + CFI_ADJUST_CFA_OFFSET(8) pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() CFI_ADJUST_CFA_OFFSET(4) - PUSH ecx PUSH eax - call CALLVAR(cxx_name) // cxx_name(arg0, arg1, Thread*) + call CALLVAR(cxx_name) // cxx_name(arg0, Thread*) addl LITERAL(16), %esp CFI_ADJUST_CFA_OFFSET(-16) RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception END_MACRO -// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB). May be called +// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB). May be called // for CC if the GC is not marking. -DEFINE_FUNCTION art_quick_alloc_object_tlab +DEFINE_FUNCTION art_quick_alloc_object_resolved_tlab // Fast path tlab allocation. - // EAX: uint32_t type_idx/return value, ECX: ArtMethod*. - // EBX, EDX: free. - PUSH esi + // EAX: type + // EBX, ECX, EDX: free. PUSH edi - movl ART_METHOD_DEX_CACHE_TYPES_OFFSET_32(%ecx), %edx // Load dex cache resolved types array - // Might need to break down into multiple instructions to get the base address in a register. - // Load the class - movl 0(%edx, %eax, COMPRESSED_REFERENCE_SIZE), %edx - ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_tlab_slow_path -.Lart_quick_alloc_object_tlab_slow_path: - ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeTLAB -END_FUNCTION art_quick_alloc_object_tlab - -// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB). -DEFINE_FUNCTION art_quick_alloc_object_region_tlab + ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_tlab_slow_path +.Lart_quick_alloc_object_resolved_tlab_slow_path: + ALLOC_OBJECT_RESOLVED_TLAB_SLOW_PATH artAllocObjectFromCodeResolvedTLAB +END_FUNCTION art_quick_alloc_object_resolved_tlab + +// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB). +DEFINE_FUNCTION art_quick_alloc_object_resolved_region_tlab // Fast path region tlab allocation. - // EAX: uint32_t type_idx/return value, ECX: ArtMethod*. - // EBX, EDX: free. + // EAX: type/return value + // EBX, ECX, EDX: free. #if !defined(USE_READ_BARRIER) int3 int3 #endif - PUSH esi PUSH edi - movl ART_METHOD_DEX_CACHE_TYPES_OFFSET_32(%ecx), %edx // Load dex cache resolved types array - // Might need to break down into multiple instructions to get the base address in a register. - // Load the class - movl 0(%edx, %eax, COMPRESSED_REFERENCE_SIZE), %edx - // Read barrier for class load. - cmpl LITERAL(0), %fs:THREAD_IS_GC_MARKING_OFFSET - jz .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit - // Null check so that we can load the lock word. - testl %edx, %edx - jz .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit - // Check the mark bit, if it is 1 return. 
- testl LITERAL(LOCK_WORD_MARK_BIT_MASK_SHIFTED), MIRROR_OBJECT_LOCK_WORD_OFFSET(%edx) - jz .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path -.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit: - ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_region_tlab_slow_path -.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path: - // The read barrier slow path. Mark the class. - PUSH eax - PUSH ecx - // Outgoing argument set up - subl MACRO_LITERAL(8), %esp // Alignment padding - CFI_ADJUST_CFA_OFFSET(8) - PUSH edx // Pass the class as the first param. - call SYMBOL(artReadBarrierMark) // cxx_name(mirror::Object* obj) - movl %eax, %edx - addl MACRO_LITERAL(12), %esp - CFI_ADJUST_CFA_OFFSET(-12) - POP ecx - POP eax - jmp .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit -.Lart_quick_alloc_object_region_tlab_slow_path: - ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeRegionTLAB -END_FUNCTION art_quick_alloc_object_region_tlab + ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_region_tlab_slow_path +.Lart_quick_alloc_object_resolved_region_tlab_slow_path: + ALLOC_OBJECT_RESOLVED_TLAB_SLOW_PATH artAllocObjectFromCodeResolvedRegionTLAB +END_FUNCTION art_quick_alloc_object_resolved_region_tlab + DEFINE_FUNCTION art_quick_resolve_string SETUP_SAVE_EVERYTHING_FRAME ebx, ebx diff --git a/runtime/arch/x86_64/instruction_set_features_x86_64.h b/runtime/arch/x86_64/instruction_set_features_x86_64.h index bc0f708e20..83f4093682 100644 --- a/runtime/arch/x86_64/instruction_set_features_x86_64.h +++ b/runtime/arch/x86_64/instruction_set_features_x86_64.h @@ -68,15 +68,19 @@ class X86_64InstructionSetFeatures FINAL : public X86InstructionSetFeatures { protected: // Parse a string of the form "ssse3" adding these to a new InstructionSetFeatures. 
std::unique_ptr<const InstructionSetFeatures> - AddFeaturesFromSplitString(const bool smp, const std::vector<std::string>& features, + AddFeaturesFromSplitString(const std::vector<std::string>& features, std::string* error_msg) const OVERRIDE { - return X86InstructionSetFeatures::AddFeaturesFromSplitString(smp, features, true, error_msg); + return X86InstructionSetFeatures::AddFeaturesFromSplitString(features, true, error_msg); } private: - X86_64InstructionSetFeatures(bool smp, bool has_SSSE3, bool has_SSE4_1, bool has_SSE4_2, - bool has_AVX, bool has_AVX2, bool has_POPCNT) - : X86InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, + X86_64InstructionSetFeatures(bool has_SSSE3, + bool has_SSE4_1, + bool has_SSE4_2, + bool has_AVX, + bool has_AVX2, + bool has_POPCNT) + : X86InstructionSetFeatures(has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, has_AVX2, has_POPCNT) { } diff --git a/runtime/arch/x86_64/instruction_set_features_x86_64_test.cc b/runtime/arch/x86_64/instruction_set_features_x86_64_test.cc index f2b2cd85c5..3c2ceacc35 100644 --- a/runtime/arch/x86_64/instruction_set_features_x86_64_test.cc +++ b/runtime/arch/x86_64/instruction_set_features_x86_64_test.cc @@ -27,9 +27,9 @@ TEST(X86_64InstructionSetFeaturesTest, X86Features) { ASSERT_TRUE(x86_64_features.get() != nullptr) << error_msg; EXPECT_EQ(x86_64_features->GetInstructionSet(), kX86_64); EXPECT_TRUE(x86_64_features->Equals(x86_64_features.get())); - EXPECT_STREQ("smp,-ssse3,-sse4.1,-sse4.2,-avx,-avx2,-popcnt", + EXPECT_STREQ("-ssse3,-sse4.1,-sse4.2,-avx,-avx2,-popcnt", x86_64_features->GetFeatureString().c_str()); - EXPECT_EQ(x86_64_features->AsBitmap(), 1U); + EXPECT_EQ(x86_64_features->AsBitmap(), 0U); } } // namespace art diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S index 4c46b08a9e..facd563428 100644 --- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S +++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S @@ -983,7 +983,6 @@ GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_TLAB_ALLOCATORS // Comment out allocators that have x86_64 specific asm. // Region TLAB: -// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB) // GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB) // GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB) @@ -996,11 +995,9 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB) // Normal TLAB: -// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB) // GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB) // GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab, TLAB) -// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_tlab, TLAB) // GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab, TLAB) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab, TLAB) GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_tlab, TLAB) @@ -1009,29 +1006,25 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab, TLAB) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab, TLAB) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab, TLAB) -// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc). 
-DEFINE_FUNCTION art_quick_alloc_object_rosalloc + +// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc). +DEFINE_FUNCTION art_quick_alloc_object_resolved_rosalloc // Fast path rosalloc allocation. - // RDI: type_idx, RSI: ArtMethod*, RAX: return value - // RDX, RCX, R8, R9: free. - movq ART_METHOD_DEX_CACHE_TYPES_OFFSET_64(%rsi), %rdx // Load dex cache resolved types array - // Load the class (edx) - movl 0(%rdx, %rdi, COMPRESSED_REFERENCE_SIZE), %edx - testl %edx, %edx // Check null class - jz .Lart_quick_alloc_object_rosalloc_slow_path + // RDI: mirror::Class*, RAX: return value + // RSI, RDX, RCX, R8, R9: free. // Check if the thread local // allocation stack has room. movq %gs:THREAD_SELF_OFFSET, %r8 // r8 = thread movq THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%r8), %rcx // rcx = alloc stack top. cmpq THREAD_LOCAL_ALLOC_STACK_END_OFFSET(%r8), %rcx - jae .Lart_quick_alloc_object_rosalloc_slow_path + jae .Lart_quick_alloc_object_resolved_rosalloc_slow_path // Load the object size - movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%rdx), %eax + movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%rdi), %eax // Check if the size is for a thread // local allocation. Also does the // initialized and finalizable checks. cmpl LITERAL(ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE), %eax - ja .Lart_quick_alloc_object_rosalloc_slow_path + ja .Lart_quick_alloc_object_resolved_rosalloc_slow_path // Compute the rosalloc bracket index // from the size. shrq LITERAL(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT), %rax @@ -1045,7 +1038,7 @@ DEFINE_FUNCTION art_quick_alloc_object_rosalloc // will be the return val. movq (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)(%r9), %rax testq %rax, %rax - jz .Lart_quick_alloc_object_rosalloc_slow_path + jz .Lart_quick_alloc_object_resolved_rosalloc_slow_path // "Point of no slow path". Won't go to the slow path from here on. OK to clobber rdi and rsi. // Push the new object onto the thread // local allocation stack and @@ -1066,17 +1059,17 @@ DEFINE_FUNCTION art_quick_alloc_object_rosalloc #if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET #error "Class pointer needs to overwrite next pointer." #endif - POISON_HEAP_REF edx - movl %edx, MIRROR_OBJECT_CLASS_OFFSET(%rax) + POISON_HEAP_REF edi + movl %edi, MIRROR_OBJECT_CLASS_OFFSET(%rax) // Decrement the size of the free list decl (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)(%r9) // No fence necessary for x86. ret -.Lart_quick_alloc_object_rosalloc_slow_path: +.Lart_quick_alloc_object_resolved_rosalloc_slow_path: SETUP_SAVE_REFS_ONLY_FRAME // save ref containing registers for GC // Outgoing argument set up - movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current() - call SYMBOL(artAllocObjectFromCodeRosAlloc) // cxx_name(arg0, arg1, Thread*) + movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current() + call SYMBOL(artAllocObjectFromCodeResolvedRosAlloc) // cxx_name(arg0, Thread*) RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception END_FUNCTION art_quick_alloc_object_rosalloc @@ -1095,19 +1088,19 @@ END_MACRO // TODO: delete ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH since it is the same as // ALLOC_OBJECT_INITIALIZED_TLAB_FAST_PATH. // -// RDI: type_idx, RSI: ArtMethod*, RDX/EDX: the class, RAX: return value. -// RCX: scratch, r8: Thread::Current(). +// RDI: the class, RAX: return value. +// RCX, RSI, RDX: scratch, r8: Thread::Current(). 
MACRO1(ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH, slowPathLabel) ALLOC_OBJECT_INITIALIZED_TLAB_FAST_PATH(RAW_VAR(slowPathLabel)) END_MACRO // The fast path code for art_quick_alloc_object_initialized_region_tlab. // -// RDI: type_idx, RSI: ArtMethod*, RDX/EDX: the class, RAX: return value. -// RCX: scratch, r8: Thread::Current(). +// RDI: the class, RSI: ArtMethod*, RAX: return value. +// RCX, RSI, RDX: scratch, r8: Thread::Current(). MACRO1(ALLOC_OBJECT_INITIALIZED_TLAB_FAST_PATH, slowPathLabel) movq %gs:THREAD_SELF_OFFSET, %r8 // r8 = thread - movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%rdx), %ecx // Load the object size. + movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%rdi), %ecx // Load the object size. movq THREAD_LOCAL_POS_OFFSET(%r8), %rax addq %rax, %rcx // Add size to pos, note that these // are both 32 bit ints, overflow @@ -1120,8 +1113,8 @@ MACRO1(ALLOC_OBJECT_INITIALIZED_TLAB_FAST_PATH, slowPathLabel) // Store the class pointer in the // header. // No fence needed for x86. - POISON_HEAP_REF edx - movl %edx, MIRROR_OBJECT_CLASS_OFFSET(%rax) + POISON_HEAP_REF edi + movl %edi, MIRROR_OBJECT_CLASS_OFFSET(%rax) ret // Fast path succeeded. END_MACRO @@ -1164,12 +1157,14 @@ MACRO1(ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED, slowPathLabel) ret // Fast path succeeded. END_MACRO -// The common slow path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab. + +// The common slow path code for art_quick_alloc_object_{resolved, initialized}_tlab +// and art_quick_alloc_object_{resolved, initialized}_region_tlab. MACRO1(ALLOC_OBJECT_TLAB_SLOW_PATH, cxx_name) SETUP_SAVE_REFS_ONLY_FRAME // save ref containing registers for GC // Outgoing argument set up - movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current() - call CALLVAR(cxx_name) // cxx_name(arg0, arg1, Thread*) + movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current() + call CALLVAR(cxx_name) // cxx_name(arg0, Thread*) RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception END_MACRO @@ -1184,26 +1179,11 @@ MACRO1(ALLOC_ARRAY_TLAB_SLOW_PATH, cxx_name) RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception END_MACRO -// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB). May be -// called with CC if the GC is not active. -DEFINE_FUNCTION art_quick_alloc_object_tlab - // RDI: uint32_t type_idx, RSI: ArtMethod* - // RDX, RCX, R8, R9: free. RAX: return val. - movq ART_METHOD_DEX_CACHE_TYPES_OFFSET_64(%rsi), %rdx // Load dex cache resolved types array - // Might need to break down into multiple instructions to get the base address in a register. - // Load the class - movl 0(%rdx, %rdi, COMPRESSED_REFERENCE_SIZE), %edx - ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_tlab_slow_path -.Lart_quick_alloc_object_tlab_slow_path: - ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeTLAB -END_FUNCTION art_quick_alloc_object_tlab - // A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB). May be // called with CC if the GC is not active. DEFINE_FUNCTION art_quick_alloc_object_resolved_tlab - // RDI: mirror::Class* klass, RSI: ArtMethod* - // RDX, RCX, R8, R9: free. RAX: return val. - movq %rdi, %rdx + // RDI: mirror::Class* klass + // RDX, RSI, RCX, R8, R9: free. RAX: return val. 
ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_tlab_slow_path .Lart_quick_alloc_object_resolved_tlab_slow_path: ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeResolvedTLAB @@ -1212,9 +1192,8 @@ END_FUNCTION art_quick_alloc_object_resolved_tlab // A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB). // May be called with CC if the GC is not active. DEFINE_FUNCTION art_quick_alloc_object_initialized_tlab - // RDI: mirror::Class* klass, RSI: ArtMethod* - // RDX, RCX, R8, R9: free. RAX: return val. - movq %rdi, %rdx + // RDI: mirror::Class* klass + // RDX, RSI, RCX, R8, R9: free. RAX: return val. ALLOC_OBJECT_INITIALIZED_TLAB_FAST_PATH .Lart_quick_alloc_object_initialized_tlab_slow_path .Lart_quick_alloc_object_initialized_tlab_slow_path: ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeInitializedTLAB @@ -1292,49 +1271,12 @@ DEFINE_FUNCTION art_quick_alloc_array_resolved_region_tlab ALLOC_ARRAY_TLAB_SLOW_PATH artAllocArrayFromCodeResolvedRegionTLAB END_FUNCTION art_quick_alloc_array_resolved_region_tlab -// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB). -DEFINE_FUNCTION art_quick_alloc_object_region_tlab - // Fast path region tlab allocation. - // RDI: uint32_t type_idx, RSI: ArtMethod* - // RDX, RCX, R8, R9: free. RAX: return val. - ASSERT_USE_READ_BARRIER - movq ART_METHOD_DEX_CACHE_TYPES_OFFSET_64(%rsi), %rdx // Load dex cache resolved types array - movl 0(%rdx, %rdi, COMPRESSED_REFERENCE_SIZE), %edx // Load the class - // Null check so that we can load the lock word. - testl %edx, %edx - jz .Lart_quick_alloc_object_region_tlab_slow_path - // Since we have allocation entrypoint switching, we know the GC is marking. - // Check the mark bit, if it is 0, do the read barrier mark. - testl LITERAL(LOCK_WORD_MARK_BIT_MASK_SHIFTED), MIRROR_OBJECT_LOCK_WORD_OFFSET(%edx) - jz .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path -.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit: - // Use resolved one since we already did the null check. - ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_region_tlab_slow_path -.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path: - // The read barrier slow path. Mark the class. - PUSH rdi - PUSH rsi - subq LITERAL(8), %rsp // 16 byte alignment - // Outgoing argument set up - movq %rdx, %rdi // Pass the class as the first param. - call SYMBOL(artReadBarrierMark) // cxx_name(mirror::Object* obj) - movq %rax, %rdx - addq LITERAL(8), %rsp - POP rsi - POP rdi - jmp .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit -.Lart_quick_alloc_object_region_tlab_slow_path: - ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeRegionTLAB -END_FUNCTION art_quick_alloc_object_region_tlab - // A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB). DEFINE_FUNCTION art_quick_alloc_object_resolved_region_tlab // Fast path region tlab allocation. - // RDI: mirror::Class* klass, RSI: ArtMethod* - // RDX, RCX, R8, R9: free. RAX: return val. + // RDI: mirror::Class* klass + // RDX, RSI, RCX, R8, R9: free. RAX: return val. ASSERT_USE_READ_BARRIER - // No read barrier since the caller is responsible for that. 
- movq %rdi, %rdx ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_region_tlab_slow_path .Lart_quick_alloc_object_resolved_region_tlab_slow_path: ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeResolvedRegionTLAB @@ -1343,10 +1285,9 @@ END_FUNCTION art_quick_alloc_object_resolved_region_tlab // A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB). DEFINE_FUNCTION art_quick_alloc_object_initialized_region_tlab // Fast path region tlab allocation. - // RDI: mirror::Class* klass, RSI: ArtMethod* - // RDX, RCX, R8, R9: free. RAX: return val. + // RDI: mirror::Class* klass + // RDX, RSI, RCX, R8, R9: free. RAX: return val. ASSERT_USE_READ_BARRIER - movq %rdi, %rdx // No read barrier since the caller is responsible for that. ALLOC_OBJECT_INITIALIZED_TLAB_FAST_PATH .Lart_quick_alloc_object_initialized_region_tlab_slow_path .Lart_quick_alloc_object_initialized_region_tlab_slow_path: diff --git a/runtime/art_method.h b/runtime/art_method.h index b38508b757..11dcc35df5 100644 --- a/runtime/art_method.h +++ b/runtime/art_method.h @@ -95,18 +95,20 @@ class ArtMethod FINAL { // This setter guarantees atomicity. void AddAccessFlags(uint32_t flag) { - uint32_t old_access_flags = access_flags_.load(std::memory_order_relaxed); + uint32_t old_access_flags; uint32_t new_access_flags; do { + old_access_flags = access_flags_.load(std::memory_order_relaxed); new_access_flags = old_access_flags | flag; } while (!access_flags_.compare_exchange_weak(old_access_flags, new_access_flags)); } // This setter guarantees atomicity. void ClearAccessFlags(uint32_t flag) { - uint32_t old_access_flags = access_flags_.load(std::memory_order_relaxed); + uint32_t old_access_flags; uint32_t new_access_flags; do { + old_access_flags = access_flags_.load(std::memory_order_relaxed); new_access_flags = old_access_flags & ~flag; } while (!access_flags_.compare_exchange_weak(old_access_flags, new_access_flags)); } diff --git a/runtime/asm_support.h b/runtime/asm_support.h index e4972da13d..bfdddf7b03 100644 --- a/runtime/asm_support.h +++ b/runtime/asm_support.h @@ -98,7 +98,7 @@ ADD_TEST_EQ(THREAD_LOCAL_POS_OFFSET, ADD_TEST_EQ(THREAD_LOCAL_END_OFFSET, art::Thread::ThreadLocalEndOffset<POINTER_SIZE>().Int32Value()) // Offset of field Thread::tlsPtr_.thread_local_objects. -#define THREAD_LOCAL_OBJECTS_OFFSET (THREAD_LOCAL_END_OFFSET + 2 * __SIZEOF_POINTER__) +#define THREAD_LOCAL_OBJECTS_OFFSET (THREAD_LOCAL_END_OFFSET + __SIZEOF_POINTER__) ADD_TEST_EQ(THREAD_LOCAL_OBJECTS_OFFSET, art::Thread::ThreadLocalObjectsOffset<POINTER_SIZE>().Int32Value()) // Offset of field Thread::tlsPtr_.mterp_current_ibase. diff --git a/runtime/base/logging.cc b/runtime/base/logging.cc index 1dca4286da..55b4306427 100644 --- a/runtime/base/logging.cc +++ b/runtime/base/logging.cc @@ -26,7 +26,7 @@ // Headers for LogMessage::LogLine. #ifdef ART_TARGET_ANDROID -#include <android/log.h> +#include <log/log.h> #else #include <sys/types.h> #include <unistd.h> diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc index 5b8d4e42a3..8586b783a2 100644 --- a/runtime/class_linker.cc +++ b/runtime/class_linker.cc @@ -4280,9 +4280,10 @@ std::string ClassLinker::GetDescriptorForProxy(ObjPtr<mirror::Class> proxy_class void ClassLinker::CreateProxyConstructor(Handle<mirror::Class> klass, ArtMethod* out) { // Create constructor for Proxy that must initialize the method. 
- CHECK_EQ(GetClassRoot(kJavaLangReflectProxy)->NumDirectMethods(), 18u); + CHECK_EQ(GetClassRoot(kJavaLangReflectProxy)->NumDirectMethods(), 23u); + ArtMethod* proxy_constructor = GetClassRoot(kJavaLangReflectProxy)->GetDirectMethodUnchecked( - 2, image_pointer_size_); + 8, image_pointer_size_); DCHECK_EQ(std::string(proxy_constructor->GetName()), "<init>"); // Ensure constructor is in dex cache so that we can use the dex cache to look up the overridden // constructor method. diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h index 14c9c21356..469c45c10c 100644 --- a/runtime/entrypoints/entrypoint_utils-inl.h +++ b/runtime/entrypoints/entrypoint_utils-inl.h @@ -127,43 +127,21 @@ inline ArtMethod* GetCalleeSaveMethodCaller(Thread* self, Runtime::CalleeSaveTyp self->GetManagedStack()->GetTopQuickFrame(), type, true /* do_caller_check */); } -template <const bool kAccessCheck> -ALWAYS_INLINE -inline mirror::Class* CheckObjectAlloc(dex::TypeIndex type_idx, - ArtMethod* method, - Thread* self, - bool* slow_path) { - ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - PointerSize pointer_size = class_linker->GetImagePointerSize(); - mirror::Class* klass = method->GetDexCacheResolvedType<false>(type_idx, pointer_size); - if (UNLIKELY(klass == nullptr)) { - klass = class_linker->ResolveType(type_idx, method); +ALWAYS_INLINE inline mirror::Class* CheckObjectAlloc(mirror::Class* klass, + Thread* self, + bool* slow_path) + REQUIRES_SHARED(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_) { + if (UNLIKELY(!klass->IsInstantiable())) { + self->ThrowNewException("Ljava/lang/InstantiationError;", klass->PrettyDescriptor().c_str()); *slow_path = true; - if (klass == nullptr) { - DCHECK(self->IsExceptionPending()); - return nullptr; // Failure - } else { - DCHECK(!self->IsExceptionPending()); - } + return nullptr; // Failure } - if (kAccessCheck) { - if (UNLIKELY(!klass->IsInstantiable())) { - self->ThrowNewException("Ljava/lang/InstantiationError;", klass->PrettyDescriptor().c_str()); - *slow_path = true; - return nullptr; // Failure - } - if (UNLIKELY(klass->IsClassClass())) { - ThrowIllegalAccessError(nullptr, "Class %s is inaccessible", - klass->PrettyDescriptor().c_str()); - *slow_path = true; - return nullptr; // Failure - } - mirror::Class* referrer = method->GetDeclaringClass(); - if (UNLIKELY(!referrer->CanAccess(klass))) { - ThrowIllegalAccessErrorClass(referrer, klass); - *slow_path = true; - return nullptr; // Failure - } + if (UNLIKELY(klass->IsClassClass())) { + ThrowIllegalAccessError(nullptr, "Class %s is inaccessible", + klass->PrettyDescriptor().c_str()); + *slow_path = true; + return nullptr; // Failure } if (UNLIKELY(!klass->IsInitialized())) { StackHandleScope<1> hs(self); @@ -191,7 +169,9 @@ inline mirror::Class* CheckObjectAlloc(dex::TypeIndex type_idx, ALWAYS_INLINE inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass, Thread* self, - bool* slow_path) { + bool* slow_path) + REQUIRES_SHARED(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_) { if (UNLIKELY(!klass->IsInitialized())) { StackHandleScope<1> hs(self); Handle<mirror::Class> h_class(hs.NewHandle(klass)); @@ -213,18 +193,15 @@ inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass, return klass; } -// Given the context of a calling Method, use its DexCache to resolve a type to a Class. If it -// cannot be resolved, throw an error. If it can, use it to create an instance. 
-// When verification/compiler hasn't been able to verify access, optionally perform an access -// check. -template <bool kAccessCheck, bool kInstrumented> +// Allocate an instance of klass. Throws InstantationError if klass is not instantiable, +// or IllegalAccessError if klass is j.l.Class. Performs a clinit check too. +template <bool kInstrumented> ALWAYS_INLINE -inline mirror::Object* AllocObjectFromCode(dex::TypeIndex type_idx, - ArtMethod* method, +inline mirror::Object* AllocObjectFromCode(mirror::Class* klass, Thread* self, gc::AllocatorType allocator_type) { bool slow_path = false; - mirror::Class* klass = CheckObjectAlloc<kAccessCheck>(type_idx, method, self, &slow_path); + klass = CheckObjectAlloc(klass, self, &slow_path); if (UNLIKELY(slow_path)) { if (klass == nullptr) { return nullptr; diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h index 7cc136e227..4794610ca8 100644 --- a/runtime/entrypoints/entrypoint_utils.h +++ b/runtime/entrypoints/entrypoint_utils.h @@ -45,27 +45,10 @@ class OatQuickMethodHeader; class ScopedObjectAccessAlreadyRunnable; class Thread; -template <const bool kAccessCheck> -ALWAYS_INLINE inline mirror::Class* CheckObjectAlloc(dex::TypeIndex type_idx, - ArtMethod* method, - Thread* self, - bool* slow_path) - REQUIRES_SHARED(Locks::mutator_lock_) - REQUIRES(!Roles::uninterruptible_); - -ALWAYS_INLINE inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass, - Thread* self, - bool* slow_path) - REQUIRES_SHARED(Locks::mutator_lock_) - REQUIRES(!Roles::uninterruptible_); - // Given the context of a calling Method, use its DexCache to resolve a type to a Class. If it // cannot be resolved, throw an error. If it can, use it to create an instance. -// When verification/compiler hasn't been able to verify access, optionally perform an access -// check. 
-template <bool kAccessCheck, bool kInstrumented> -ALWAYS_INLINE inline mirror::Object* AllocObjectFromCode(dex::TypeIndex type_idx, - ArtMethod* method, +template <bool kInstrumented> +ALWAYS_INLINE inline mirror::Object* AllocObjectFromCode(mirror::Class* klass, Thread* self, gc::AllocatorType allocator_type) REQUIRES_SHARED(Locks::mutator_lock_) diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc index 82bb8e53c6..2d06508069 100644 --- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc @@ -29,87 +29,58 @@ namespace art { static constexpr bool kUseTlabFastPath = true; +template <bool kInitialized, + bool kFinalize, + bool kInstrumented, + gc::AllocatorType allocator_type> +static ALWAYS_INLINE inline mirror::Object* artAllocObjectFromCode( + mirror::Class* klass, + Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) { + ScopedQuickEntrypointChecks sqec(self); + DCHECK(klass != nullptr); + if (kUseTlabFastPath && !kInstrumented && allocator_type == gc::kAllocatorTypeTLAB) { + if (kInitialized || klass->IsInitialized()) { + if (!kFinalize || !klass->IsFinalizable()) { + size_t byte_count = klass->GetObjectSize(); + byte_count = RoundUp(byte_count, gc::space::BumpPointerSpace::kAlignment); + mirror::Object* obj; + if (LIKELY(byte_count < self->TlabSize())) { + obj = self->AllocTlab(byte_count); + DCHECK(obj != nullptr) << "AllocTlab can't fail"; + obj->SetClass(klass); + if (kUseBakerReadBarrier) { + obj->AssertReadBarrierState(); + } + QuasiAtomic::ThreadFenceForConstructor(); + return obj; + } + } + } + } + if (kInitialized) { + return AllocObjectFromCodeInitialized<kInstrumented>(klass, self, allocator_type); + } else if (!kFinalize) { + return AllocObjectFromCodeResolved<kInstrumented>(klass, self, allocator_type); + } else { + return AllocObjectFromCode<kInstrumented>(klass, self, allocator_type); + } +} + #define GENERATE_ENTRYPOINTS_FOR_ALLOCATOR_INST(suffix, suffix2, instrumented_bool, allocator_type) \ -extern "C" mirror::Object* artAllocObjectFromCode ##suffix##suffix2( \ - uint32_t type_idx, ArtMethod* method, Thread* self) \ +extern "C" mirror::Object* artAllocObjectFromCodeWithChecks##suffix##suffix2( \ + mirror::Class* klass, Thread* self) \ REQUIRES_SHARED(Locks::mutator_lock_) { \ - ScopedQuickEntrypointChecks sqec(self); \ - if (kUseTlabFastPath && !(instrumented_bool) && (allocator_type) == gc::kAllocatorTypeTLAB) { \ - mirror::Class* klass = method->GetDexCacheResolvedType<false>(dex::TypeIndex(type_idx), \ - kRuntimePointerSize); \ - if (LIKELY(klass != nullptr && klass->IsInitialized() && !klass->IsFinalizable())) { \ - size_t byte_count = klass->GetObjectSize(); \ - byte_count = RoundUp(byte_count, gc::space::BumpPointerSpace::kAlignment); \ - mirror::Object* obj; \ - if (LIKELY(byte_count < self->TlabSize())) { \ - obj = self->AllocTlab(byte_count); \ - DCHECK(obj != nullptr) << "AllocTlab can't fail"; \ - obj->SetClass(klass); \ - if (kUseBakerReadBarrier) { \ - obj->AssertReadBarrierState(); \ - } \ - QuasiAtomic::ThreadFenceForConstructor(); \ - return obj; \ - } \ - } \ - } \ - return AllocObjectFromCode<false, instrumented_bool>(dex::TypeIndex(type_idx), \ - method, \ - self, \ - allocator_type); \ + return artAllocObjectFromCode<false, true, instrumented_bool, allocator_type>(klass, self); \ } \ extern "C" mirror::Object* artAllocObjectFromCodeResolved##suffix##suffix2( \ - mirror::Class* klass, ArtMethod* method ATTRIBUTE_UNUSED, 
Thread* self) \ + mirror::Class* klass, Thread* self) \ REQUIRES_SHARED(Locks::mutator_lock_) { \ - ScopedQuickEntrypointChecks sqec(self); \ - if (kUseTlabFastPath && !(instrumented_bool) && (allocator_type) == gc::kAllocatorTypeTLAB) { \ - if (LIKELY(klass->IsInitialized())) { \ - size_t byte_count = klass->GetObjectSize(); \ - byte_count = RoundUp(byte_count, gc::space::BumpPointerSpace::kAlignment); \ - mirror::Object* obj; \ - if (LIKELY(byte_count < self->TlabSize())) { \ - obj = self->AllocTlab(byte_count); \ - DCHECK(obj != nullptr) << "AllocTlab can't fail"; \ - obj->SetClass(klass); \ - if (kUseBakerReadBarrier) { \ - obj->AssertReadBarrierState(); \ - } \ - QuasiAtomic::ThreadFenceForConstructor(); \ - return obj; \ - } \ - } \ - } \ - return AllocObjectFromCodeResolved<instrumented_bool>(klass, self, allocator_type); \ + return artAllocObjectFromCode<false, false, instrumented_bool, allocator_type>(klass, self); \ } \ extern "C" mirror::Object* artAllocObjectFromCodeInitialized##suffix##suffix2( \ - mirror::Class* klass, ArtMethod* method ATTRIBUTE_UNUSED, Thread* self) \ - REQUIRES_SHARED(Locks::mutator_lock_) { \ - ScopedQuickEntrypointChecks sqec(self); \ - if (kUseTlabFastPath && !(instrumented_bool) && (allocator_type) == gc::kAllocatorTypeTLAB) { \ - size_t byte_count = klass->GetObjectSize(); \ - byte_count = RoundUp(byte_count, gc::space::BumpPointerSpace::kAlignment); \ - mirror::Object* obj; \ - if (LIKELY(byte_count < self->TlabSize())) { \ - obj = self->AllocTlab(byte_count); \ - DCHECK(obj != nullptr) << "AllocTlab can't fail"; \ - obj->SetClass(klass); \ - if (kUseBakerReadBarrier) { \ - obj->AssertReadBarrierState(); \ - } \ - QuasiAtomic::ThreadFenceForConstructor(); \ - return obj; \ - } \ - } \ - return AllocObjectFromCodeInitialized<instrumented_bool>(klass, self, allocator_type); \ -} \ -extern "C" mirror::Object* artAllocObjectFromCodeWithAccessCheck##suffix##suffix2( \ - uint32_t type_idx, ArtMethod* method, Thread* self) \ + mirror::Class* klass, Thread* self) \ REQUIRES_SHARED(Locks::mutator_lock_) { \ - ScopedQuickEntrypointChecks sqec(self); \ - return AllocObjectFromCode<true, instrumented_bool>(dex::TypeIndex(type_idx), \ - method, \ - self, \ - allocator_type); \ + return artAllocObjectFromCode<true, false, instrumented_bool, allocator_type>(klass, self); \ } \ extern "C" mirror::Array* artAllocArrayFromCode##suffix##suffix2( \ uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \ @@ -220,10 +191,9 @@ GENERATE_ENTRYPOINTS_FOR_ALLOCATOR(RegionTLAB, gc::kAllocatorTypeRegionTLAB) extern "C" void* art_quick_alloc_array##suffix(uint32_t, int32_t, ArtMethod* ref); \ extern "C" void* art_quick_alloc_array_resolved##suffix(mirror::Class* klass, int32_t, ArtMethod* ref); \ extern "C" void* art_quick_alloc_array_with_access_check##suffix(uint32_t, int32_t, ArtMethod* ref); \ -extern "C" void* art_quick_alloc_object##suffix(uint32_t type_idx, ArtMethod* ref); \ -extern "C" void* art_quick_alloc_object_resolved##suffix(mirror::Class* klass, ArtMethod* ref); \ -extern "C" void* art_quick_alloc_object_initialized##suffix(mirror::Class* klass, ArtMethod* ref); \ -extern "C" void* art_quick_alloc_object_with_access_check##suffix(uint32_t type_idx, ArtMethod* ref); \ +extern "C" void* art_quick_alloc_object_resolved##suffix(mirror::Class* klass); \ +extern "C" void* art_quick_alloc_object_initialized##suffix(mirror::Class* klass); \ +extern "C" void* art_quick_alloc_object_with_checks##suffix(mirror::Class* klass); \ extern "C" void* 
art_quick_check_and_alloc_array##suffix(uint32_t, int32_t, ArtMethod* ref); \ extern "C" void* art_quick_check_and_alloc_array_with_access_check##suffix(uint32_t, int32_t, ArtMethod* ref); \ extern "C" void* art_quick_alloc_string_from_bytes##suffix(void*, int32_t, int32_t, int32_t); \ @@ -233,9 +203,9 @@ extern "C" void* art_quick_alloc_array##suffix##_instrumented(uint32_t, int32_t, extern "C" void* art_quick_alloc_array_resolved##suffix##_instrumented(mirror::Class* klass, int32_t, ArtMethod* ref); \ extern "C" void* art_quick_alloc_array_with_access_check##suffix##_instrumented(uint32_t, int32_t, ArtMethod* ref); \ extern "C" void* art_quick_alloc_object##suffix##_instrumented(uint32_t type_idx, ArtMethod* ref); \ -extern "C" void* art_quick_alloc_object_resolved##suffix##_instrumented(mirror::Class* klass, ArtMethod* ref); \ -extern "C" void* art_quick_alloc_object_initialized##suffix##_instrumented(mirror::Class* klass, ArtMethod* ref); \ -extern "C" void* art_quick_alloc_object_with_access_check##suffix##_instrumented(uint32_t type_idx, ArtMethod* ref); \ +extern "C" void* art_quick_alloc_object_resolved##suffix##_instrumented(mirror::Class* klass); \ +extern "C" void* art_quick_alloc_object_initialized##suffix##_instrumented(mirror::Class* klass); \ +extern "C" void* art_quick_alloc_object_with_checks##suffix##_instrumented(mirror::Class* klass); \ extern "C" void* art_quick_check_and_alloc_array##suffix##_instrumented(uint32_t, int32_t, ArtMethod* ref); \ extern "C" void* art_quick_check_and_alloc_array_with_access_check##suffix##_instrumented(uint32_t, int32_t, ArtMethod* ref); \ extern "C" void* art_quick_alloc_string_from_bytes##suffix##_instrumented(void*, int32_t, int32_t, int32_t); \ @@ -246,10 +216,9 @@ void SetQuickAllocEntryPoints##suffix(QuickEntryPoints* qpoints, bool instrument qpoints->pAllocArray = art_quick_alloc_array##suffix##_instrumented; \ qpoints->pAllocArrayResolved = art_quick_alloc_array_resolved##suffix##_instrumented; \ qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check##suffix##_instrumented; \ - qpoints->pAllocObject = art_quick_alloc_object##suffix##_instrumented; \ qpoints->pAllocObjectResolved = art_quick_alloc_object_resolved##suffix##_instrumented; \ qpoints->pAllocObjectInitialized = art_quick_alloc_object_initialized##suffix##_instrumented; \ - qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check##suffix##_instrumented; \ + qpoints->pAllocObjectWithChecks = art_quick_alloc_object_with_checks##suffix##_instrumented; \ qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array##suffix##_instrumented; \ qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check##suffix##_instrumented; \ qpoints->pAllocStringFromBytes = art_quick_alloc_string_from_bytes##suffix##_instrumented; \ @@ -259,10 +228,9 @@ void SetQuickAllocEntryPoints##suffix(QuickEntryPoints* qpoints, bool instrument qpoints->pAllocArray = art_quick_alloc_array##suffix; \ qpoints->pAllocArrayResolved = art_quick_alloc_array_resolved##suffix; \ qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check##suffix; \ - qpoints->pAllocObject = art_quick_alloc_object##suffix; \ qpoints->pAllocObjectResolved = art_quick_alloc_object_resolved##suffix; \ qpoints->pAllocObjectInitialized = art_quick_alloc_object_initialized##suffix; \ - qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check##suffix; \ + qpoints->pAllocObjectWithChecks = 
art_quick_alloc_object_with_checks##suffix; \ qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array##suffix; \ qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check##suffix; \ qpoints->pAllocStringFromBytes = art_quick_alloc_string_from_bytes##suffix; \ diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h index a1c5082c93..0911aeb0f4 100644 --- a/runtime/entrypoints/quick/quick_entrypoints_list.h +++ b/runtime/entrypoints/quick/quick_entrypoints_list.h @@ -23,10 +23,9 @@ V(AllocArray, void*, uint32_t, int32_t, ArtMethod*) \ V(AllocArrayResolved, void*, mirror::Class*, int32_t, ArtMethod*) \ V(AllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*) \ - V(AllocObject, void*, uint32_t, ArtMethod*) \ - V(AllocObjectResolved, void*, mirror::Class*, ArtMethod*) \ - V(AllocObjectInitialized, void*, mirror::Class*, ArtMethod*) \ - V(AllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*) \ + V(AllocObjectResolved, void*, mirror::Class*) \ + V(AllocObjectInitialized, void*, mirror::Class*) \ + V(AllocObjectWithChecks, void*, mirror::Class*) \ V(CheckAndAllocArray, void*, uint32_t, int32_t, ArtMethod*) \ V(CheckAndAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*) \ V(AllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t) \ diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc index bf1d4ea1a1..a3e5b552b5 100644 --- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc @@ -705,7 +705,7 @@ extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self, QuickExceptionHandler::DumpFramesWithType(self, true); } - mirror::Throwable* pending_exception = nullptr; + ObjPtr<mirror::Throwable> pending_exception; bool from_code = false; self->PopDeoptimizationContext(&result, &pending_exception, /* out */ &from_code); diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc index 12836602d5..6866abb6ae 100644 --- a/runtime/entrypoints_order_test.cc +++ b/runtime/entrypoints_order_test.cc @@ -122,9 +122,9 @@ class EntrypointsOrderTest : public CommonRuntimeTest { // Skip across the entrypoints structures. 
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_start, thread_local_pos, sizeof(void*)); EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_pos, thread_local_end, sizeof(void*)); - EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_end, thread_local_start, sizeof(void*)); - EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_start, thread_local_objects, sizeof(void*)); + EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_end, thread_local_objects, sizeof(void*)); EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_objects, mterp_current_ibase, sizeof(size_t)); EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_current_ibase, mterp_default_ibase, sizeof(void*)); EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_default_ibase, mterp_alt_ibase, sizeof(void*)); @@ -156,13 +156,13 @@ class EntrypointsOrderTest : public CommonRuntimeTest { EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocArray, pAllocArrayResolved, sizeof(void*)); EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocArrayResolved, pAllocArrayWithAccessCheck, sizeof(void*)); - EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocArrayWithAccessCheck, pAllocObject, sizeof(void*)); - EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocObject, pAllocObjectResolved, sizeof(void*)); + EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocArrayWithAccessCheck, pAllocObjectResolved, + sizeof(void*)); EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocObjectResolved, pAllocObjectInitialized, sizeof(void*)); - EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocObjectInitialized, pAllocObjectWithAccessCheck, + EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocObjectInitialized, pAllocObjectWithChecks, sizeof(void*)); - EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocObjectWithAccessCheck, pCheckAndAllocArray, + EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocObjectWithChecks, pCheckAndAllocArray, sizeof(void*)); EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCheckAndAllocArray, pCheckAndAllocArrayWithAccessCheck, sizeof(void*)); diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc index cebb5668cc..e1117e6ea3 100644 --- a/runtime/gc/collector/concurrent_copying.cc +++ b/runtime/gc/collector/concurrent_copying.cc @@ -58,7 +58,7 @@ ConcurrentCopying::ConcurrentCopying(Heap* heap, bool measure_read_barrier_slow_path) : GarbageCollector(heap, name_prefix + (name_prefix.empty() ? "" : " ") + - "concurrent copying + mark sweep"), + "concurrent copying"), region_space_(nullptr), gc_barrier_(new Barrier(0)), gc_mark_stack_(accounting::ObjectStack::Create("concurrent copying gc mark stack", kDefaultGcMarkStackSize, diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc index a815b830c1..f2aa5a7599 100644 --- a/runtime/gc/collector/semi_space.cc +++ b/runtime/gc/collector/semi_space.cc @@ -89,7 +89,7 @@ void SemiSpace::BindBitmaps() { SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_prefix) : GarbageCollector(heap, - name_prefix + (name_prefix.empty() ? "" : " ") + "marksweep + semispace"), + name_prefix + (name_prefix.empty() ? 
"" : " ") + "semispace"), mark_stack_(nullptr), is_large_object_space_immune_(false), to_space_(nullptr), diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc index b0d7fb247a..d7dfcd4408 100644 --- a/runtime/interpreter/interpreter_switch_impl.cc +++ b/runtime/interpreter/interpreter_switch_impl.cc @@ -508,9 +508,8 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item, gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator(); obj = mirror::String::AllocEmptyString<true>(self, allocator_type); } else { - obj = AllocObjectFromCode<do_access_check, true>( - dex::TypeIndex(inst->VRegB_21c()), - shadow_frame.GetMethod(), + obj = AllocObjectFromCode<true>( + c.Ptr(), self, Runtime::Current()->GetHeap()->GetCurrentAllocator()); } diff --git a/runtime/interpreter/mterp/arm64/footer.S b/runtime/interpreter/mterp/arm64/footer.S index 6ffbd3f260..388fc8db74 100644 --- a/runtime/interpreter/mterp/arm64/footer.S +++ b/runtime/interpreter/mterp/arm64/footer.S @@ -267,13 +267,7 @@ MterpExceptionReturn: b MterpDone MterpReturn: ldr x2, [xFP, #OFF_FP_RESULT_REGISTER] - ldr lr, [xSELF, #THREAD_FLAGS_OFFSET] str x0, [x2] - mov x0, xSELF - ands lr, lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST - b.eq check2 - bl MterpSuspendCheck // (self) -check2: mov x0, #1 // signal return to caller. MterpDone: /* diff --git a/runtime/interpreter/mterp/mips64/footer.S b/runtime/interpreter/mterp/mips64/footer.S index 64772c804d..312fa9c009 100644 --- a/runtime/interpreter/mterp/mips64/footer.S +++ b/runtime/interpreter/mterp/mips64/footer.S @@ -222,13 +222,7 @@ MterpExceptionReturn: */ MterpReturn: ld a2, OFF_FP_RESULT_REGISTER(rFP) - lw ra, THREAD_FLAGS_OFFSET(rSELF) sd a0, 0(a2) - move a0, rSELF - and ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST - beqzc ra, check2 - jal MterpSuspendCheck # (self) -check2: li v0, 1 # signal return to caller. MterpDone: /* diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc index c8c1563ff6..369c2614a7 100644 --- a/runtime/interpreter/mterp/mterp.cc +++ b/runtime/interpreter/mterp/mterp.cc @@ -375,10 +375,9 @@ extern "C" size_t MterpNewInstance(ShadowFrame* shadow_frame, Thread* self, uint gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator(); obj = mirror::String::AllocEmptyString<true>(self, allocator_type); } else { - obj = AllocObjectFromCode<false, true>(dex::TypeIndex(inst->VRegB_21c()), - shadow_frame->GetMethod(), - self, - Runtime::Current()->GetHeap()->GetCurrentAllocator()); + obj = AllocObjectFromCode<true>(c, + self, + Runtime::Current()->GetHeap()->GetCurrentAllocator()); } } if (UNLIKELY(obj == nullptr)) { diff --git a/runtime/interpreter/mterp/out/mterp_arm64.S b/runtime/interpreter/mterp/out/mterp_arm64.S index 34d99a8126..681790daa8 100644 --- a/runtime/interpreter/mterp/out/mterp_arm64.S +++ b/runtime/interpreter/mterp/out/mterp_arm64.S @@ -7246,13 +7246,7 @@ MterpExceptionReturn: b MterpDone MterpReturn: ldr x2, [xFP, #OFF_FP_RESULT_REGISTER] - ldr lr, [xSELF, #THREAD_FLAGS_OFFSET] str x0, [x2] - mov x0, xSELF - ands lr, lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST - b.eq check2 - bl MterpSuspendCheck // (self) -check2: mov x0, #1 // signal return to caller. 
MterpDone: /* diff --git a/runtime/interpreter/mterp/out/mterp_mips64.S b/runtime/interpreter/mterp/out/mterp_mips64.S index 037787f6b4..bf096664df 100644 --- a/runtime/interpreter/mterp/out/mterp_mips64.S +++ b/runtime/interpreter/mterp/out/mterp_mips64.S @@ -12293,13 +12293,7 @@ MterpExceptionReturn: */ MterpReturn: ld a2, OFF_FP_RESULT_REGISTER(rFP) - lw ra, THREAD_FLAGS_OFFSET(rSELF) sd a0, 0(a2) - move a0, rSELF - and ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST - beqzc ra, check2 - jal MterpSuspendCheck # (self) -check2: li v0, 1 # signal return to caller. MterpDone: /* diff --git a/runtime/oat.cc b/runtime/oat.cc index cebe765369..1a07cdc7f3 100644 --- a/runtime/oat.cc +++ b/runtime/oat.cc @@ -469,10 +469,6 @@ bool OatHeader::IsPic() const { return IsKeyEnabled(OatHeader::kPicKey); } -bool OatHeader::HasPatchInfo() const { - return IsKeyEnabled(OatHeader::kHasPatchInfoKey); -} - bool OatHeader::IsDebuggable() const { return IsKeyEnabled(OatHeader::kDebuggableKey); } diff --git a/runtime/oat.h b/runtime/oat.h index 0f4cbbb767..dc103e2b52 100644 --- a/runtime/oat.h +++ b/runtime/oat.h @@ -32,13 +32,12 @@ class InstructionSetFeatures; class PACKED(4) OatHeader { public: static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' }; - static constexpr uint8_t kOatVersion[] = { '0', '9', '3', '\0' }; + static constexpr uint8_t kOatVersion[] = { '0', '9', '5', '\0' }; // alloc entrypoints change static constexpr const char* kImageLocationKey = "image-location"; static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline"; static constexpr const char* kDex2OatHostKey = "dex2oat-host"; static constexpr const char* kPicKey = "pic"; - static constexpr const char* kHasPatchInfoKey = "has-patch-info"; static constexpr const char* kDebuggableKey = "debuggable"; static constexpr const char* kNativeDebuggableKey = "native-debuggable"; static constexpr const char* kCompilerFilter = "compiler-filter"; @@ -110,7 +109,6 @@ class PACKED(4) OatHeader { size_t GetHeaderSize() const; bool IsPic() const; - bool HasPatchInfo() const; bool IsDebuggable() const; bool IsNativeDebuggable() const; CompilerFilter::Filter GetCompilerFilter() const; diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc index 0bf713679b..38df427ed1 100644 --- a/runtime/oat_file.cc +++ b/runtime/oat_file.cc @@ -710,7 +710,7 @@ bool DlOpenOatFile::Dlopen(const std::string& elf_filename, return false; } #ifdef ART_TARGET_ANDROID - android_dlextinfo extinfo; + android_dlextinfo extinfo = {}; extinfo.flags = ANDROID_DLEXT_FORCE_LOAD | // Force-load, don't reuse handle // (open oat files multiple // times). @@ -1438,10 +1438,6 @@ void OatFile::OatMethod::LinkMethod(ArtMethod* method) const { method->SetEntryPointFromQuickCompiledCode(GetQuickCode()); } -bool OatFile::HasPatchInfo() const { - return GetOatHeader().HasPatchInfo(); -} - bool OatFile::IsPic() const { return GetOatHeader().IsPic(); // TODO: Check against oat_patches. b/18144996 diff --git a/runtime/oat_file.h b/runtime/oat_file.h index 29add5b2b2..62d99fb51c 100644 --- a/runtime/oat_file.h +++ b/runtime/oat_file.h @@ -104,8 +104,6 @@ class OatFile { return is_executable_; } - bool HasPatchInfo() const; - bool IsPic() const; // Indicates whether the oat file was compiled with full debugging capability. 
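The oat-header and OatFile changes above drop the has-patch-info key and HasPatchInfo(), and the oat_file_assistant changes that follow remove the patchoat relocation path entirely, so dex2oat becomes the only way to refresh an oat file whose relocation is out of date. A rough sketch of the resulting decision shape, using hypothetical stand-ins for the real OatFileAssistant::OatFileInfo accessors rather than the actual code:

// Sketch only, assuming boolean inputs; the real OatFileInfo consults Status(),
// the requested compiler filter, and HasOriginalDexFiles(). The point is that the
// former "patch info present -> kPatchoatForRelocation" branch no longer exists,
// so an out-of-date relocation now always maps to a dex2oat request.
enum DexOptNeeded { kNoDexOptNeeded = 0, kDex2OatForRelocation = 4 };

DexOptNeeded RelocationDecisionSketch(bool compiler_filter_okay, bool relocation_out_of_date) {
  if (compiler_filter_okay && relocation_out_of_date) {
    return kDex2OatForRelocation;
  }
  return kNoDexOptNeeded;
}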
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc index ee7cf9deef..f12a5e7742 100644 --- a/runtime/oat_file_assistant.cc +++ b/runtime/oat_file_assistant.cc @@ -203,10 +203,6 @@ OatFileAssistant::MakeUpToDate(bool profile_changed, std::string* error_msg) { case kDex2OatForRelocation: case kDex2OatForFilter: return GenerateOatFile(error_msg); - - case kPatchoatForRelocation: { - return RelocateOatFile(info.Filename(), error_msg); - } } UNREACHABLE(); } @@ -420,58 +416,6 @@ OatFileAssistant::OatStatus OatFileAssistant::GivenOatFileStatus(const OatFile& } OatFileAssistant::ResultOfAttemptToUpdate -OatFileAssistant::RelocateOatFile(const std::string* input_file, std::string* error_msg) { - CHECK(error_msg != nullptr); - - if (input_file == nullptr) { - *error_msg = "Patching of oat file for dex location " + dex_location_ - + " not attempted because the input file name could not be determined."; - return kUpdateNotAttempted; - } - const std::string& input_file_name = *input_file; - - if (oat_.Filename() == nullptr) { - *error_msg = "Patching of oat file for dex location " + dex_location_ - + " not attempted because the oat file name could not be determined."; - return kUpdateNotAttempted; - } - const std::string& oat_file_name = *oat_.Filename(); - - const ImageInfo* image_info = GetImageInfo(); - Runtime* runtime = Runtime::Current(); - if (image_info == nullptr) { - *error_msg = "Patching of oat file " + oat_file_name - + " not attempted because no image location was found."; - return kUpdateNotAttempted; - } - - if (!runtime->IsDex2OatEnabled()) { - *error_msg = "Patching of oat file " + oat_file_name - + " not attempted because dex2oat is disabled"; - return kUpdateNotAttempted; - } - - std::vector<std::string> argv; - argv.push_back(runtime->GetPatchoatExecutable()); - argv.push_back("--instruction-set=" + std::string(GetInstructionSetString(isa_))); - argv.push_back("--input-oat-file=" + input_file_name); - argv.push_back("--output-oat-file=" + oat_file_name); - argv.push_back("--patched-image-location=" + image_info->location); - - std::string command_line(android::base::Join(argv, ' ')); - if (!Exec(argv, error_msg)) { - // Manually delete the file. This ensures there is no garbage left over if - // the process unexpectedly died. - unlink(oat_file_name.c_str()); - return kUpdateFailed; - } - - // Mark that the oat file has changed and we should try to reload. - oat_.Reset(); - return kUpdateSucceeded; -} - -OatFileAssistant::ResultOfAttemptToUpdate OatFileAssistant::GenerateOatFile(std::string* error_msg) { CHECK(error_msg != nullptr); @@ -852,13 +796,7 @@ OatFileAssistant::DexOptNeeded OatFileAssistant::OatFileInfo::GetDexOptNeeded( return kNoDexOptNeeded; } - if (filter_okay && Status() == kOatRelocationOutOfDate && HasPatchInfo()) { - return kPatchoatForRelocation; - } - if (oat_file_assistant_->HasOriginalDexFiles()) { - // Run dex2oat for relocation if we didn't have the patch info necessary - // to use patchoat. 
if (filter_okay && Status() == kOatRelocationOutOfDate) { return kDex2OatForRelocation; } @@ -921,11 +859,6 @@ bool OatFileAssistant::OatFileInfo::IsExecutable() { return (file != nullptr && file->IsExecutable()); } -bool OatFileAssistant::OatFileInfo::HasPatchInfo() { - const OatFile* file = GetFile(); - return (file != nullptr && file->HasPatchInfo()); -} - void OatFileAssistant::OatFileInfo::Reset() { load_attempted_ = false; file_.reset(); diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h index bed1edc6c8..588a698be7 100644 --- a/runtime/oat_file_assistant.h +++ b/runtime/oat_file_assistant.h @@ -67,14 +67,9 @@ class OatFileAssistant { kDex2OatForFilter = 3, // dex2oat should be run to update the apk/jar because the existing code - // is not relocated to match the boot image and does not have the - // necessary patch information to use patchoat. + // is not relocated to match the boot image. // Matches Java: dalvik.system.DexFile.DEX2OAT_FOR_RELOCATION kDex2OatForRelocation = 4, - - // patchoat should be run to update the apk/jar. - // Matches Java: dalvik.system.DexFile.PATCHOAT_FOR_RELOCATION - kPatchoatForRelocation = 5, }; enum OatStatus { @@ -237,15 +232,6 @@ class OatFileAssistant { // Returns the status of the oat file for the dex location. OatStatus OatFileStatus(); - // Generates the oat file by relocation from the named input file. - // This does not check the current status before attempting to relocate the - // oat file. - // - // If the result is not kUpdateSucceeded, the value of error_msg will be set - // to a string describing why there was a failure or the update was not - // attempted. error_msg must not be null. - ResultOfAttemptToUpdate RelocateOatFile(const std::string* input_file, std::string* error_msg); - // Generate the oat file from the dex file using the current runtime // compiler options. // This does not check the current status before attempting to generate the @@ -328,8 +314,6 @@ class OatFileAssistant { // given target_compilation_filter. // profile_changed should be true to indicate the profile has recently // changed for this dex location. - // If patchoat is needed, this function will return the kPatchOatNeeded - // status, not the kSelfPatchOatNeeded status. DexOptNeeded GetDexOptNeeded(CompilerFilter::Filter target_compiler_filter, bool profile_changed); @@ -341,9 +325,6 @@ class OatFileAssistant { // Returns true if the file is opened executable. bool IsExecutable(); - // Returns true if the file has patch info required to run patchoat. - bool HasPatchInfo(); - // Clear any cached information about the file that depends on the // contents of the file. This does not reset the provided filename. 
void Reset(); diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc index 26dbaab367..afa804c08c 100644 --- a/runtime/oat_file_assistant_test.cc +++ b/runtime/oat_file_assistant_test.cc @@ -85,7 +85,6 @@ class OatFileAssistantTest : public Dex2oatEnvironmentTest { CompilerFilter::Filter filter, bool relocate, bool pic, - bool with_patch_info, bool with_alternate_image) { std::string dalvik_cache = GetDalvikCache(GetInstructionSetString(kRuntimeISA)); std::string dalvik_cache_tmp = dalvik_cache + ".redirected"; @@ -111,10 +110,6 @@ class OatFileAssistantTest : public Dex2oatEnvironmentTest { args.push_back("--compile-pic"); } - if (with_patch_info) { - args.push_back("--include-patch-information"); - } - std::string image_location = GetImageLocation(); if (with_alternate_image) { args.push_back("--boot-image=" + GetImageLocation2()); @@ -139,7 +134,6 @@ class OatFileAssistantTest : public Dex2oatEnvironmentTest { &error_msg)); ASSERT_TRUE(odex_file.get() != nullptr) << error_msg; EXPECT_EQ(pic, odex_file->IsPic()); - EXPECT_EQ(with_patch_info, odex_file->HasPatchInfo()); EXPECT_EQ(filter, odex_file->GetCompilerFilter()); std::unique_ptr<ImageHeader> image_header( @@ -181,7 +175,6 @@ class OatFileAssistantTest : public Dex2oatEnvironmentTest { filter, /*relocate*/false, /*pic*/false, - /*with_patch_info*/true, /*with_alternate_image*/false); } @@ -193,21 +186,6 @@ class OatFileAssistantTest : public Dex2oatEnvironmentTest { filter, /*relocate*/false, /*pic*/true, - /*with_patch_info*/false, - /*with_alternate_image*/false); - } - - // Generate a non-PIC odex file without patch information for the purposes - // of test. The generated odex file will be un-relocated. - void GenerateNoPatchOdexForTest(const std::string& dex_location, - const std::string& odex_location, - CompilerFilter::Filter filter) { - GenerateOatForTest(dex_location, - odex_location, - filter, - /*relocate*/false, - /*pic*/false, - /*with_patch_info*/false, /*with_alternate_image*/false); } @@ -216,7 +194,6 @@ class OatFileAssistantTest : public Dex2oatEnvironmentTest { CompilerFilter::Filter filter, bool relocate, bool pic, - bool with_patch_info, bool with_alternate_image) { std::string oat_location; std::string error_msg; @@ -227,7 +204,6 @@ class OatFileAssistantTest : public Dex2oatEnvironmentTest { filter, relocate, pic, - with_patch_info, with_alternate_image); } @@ -237,7 +213,6 @@ class OatFileAssistantTest : public Dex2oatEnvironmentTest { filter, /*relocate*/true, /*pic*/false, - /*with_patch_info*/false, /*with_alternate_image*/false); } @@ -519,7 +494,6 @@ TEST_F(OatFileAssistantTest, OatImageOutOfDate) { CompilerFilter::kSpeed, /*relocate*/true, /*pic*/false, - /*with_patch_info*/false, /*with_alternate_image*/true); OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false); @@ -548,7 +522,6 @@ TEST_F(OatFileAssistantTest, OatVerifyAtRuntimeImageOutOfDate) { CompilerFilter::kVerifyAtRuntime, /*relocate*/true, /*pic*/false, - /*with_patch_info*/false, /*with_alternate_image*/true); OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false); @@ -564,7 +537,6 @@ TEST_F(OatFileAssistantTest, OatVerifyAtRuntimeImageOutOfDate) { } // Case: We have a DEX file and an ODEX file, but no OAT file. -// Expect: The status is kPatchOatNeeded. 
TEST_F(OatFileAssistantTest, DexOdexNoOat) { std::string dex_location = GetScratchDir() + "/DexOdexNoOat.jar"; std::string odex_location = GetOdexDir() + "/DexOdexNoOat.odex"; @@ -578,7 +550,7 @@ TEST_F(OatFileAssistantTest, DexOdexNoOat) { EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded, oat_file_assistant.GetDexOptNeeded(CompilerFilter::kVerifyAtRuntime)); - EXPECT_EQ(-OatFileAssistant::kPatchoatForRelocation, + EXPECT_EQ(-OatFileAssistant::kDex2OatForRelocation, oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed)); EXPECT_FALSE(oat_file_assistant.IsInBootClassPath()); @@ -591,15 +563,14 @@ TEST_F(OatFileAssistantTest, DexOdexNoOat) { ASSERT_TRUE(oat_file.get() != nullptr); } -// Case: We have a stripped DEX file and an ODEX file, but no OAT file. -// Expect: The status is kPatchOatNeeded +// Case: We have a stripped DEX file and a PIC ODEX file, but no OAT file. TEST_F(OatFileAssistantTest, StrippedDexOdexNoOat) { std::string dex_location = GetScratchDir() + "/StrippedDexOdexNoOat.jar"; std::string odex_location = GetOdexDir() + "/StrippedDexOdexNoOat.odex"; // Create the dex and odex files Copy(GetDexSrc1(), dex_location); - GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed); + GeneratePicOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed); // Strip the dex file Copy(GetStrippedDexSrc1(), dex_location); @@ -607,28 +578,14 @@ TEST_F(OatFileAssistantTest, StrippedDexOdexNoOat) { // Verify the status. OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, true); - EXPECT_EQ(-OatFileAssistant::kPatchoatForRelocation, + EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded, oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed)); EXPECT_FALSE(oat_file_assistant.IsInBootClassPath()); - EXPECT_EQ(OatFileAssistant::kOatRelocationOutOfDate, oat_file_assistant.OdexFileStatus()); + EXPECT_EQ(OatFileAssistant::kOatUpToDate, oat_file_assistant.OdexFileStatus()); EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OatFileStatus()); EXPECT_FALSE(oat_file_assistant.HasOriginalDexFiles()); - // Make the oat file up to date. - std::string error_msg; - Runtime::Current()->AddCompilerOption("--compiler-filter=speed"); - ASSERT_EQ(OatFileAssistant::kUpdateSucceeded, - oat_file_assistant.MakeUpToDate(false, &error_msg)) << error_msg; - - EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded, - oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed)); - - EXPECT_FALSE(oat_file_assistant.IsInBootClassPath()); - EXPECT_EQ(OatFileAssistant::kOatRelocationOutOfDate, oat_file_assistant.OdexFileStatus()); - EXPECT_EQ(OatFileAssistant::kOatUpToDate, oat_file_assistant.OatFileStatus()); - EXPECT_FALSE(oat_file_assistant.HasOriginalDexFiles()); - // Verify we can load the dex files from it. std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile(); ASSERT_TRUE(oat_file.get() != nullptr); @@ -638,8 +595,7 @@ TEST_F(OatFileAssistantTest, StrippedDexOdexNoOat) { EXPECT_EQ(1u, dex_files.size()); } -// Case: We have a stripped DEX file, an ODEX file, and an out-of-date OAT file. -// Expect: The status is kPatchOatNeeded. +// Case: We have a stripped DEX file, a PIC ODEX file, and an out-of-date OAT file. 
TEST_F(OatFileAssistantTest, StrippedDexOdexOat) { std::string dex_location = GetScratchDir() + "/StrippedDexOdexOat.jar"; std::string odex_location = GetOdexDir() + "/StrippedDexOdexOat.odex"; @@ -650,7 +606,7 @@ TEST_F(OatFileAssistantTest, StrippedDexOdexOat) { // Create the odex file Copy(GetDexSrc1(), dex_location); - GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed); + GeneratePicOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed); // Strip the dex file. Copy(GetStrippedDexSrc1(), dex_location); @@ -660,30 +616,14 @@ TEST_F(OatFileAssistantTest, StrippedDexOdexOat) { EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded, oat_file_assistant.GetDexOptNeeded(CompilerFilter::kVerifyAtRuntime)); - EXPECT_EQ(-OatFileAssistant::kPatchoatForRelocation, - oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed)); - EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded, // Can't run dex2oat because dex file is stripped. - oat_file_assistant.GetDexOptNeeded(CompilerFilter::kEverything)); - - EXPECT_FALSE(oat_file_assistant.IsInBootClassPath()); - EXPECT_EQ(OatFileAssistant::kOatRelocationOutOfDate, oat_file_assistant.OdexFileStatus()); - EXPECT_EQ(OatFileAssistant::kOatDexOutOfDate, oat_file_assistant.OatFileStatus()); - EXPECT_FALSE(oat_file_assistant.HasOriginalDexFiles()); - - // Make the oat file up to date. - std::string error_msg; - Runtime::Current()->AddCompilerOption("--compiler-filter=speed"); - ASSERT_EQ(OatFileAssistant::kUpdateSucceeded, - oat_file_assistant.MakeUpToDate(false, &error_msg)) << error_msg; - EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded, oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed)); EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded, // Can't run dex2oat because dex file is stripped. oat_file_assistant.GetDexOptNeeded(CompilerFilter::kEverything)); EXPECT_FALSE(oat_file_assistant.IsInBootClassPath()); - EXPECT_EQ(OatFileAssistant::kOatRelocationOutOfDate, oat_file_assistant.OdexFileStatus()); - EXPECT_EQ(OatFileAssistant::kOatUpToDate, oat_file_assistant.OatFileStatus()); + EXPECT_EQ(OatFileAssistant::kOatUpToDate, oat_file_assistant.OdexFileStatus()); + EXPECT_EQ(OatFileAssistant::kOatDexOutOfDate, oat_file_assistant.OatFileStatus()); EXPECT_FALSE(oat_file_assistant.HasOriginalDexFiles()); // Verify we can load the dex files from it. @@ -732,90 +672,9 @@ TEST_F(OatFileAssistantTest, ResourceOnlyDex) { EXPECT_FALSE(oat_file_assistant.HasOriginalDexFiles()); } -// Case: We have a DEX file, no ODEX file and an OAT file that needs -// relocation. -// Expect: The status is kSelfPatchOatNeeded. 
-TEST_F(OatFileAssistantTest, SelfRelocation) { - std::string dex_location = GetScratchDir() + "/SelfRelocation.jar"; - std::string oat_location = GetOdexDir() + "/SelfRelocation.oat"; - - // Create the dex and odex files - Copy(GetDexSrc1(), dex_location); - GenerateOdexForTest(dex_location, oat_location, CompilerFilter::kSpeed); - - OatFileAssistant oat_file_assistant(dex_location.c_str(), - oat_location.c_str(), kRuntimeISA, true); - - EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded, - oat_file_assistant.GetDexOptNeeded(CompilerFilter::kInterpretOnly)); - EXPECT_EQ(OatFileAssistant::kPatchoatForRelocation, - oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed)); - EXPECT_EQ(OatFileAssistant::kDex2OatForFilter, - oat_file_assistant.GetDexOptNeeded(CompilerFilter::kEverything)); - - EXPECT_FALSE(oat_file_assistant.IsInBootClassPath()); - EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OdexFileStatus()); - EXPECT_EQ(OatFileAssistant::kOatRelocationOutOfDate, oat_file_assistant.OatFileStatus()); - EXPECT_TRUE(oat_file_assistant.HasOriginalDexFiles()); - - // Make the oat file up to date. - std::string error_msg; - Runtime::Current()->AddCompilerOption("--compiler-filter=speed"); - ASSERT_EQ(OatFileAssistant::kUpdateSucceeded, - oat_file_assistant.MakeUpToDate(false, &error_msg)) << error_msg; - - EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded, - oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed)); - - EXPECT_FALSE(oat_file_assistant.IsInBootClassPath()); - EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OdexFileStatus()); - EXPECT_EQ(OatFileAssistant::kOatUpToDate, oat_file_assistant.OatFileStatus()); - EXPECT_TRUE(oat_file_assistant.HasOriginalDexFiles()); - - std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile(); - ASSERT_TRUE(oat_file.get() != nullptr); - EXPECT_TRUE(oat_file->IsExecutable()); - std::vector<std::unique_ptr<const DexFile>> dex_files; - dex_files = oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str()); - EXPECT_EQ(1u, dex_files.size()); -} - -// Case: We have a DEX file, no ODEX file and an OAT file that needs -// relocation but doesn't have patch info. -// Expect: The status is kDex2OatNeeded, because we can't run patchoat. -TEST_F(OatFileAssistantTest, NoSelfRelocation) { - std::string dex_location = GetScratchDir() + "/NoSelfRelocation.jar"; - std::string oat_location = GetOdexDir() + "/NoSelfRelocation.oat"; - - // Create the dex and odex files - Copy(GetDexSrc1(), dex_location); - GenerateNoPatchOdexForTest(dex_location, oat_location, CompilerFilter::kSpeed); - - OatFileAssistant oat_file_assistant(dex_location.c_str(), - oat_location.c_str(), kRuntimeISA, true); - - EXPECT_EQ(OatFileAssistant::kDex2OatForRelocation, - oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed)); - - // Make the oat file up to date. 
- std::string error_msg; - Runtime::Current()->AddCompilerOption("--compiler-filter=speed"); - ASSERT_EQ(OatFileAssistant::kUpdateSucceeded, - oat_file_assistant.MakeUpToDate(false, &error_msg)) << error_msg; - EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded, - oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed)); - - std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile(); - ASSERT_TRUE(oat_file.get() != nullptr); - EXPECT_TRUE(oat_file->IsExecutable()); - std::vector<std::unique_ptr<const DexFile>> dex_files; - dex_files = oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str()); - EXPECT_EQ(1u, dex_files.size()); -} - // Case: We have a DEX file, an ODEX file and an OAT file, where the ODEX and // OAT files both have patch delta of 0. -// Expect: It shouldn't crash, and status is kSelfPatchOatNeeded. +// Expect: It shouldn't crash. TEST_F(OatFileAssistantTest, OdexOatOverlap) { std::string dex_location = GetScratchDir() + "/OdexOatOverlap.jar"; std::string odex_location = GetOdexDir() + "/OdexOatOverlap.odex"; @@ -833,10 +692,10 @@ TEST_F(OatFileAssistantTest, OdexOatOverlap) { OatFileAssistant oat_file_assistant(dex_location.c_str(), oat_location.c_str(), kRuntimeISA, true); - // kPatchoatForRelocation is expected rather than -kPatchoatForRelocation + // kDex2OatForRelocation is expected rather than -kDex2OatForRelocation // based on the assumption that the oat location is more up-to-date than the odex // location, even if they both need relocation. - EXPECT_EQ(OatFileAssistant::kPatchoatForRelocation, + EXPECT_EQ(OatFileAssistant::kDex2OatForRelocation, oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed)); EXPECT_FALSE(oat_file_assistant.IsInBootClassPath()); @@ -1285,7 +1144,6 @@ TEST_F(OatFileAssistantTest, DexOptStatusValues) { {OatFileAssistant::kDex2OatForBootImage, "DEX2OAT_FOR_BOOT_IMAGE"}, {OatFileAssistant::kDex2OatForFilter, "DEX2OAT_FOR_FILTER"}, {OatFileAssistant::kDex2OatForRelocation, "DEX2OAT_FOR_RELOCATION"}, - {OatFileAssistant::kPatchoatForRelocation, "PATCHOAT_FOR_RELOCATION"} }; ScopedObjectAccess soa(Thread::Current()); diff --git a/runtime/openjdkjvmti/Android.bp b/runtime/openjdkjvmti/Android.bp index 6105fecbe5..be06dd7b4c 100644 --- a/runtime/openjdkjvmti/Android.bp +++ b/runtime/openjdkjvmti/Android.bp @@ -24,6 +24,8 @@ cc_defaults { "ti_field.cc", "ti_heap.cc", "ti_method.cc", + "ti_object.cc", + "ti_properties.cc", "ti_stack.cc", "ti_redefine.cc", "transform.cc"], diff --git a/runtime/openjdkjvmti/OpenjdkJvmTi.cc b/runtime/openjdkjvmti/OpenjdkJvmTi.cc index faaeff361d..936049fe3d 100644 --- a/runtime/openjdkjvmti/OpenjdkJvmTi.cc +++ b/runtime/openjdkjvmti/OpenjdkJvmTi.cc @@ -50,6 +50,8 @@ #include "ti_field.h" #include "ti_heap.h" #include "ti_method.h" +#include "ti_object.h" +#include "ti_properties.h" #include "ti_redefine.h" #include "ti_stack.h" #include "transform.h" @@ -598,7 +600,7 @@ class JvmtiFunctions { static jvmtiError IsModifiableClass(jvmtiEnv* env, jclass klass, jboolean* is_modifiable_class_ptr) { - return ERR(NOT_IMPLEMENTED); + return Redefiner::IsModifiableClass(env, klass, is_modifiable_class_ptr); } static jvmtiError GetClassLoader(jvmtiEnv* env, jclass klass, jobject* classloader_ptr) { @@ -622,11 +624,11 @@ class JvmtiFunctions { } static jvmtiError GetObjectSize(jvmtiEnv* env, jobject object, jlong* size_ptr) { - return ERR(NOT_IMPLEMENTED); + return ObjectUtil::GetObjectSize(env, object, size_ptr); } static jvmtiError GetObjectHashCode(jvmtiEnv* env, jobject object, jint* hash_code_ptr) { - return 
ERR(NOT_IMPLEMENTED); + return ObjectUtil::GetObjectHashCode(env, object, hash_code_ptr); } static jvmtiError GetObjectMonitorUsage(jvmtiEnv* env, @@ -688,13 +690,13 @@ class JvmtiFunctions { static jvmtiError GetMaxLocals(jvmtiEnv* env, jmethodID method, jint* max_ptr) { - return ERR(NOT_IMPLEMENTED); + return MethodUtil::GetMaxLocals(env, method, max_ptr); } static jvmtiError GetArgumentsSize(jvmtiEnv* env, jmethodID method, jint* size_ptr) { - return ERR(NOT_IMPLEMENTED); + return MethodUtil::GetArgumentsSize(env, method, size_ptr); } static jvmtiError GetLineNumberTable(jvmtiEnv* env, @@ -708,7 +710,7 @@ class JvmtiFunctions { jmethodID method, jlocation* start_location_ptr, jlocation* end_location_ptr) { - return ERR(NOT_IMPLEMENTED); + return MethodUtil::GetMethodLocation(env, method, start_location_ptr, end_location_ptr); } static jvmtiError GetLocalVariableTable(jvmtiEnv* env, @@ -726,15 +728,15 @@ class JvmtiFunctions { } static jvmtiError IsMethodNative(jvmtiEnv* env, jmethodID method, jboolean* is_native_ptr) { - return ERR(NOT_IMPLEMENTED); + return MethodUtil::IsMethodNative(env, method, is_native_ptr); } static jvmtiError IsMethodSynthetic(jvmtiEnv* env, jmethodID method, jboolean* is_synthetic_ptr) { - return ERR(NOT_IMPLEMENTED); + return MethodUtil::IsMethodSynthetic(env, method, is_synthetic_ptr); } static jvmtiError IsMethodObsolete(jvmtiEnv* env, jmethodID method, jboolean* is_obsolete_ptr) { - return ERR(NOT_IMPLEMENTED); + return MethodUtil::IsMethodObsolete(env, method, is_obsolete_ptr); } static jvmtiError SetNativeMethodPrefix(jvmtiEnv* env, const char* prefix) { @@ -839,19 +841,28 @@ class JvmtiFunctions { static jvmtiError GetExtensionFunctions(jvmtiEnv* env, jint* extension_count_ptr, jvmtiExtensionFunctionInfo** extensions) { - return ERR(NOT_IMPLEMENTED); + // We do not have any extension functions. + *extension_count_ptr = 0; + *extensions = nullptr; + + return ERR(NONE); } static jvmtiError GetExtensionEvents(jvmtiEnv* env, jint* extension_count_ptr, jvmtiExtensionEventInfo** extensions) { - return ERR(NOT_IMPLEMENTED); + // We do not have any extension events. + *extension_count_ptr = 0; + *extensions = nullptr; + + return ERR(NONE); } static jvmtiError SetExtensionEventCallback(jvmtiEnv* env, jint extension_event_index, jvmtiExtensionEvent callback) { - return ERR(NOT_IMPLEMENTED); + // We do not have any extension events, so any call is illegal. 
+ return ERR(ILLEGAL_ARGUMENT); } static jvmtiError GetPotentialCapabilities(jvmtiEnv* env, jvmtiCapabilities* capabilities_ptr) { @@ -1024,15 +1035,15 @@ class JvmtiFunctions { } static jvmtiError GetSystemProperties(jvmtiEnv* env, jint* count_ptr, char*** property_ptr) { - return ERR(NOT_IMPLEMENTED); + return PropertiesUtil::GetSystemProperties(env, count_ptr, property_ptr); } static jvmtiError GetSystemProperty(jvmtiEnv* env, const char* property, char** value_ptr) { - return ERR(NOT_IMPLEMENTED); + return PropertiesUtil::GetSystemProperty(env, property, value_ptr); } static jvmtiError SetSystemProperty(jvmtiEnv* env, const char* property, const char* value) { - return ERR(NOT_IMPLEMENTED); + return PropertiesUtil::SetSystemProperty(env, property, value); } static jvmtiError GetPhase(jvmtiEnv* env, jvmtiPhase* phase_ptr) { @@ -1180,7 +1191,7 @@ class JvmtiFunctions { reinterpret_cast<uint8_t*>(dex_file), &error); if (ret != OK) { - LOG(ERROR) << "FAILURE TO REDEFINE " << error; + LOG(WARNING) << "FAILURE TO REDEFINE " << error; } return ret; } diff --git a/runtime/openjdkjvmti/art_jvmti.h b/runtime/openjdkjvmti/art_jvmti.h index 48b29a3ac1..5eadc5a8e0 100644 --- a/runtime/openjdkjvmti/art_jvmti.h +++ b/runtime/openjdkjvmti/art_jvmti.h @@ -105,9 +105,10 @@ class JvmtiDeleter { using JvmtiUniquePtr = std::unique_ptr<unsigned char, JvmtiDeleter>; +template <typename T> ALWAYS_INLINE -static inline JvmtiUniquePtr MakeJvmtiUniquePtr(jvmtiEnv* env, unsigned char* mem) { - return JvmtiUniquePtr(mem, JvmtiDeleter(env)); +static inline JvmtiUniquePtr MakeJvmtiUniquePtr(jvmtiEnv* env, T* mem) { + return JvmtiUniquePtr(reinterpret_cast<unsigned char*>(mem), JvmtiDeleter(env)); } ALWAYS_INLINE diff --git a/runtime/openjdkjvmti/ti_method.cc b/runtime/openjdkjvmti/ti_method.cc index 02b609049f..2ddd64a2bc 100644 --- a/runtime/openjdkjvmti/ti_method.cc +++ b/runtime/openjdkjvmti/ti_method.cc @@ -41,6 +41,64 @@ namespace openjdkjvmti { +jvmtiError MethodUtil::GetArgumentsSize(jvmtiEnv* env ATTRIBUTE_UNUSED, + jmethodID method, + jint* size_ptr) { + if (method == nullptr) { + return ERR(INVALID_METHODID); + } + art::ArtMethod* art_method = art::jni::DecodeArtMethod(method); + + if (art_method->IsNative()) { + return ERR(NATIVE_METHOD); + } + + if (size_ptr == nullptr) { + return ERR(NULL_POINTER); + } + + art::ScopedObjectAccess soa(art::Thread::Current()); + if (art_method->IsProxyMethod() || art_method->IsAbstract()) { + // This isn't specified as an error case, so return 0. + *size_ptr = 0; + return ERR(NONE); + } + + DCHECK_NE(art_method->GetCodeItemOffset(), 0u); + *size_ptr = art_method->GetCodeItem()->ins_size_; + + return ERR(NONE); +} + +jvmtiError MethodUtil::GetMaxLocals(jvmtiEnv* env ATTRIBUTE_UNUSED, + jmethodID method, + jint* max_ptr) { + if (method == nullptr) { + return ERR(INVALID_METHODID); + } + art::ArtMethod* art_method = art::jni::DecodeArtMethod(method); + + if (art_method->IsNative()) { + return ERR(NATIVE_METHOD); + } + + if (max_ptr == nullptr) { + return ERR(NULL_POINTER); + } + + art::ScopedObjectAccess soa(art::Thread::Current()); + if (art_method->IsProxyMethod() || art_method->IsAbstract()) { + // This isn't specified as an error case, so return 0. 
+ *max_ptr = 0; + return ERR(NONE); + } + + DCHECK_NE(art_method->GetCodeItemOffset(), 0u); + *max_ptr = art_method->GetCodeItem()->registers_size_; + + return ERR(NONE); +} + jvmtiError MethodUtil::GetMethodName(jvmtiEnv* env, jmethodID method, char** name_ptr, @@ -107,6 +165,38 @@ jvmtiError MethodUtil::GetMethodDeclaringClass(jvmtiEnv* env ATTRIBUTE_UNUSED, return ERR(NONE); } +jvmtiError MethodUtil::GetMethodLocation(jvmtiEnv* env ATTRIBUTE_UNUSED, + jmethodID method, + jlocation* start_location_ptr, + jlocation* end_location_ptr) { + if (method == nullptr) { + return ERR(INVALID_METHODID); + } + art::ArtMethod* art_method = art::jni::DecodeArtMethod(method); + + if (art_method->IsNative()) { + return ERR(NATIVE_METHOD); + } + + if (start_location_ptr == nullptr || end_location_ptr == nullptr) { + return ERR(NULL_POINTER); + } + + art::ScopedObjectAccess soa(art::Thread::Current()); + if (art_method->IsProxyMethod() || art_method->IsAbstract()) { + // This isn't specified as an error case, so return 0/0. + *start_location_ptr = 0; + *end_location_ptr = 0; + return ERR(NONE); + } + + DCHECK_NE(art_method->GetCodeItemOffset(), 0u); + *start_location_ptr = 0; + *end_location_ptr = art_method->GetCodeItem()->insns_size_in_code_units_ - 1; + + return ERR(NONE); +} + jvmtiError MethodUtil::GetMethodModifiers(jvmtiEnv* env ATTRIBUTE_UNUSED, jmethodID method, jint* modifiers_ptr) { @@ -190,4 +280,43 @@ jvmtiError MethodUtil::GetLineNumberTable(jvmtiEnv* env, return ERR(NONE); } +template <typename T> +static jvmtiError IsMethodT(jvmtiEnv* env ATTRIBUTE_UNUSED, + jmethodID method, + T test, + jboolean* is_t_ptr) { + if (method == nullptr) { + return ERR(INVALID_METHODID); + } + if (is_t_ptr == nullptr) { + return ERR(NULL_POINTER); + } + + art::ArtMethod* art_method = art::jni::DecodeArtMethod(method); + *is_t_ptr = test(art_method) ? 
JNI_TRUE : JNI_FALSE; + + return ERR(NONE); +} + +jvmtiError MethodUtil::IsMethodNative(jvmtiEnv* env, jmethodID m, jboolean* is_native_ptr) { + auto test = [](art::ArtMethod* method) { + return method->IsNative(); + }; + return IsMethodT(env, m, test, is_native_ptr); +} + +jvmtiError MethodUtil::IsMethodObsolete(jvmtiEnv* env, jmethodID m, jboolean* is_obsolete_ptr) { + auto test = [](art::ArtMethod* method) { + return method->IsObsolete(); + }; + return IsMethodT(env, m, test, is_obsolete_ptr); +} + +jvmtiError MethodUtil::IsMethodSynthetic(jvmtiEnv* env, jmethodID m, jboolean* is_synthetic_ptr) { + auto test = [](art::ArtMethod* method) { + return method->IsSynthetic(); + }; + return IsMethodT(env, m, test, is_synthetic_ptr); +} + } // namespace openjdkjvmti diff --git a/runtime/openjdkjvmti/ti_method.h b/runtime/openjdkjvmti/ti_method.h index fb2fbb2b27..e5c1705ada 100644 --- a/runtime/openjdkjvmti/ti_method.h +++ b/runtime/openjdkjvmti/ti_method.h @@ -39,6 +39,10 @@ namespace openjdkjvmti { class MethodUtil { public: + static jvmtiError GetArgumentsSize(jvmtiEnv* env, jmethodID method, jint* size_ptr); + + static jvmtiError GetMaxLocals(jvmtiEnv* env, jmethodID method, jint* max_ptr); + static jvmtiError GetMethodName(jvmtiEnv* env, jmethodID method, char** name_ptr, @@ -49,6 +53,11 @@ class MethodUtil { jmethodID method, jclass* declaring_class_ptr); + static jvmtiError GetMethodLocation(jvmtiEnv* env, + jmethodID method, + jlocation* start_location_ptr, + jlocation* end_location_ptr); + static jvmtiError GetMethodModifiers(jvmtiEnv* env, jmethodID method, jint* modifiers_ptr); @@ -57,6 +66,10 @@ class MethodUtil { jmethodID method, jint* entry_count_ptr, jvmtiLineNumberEntry** table_ptr); + + static jvmtiError IsMethodNative(jvmtiEnv* env, jmethodID method, jboolean* is_native_ptr); + static jvmtiError IsMethodObsolete(jvmtiEnv* env, jmethodID method, jboolean* is_obsolete_ptr); + static jvmtiError IsMethodSynthetic(jvmtiEnv* env, jmethodID method, jboolean* is_synthetic_ptr); }; } // namespace openjdkjvmti diff --git a/runtime/openjdkjvmti/ti_object.cc b/runtime/openjdkjvmti/ti_object.cc new file mode 100644 index 0000000000..bf84499035 --- /dev/null +++ b/runtime/openjdkjvmti/ti_object.cc @@ -0,0 +1,76 @@ +/* Copyright (C) 2017 The Android Open Source Project + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This file implements interfaces from the file jvmti.h. This implementation + * is licensed under the same terms as the file jvmti.h. The + * copyright and license information for the file jvmti.h follows. + * + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "ti_object.h" + +#include "art_jvmti.h" +#include "mirror/object-inl.h" +#include "scoped_thread_state_change-inl.h" +#include "thread-inl.h" + +namespace openjdkjvmti { + +jvmtiError ObjectUtil::GetObjectSize(jvmtiEnv* env ATTRIBUTE_UNUSED, + jobject jobject, + jlong* size_ptr) { + if (jobject == nullptr) { + return ERR(INVALID_OBJECT); + } + if (size_ptr == nullptr) { + return ERR(NULL_POINTER); + } + + art::ScopedObjectAccess soa(art::Thread::Current()); + art::ObjPtr<art::mirror::Object> object = soa.Decode<art::mirror::Object>(jobject); + + *size_ptr = object->SizeOf(); + return ERR(NONE); +} + +jvmtiError ObjectUtil::GetObjectHashCode(jvmtiEnv* env ATTRIBUTE_UNUSED, + jobject jobject, + jint* hash_code_ptr) { + if (jobject == nullptr) { + return ERR(INVALID_OBJECT); + } + if (hash_code_ptr == nullptr) { + return ERR(NULL_POINTER); + } + + art::ScopedObjectAccess soa(art::Thread::Current()); + art::ObjPtr<art::mirror::Object> object = soa.Decode<art::mirror::Object>(jobject); + + *hash_code_ptr = object->IdentityHashCode(); + + return ERR(NONE); +} + +} // namespace openjdkjvmti diff --git a/runtime/openjdkjvmti/ti_object.h b/runtime/openjdkjvmti/ti_object.h new file mode 100644 index 0000000000..09eee61bdb --- /dev/null +++ b/runtime/openjdkjvmti/ti_object.h @@ -0,0 +1,49 @@ +/* Copyright (C) 2017 The Android Open Source Project + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This file implements interfaces from the file jvmti.h. This implementation + * is licensed under the same terms as the file jvmti.h. The + * copyright and license information for the file jvmti.h follows. + * + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef ART_RUNTIME_OPENJDKJVMTI_TI_OBJECT_H_ +#define ART_RUNTIME_OPENJDKJVMTI_TI_OBJECT_H_ + +#include "jni.h" +#include "jvmti.h" + +namespace openjdkjvmti { + +class ObjectUtil { + public: + static jvmtiError GetObjectSize(jvmtiEnv* env, jobject object, jlong* size_ptr); + + static jvmtiError GetObjectHashCode(jvmtiEnv* env, jobject object, jint* hash_code_ptr); +}; + +} // namespace openjdkjvmti + +#endif // ART_RUNTIME_OPENJDKJVMTI_TI_OBJECT_H_ diff --git a/runtime/openjdkjvmti/ti_properties.cc b/runtime/openjdkjvmti/ti_properties.cc new file mode 100644 index 0000000000..46b9e71b13 --- /dev/null +++ b/runtime/openjdkjvmti/ti_properties.cc @@ -0,0 +1,192 @@ +/* Copyright (C) 2017 The Android Open Source Project + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This file implements interfaces from the file jvmti.h. This implementation + * is licensed under the same terms as the file jvmti.h. The + * copyright and license information for the file jvmti.h follows. + * + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "ti_properties.h" + +#include <string.h> +#include <vector> + +#include "art_jvmti.h" +#include "runtime.h" + +namespace openjdkjvmti { + +// Hardcoded properties. Tests ensure that these are consistent with libcore's view, as seen +// in System.java and AndroidHardcodedSystemProperties.java. +static constexpr const char* kProperties[][2] = { + // Recommended by the spec. + { "java.vm.vendor", "The Android Project" }, + { "java.vm.version", "2.1.0" }, // This is Runtime::GetVersion(). + { "java.vm.name", "Dalvik" }, + // Android does not provide java.vm.info. + // + // These are other values provided by AndroidHardcodedSystemProperties. 
+ { "java.class.version", "50.0" }, + { "java.version", "0" }, + { "java.compiler", "" }, + { "java.ext.dirs", "" }, + + { "java.specification.name", "Dalvik Core Library" }, + { "java.specification.vendor", "The Android Project" }, + { "java.specification.version", "0.9" }, + + { "java.vendor", "The Android Project" }, + { "java.vendor.url", "http://www.android.com/" }, + { "java.vm.name", "Dalvik" }, + { "java.vm.specification.name", "Dalvik Virtual Machine Specification" }, + { "java.vm.specification.vendor", "The Android Project" }, + { "java.vm.specification.version", "0.9" }, + { "java.vm.vendor", "The Android Project" }, + + { "java.vm.vendor.url", "http://www.android.com/" }, + + { "java.net.preferIPv6Addresses", "false" }, + + { "file.encoding", "UTF-8" }, + + { "file.separator", "/" }, + { "line.separator", "\n" }, + { "path.separator", ":" }, + + { "os.name", "Linux" }, +}; +static constexpr size_t kPropertiesSize = arraysize(kProperties); +static constexpr const char* kPropertyLibraryPath = "java.library.path"; +static constexpr const char* kPropertyClassPath = "java.class.path"; + +static jvmtiError Copy(jvmtiEnv* env, const char* in, char** out) { + unsigned char* data = nullptr; + jvmtiError result = CopyString(env, in, &data); + *out = reinterpret_cast<char*>(data); + return result; +} + +jvmtiError PropertiesUtil::GetSystemProperties(jvmtiEnv* env, + jint* count_ptr, + char*** property_ptr) { + if (count_ptr == nullptr || property_ptr == nullptr) { + return ERR(NULL_POINTER); + } + unsigned char* array_data; + jvmtiError array_alloc_result = env->Allocate((kPropertiesSize + 2) * sizeof(char*), &array_data); + if (array_alloc_result != ERR(NONE)) { + return array_alloc_result; + } + JvmtiUniquePtr array_data_ptr = MakeJvmtiUniquePtr(env, array_data); + char** array = reinterpret_cast<char**>(array_data); + + std::vector<JvmtiUniquePtr> property_copies; + + { + char* libpath_data; + jvmtiError libpath_result = Copy(env, kPropertyLibraryPath, &libpath_data); + if (libpath_result != ERR(NONE)) { + return libpath_result; + } + array[0] = libpath_data; + property_copies.push_back(MakeJvmtiUniquePtr(env, libpath_data)); + } + + { + char* classpath_data; + jvmtiError classpath_result = Copy(env, kPropertyClassPath, &classpath_data); + if (classpath_result != ERR(NONE)) { + return classpath_result; + } + array[1] = classpath_data; + property_copies.push_back(MakeJvmtiUniquePtr(env, classpath_data)); + } + + for (size_t i = 0; i != kPropertiesSize; ++i) { + char* data; + jvmtiError data_result = Copy(env, kProperties[i][0], &data); + if (data_result != ERR(NONE)) { + return data_result; + } + array[i + 2] = data; + property_copies.push_back(MakeJvmtiUniquePtr(env, data)); + } + + // Everything is OK, release the data. + array_data_ptr.release(); + for (auto& uptr : property_copies) { + uptr.release(); + } + + *count_ptr = kPropertiesSize + 2; + *property_ptr = array; + + return ERR(NONE); +} + +jvmtiError PropertiesUtil::GetSystemProperty(jvmtiEnv* env, + const char* property, + char** value_ptr) { + if (property == nullptr || value_ptr == nullptr) { + return ERR(NULL_POINTER); + } + + if (strcmp(property, kPropertyLibraryPath) == 0) { + // TODO: In the live phase, we should probably compare to System.getProperty. java.library.path + // may not be set initially, and is then freely modifiable. 
+ const std::vector<std::string>& runtime_props = art::Runtime::Current()->GetProperties(); + for (const std::string& prop_assignment : runtime_props) { + size_t assign_pos = prop_assignment.find('='); + if (assign_pos != std::string::npos && assign_pos > 0) { + if (prop_assignment.substr(0, assign_pos) == kPropertyLibraryPath) { + return Copy(env, prop_assignment.substr(assign_pos + 1).c_str(), value_ptr); + } + } + } + return ERR(NOT_AVAILABLE); + } + + if (strcmp(property, kPropertyClassPath) == 0) { + return Copy(env, art::Runtime::Current()->GetClassPathString().c_str(), value_ptr); + } + + for (size_t i = 0; i != kPropertiesSize; ++i) { + if (strcmp(property, kProperties[i][0]) == 0) { + return Copy(env, kProperties[i][1], value_ptr); + } + } + + return ERR(NOT_AVAILABLE); +} + +jvmtiError PropertiesUtil::SetSystemProperty(jvmtiEnv* env ATTRIBUTE_UNUSED, + const char* property ATTRIBUTE_UNUSED, + const char* value ATTRIBUTE_UNUSED) { + // We do not allow manipulation of any property here. + return ERR(NOT_AVAILABLE); +} + +} // namespace openjdkjvmti diff --git a/runtime/openjdkjvmti/ti_properties.h b/runtime/openjdkjvmti/ti_properties.h new file mode 100644 index 0000000000..70734813bd --- /dev/null +++ b/runtime/openjdkjvmti/ti_properties.h @@ -0,0 +1,51 @@ +/* Copyright (C) 2017 The Android Open Source Project + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This file implements interfaces from the file jvmti.h. This implementation + * is licensed under the same terms as the file jvmti.h. The + * copyright and license information for the file jvmti.h follows. + * + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef ART_RUNTIME_OPENJDKJVMTI_TI_PROPERTIES_H_ +#define ART_RUNTIME_OPENJDKJVMTI_TI_PROPERTIES_H_ + +#include "jni.h" +#include "jvmti.h" + +namespace openjdkjvmti { + +class PropertiesUtil { + public: + static jvmtiError GetSystemProperties(jvmtiEnv* env, jint* count_ptr, char*** property_ptr); + + static jvmtiError GetSystemProperty(jvmtiEnv* env, const char* property, char** value_ptr); + + static jvmtiError SetSystemProperty(jvmtiEnv* env, const char* property, const char* value); +}; + +} // namespace openjdkjvmti + +#endif // ART_RUNTIME_OPENJDKJVMTI_TI_PROPERTIES_H_ diff --git a/runtime/openjdkjvmti/ti_redefine.cc b/runtime/openjdkjvmti/ti_redefine.cc index 926819d1d0..5bf844564c 100644 --- a/runtime/openjdkjvmti/ti_redefine.cc +++ b/runtime/openjdkjvmti/ti_redefine.cc @@ -37,6 +37,8 @@ #include "art_jvmti.h" #include "base/logging.h" +#include "dex_file.h" +#include "dex_file_types.h" #include "events-inl.h" #include "gc/allocation_listener.h" #include "gc/heap.h" @@ -64,19 +66,14 @@ class ObsoleteMethodStackVisitor : public art::StackVisitor { art::Thread* thread, art::LinearAlloc* allocator, const std::unordered_set<art::ArtMethod*>& obsoleted_methods, - /*out*/std::unordered_map<art::ArtMethod*, art::ArtMethod*>* obsolete_maps, - /*out*/bool* success, - /*out*/std::string* error_msg) + /*out*/std::unordered_map<art::ArtMethod*, art::ArtMethod*>* obsolete_maps) : StackVisitor(thread, /*context*/nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), allocator_(allocator), obsoleted_methods_(obsoleted_methods), obsolete_maps_(obsolete_maps), - success_(success), - is_runtime_frame_(false), - error_msg_(error_msg) { - *success_ = true; + is_runtime_frame_(false) { } ~ObsoleteMethodStackVisitor() OVERRIDE {} @@ -85,34 +82,17 @@ class ObsoleteMethodStackVisitor : public art::StackVisitor { // Returns true if we successfully installed obsolete methods on this thread, filling // obsolete_maps_ with the translations if needed. Returns false and fills error_msg if we fail. // The stack is cleaned up when we fail. - static bool UpdateObsoleteFrames( + static void UpdateObsoleteFrames( art::Thread* thread, art::LinearAlloc* allocator, const std::unordered_set<art::ArtMethod*>& obsoleted_methods, - /*out*/std::unordered_map<art::ArtMethod*, art::ArtMethod*>* obsolete_maps, - /*out*/std::string* error_msg) REQUIRES(art::Locks::mutator_lock_) { - bool success = true; + /*out*/std::unordered_map<art::ArtMethod*, art::ArtMethod*>* obsolete_maps) + REQUIRES(art::Locks::mutator_lock_) { ObsoleteMethodStackVisitor visitor(thread, allocator, obsoleted_methods, - obsolete_maps, - &success, - error_msg); + obsolete_maps); visitor.WalkStack(); - if (!success) { - RestoreFrames(thread, *obsolete_maps, error_msg); - return false; - } else { - return true; - } - } - - static void RestoreFrames( - art::Thread* thread ATTRIBUTE_UNUSED, - const std::unordered_map<art::ArtMethod*, art::ArtMethod*>& obsolete_maps ATTRIBUTE_UNUSED, - std::string* error_msg) - REQUIRES(art::Locks::mutator_lock_) { - LOG(FATAL) << "Restoring stack frames is not yet supported. Error was: " << *error_msg; } bool VisitFrame() OVERRIDE REQUIRES(art::Locks::mutator_lock_) { @@ -130,9 +110,7 @@ class ObsoleteMethodStackVisitor : public art::StackVisitor { // works through runtime methods. 
// TODO b/33616143 if (!IsShadowFrame() && prev_was_runtime_frame_) { - *error_msg_ = StringPrintf("Deoptimization failed due to runtime method in stack."); - *success_ = false; - return false; + LOG(FATAL) << "Deoptimization failed due to runtime method in stack. See b/33616143"; } // We cannot ensure that the right dex file is used in inlined frames so we don't support // redefining them. @@ -150,12 +128,8 @@ class ObsoleteMethodStackVisitor : public art::StackVisitor { auto ptr_size = cl->GetImagePointerSize(); const size_t method_size = art::ArtMethod::Size(ptr_size); auto* method_storage = allocator_->Alloc(GetThread(), method_size); - if (method_storage == nullptr) { - *success_ = false; - *error_msg_ = StringPrintf("Unable to allocate storage for obsolete version of '%s'", - old_method->PrettyMethod().c_str()); - return false; - } + CHECK(method_storage != nullptr) << "Unable to allocate storage for obsolete version of '" + << old_method->PrettyMethod() << "'"; new_obsolete_method = new (method_storage) art::ArtMethod(); new_obsolete_method->CopyFrom(old_method, ptr_size); DCHECK_EQ(new_obsolete_method->GetDeclaringClass(), old_method->GetDeclaringClass()); @@ -186,13 +160,50 @@ class ObsoleteMethodStackVisitor : public art::StackVisitor { // values in this map must be added to the obsolete_methods_ (and obsolete_dex_caches_) fields of // the redefined classes ClassExt by the caller. std::unordered_map<art::ArtMethod*, art::ArtMethod*>* obsolete_maps_; - bool* success_; // TODO REMOVE once either current_method doesn't stick around through suspend points or deopt // works through runtime methods. bool is_runtime_frame_; - std::string* error_msg_; }; +jvmtiError Redefiner::IsModifiableClass(jvmtiEnv* env ATTRIBUTE_UNUSED, + jclass klass, + jboolean* is_redefinable) { + // TODO Check for the appropriate feature flags once we have enabled them. + art::Thread* self = art::Thread::Current(); + art::ScopedObjectAccess soa(self); + art::StackHandleScope<1> hs(self); + art::ObjPtr<art::mirror::Object> obj(self->DecodeJObject(klass)); + if (obj.IsNull()) { + return ERR(INVALID_CLASS); + } + art::Handle<art::mirror::Class> h_klass(hs.NewHandle(obj->AsClass())); + std::string err_unused; + *is_redefinable = + Redefiner::GetClassRedefinitionError(h_klass, &err_unused) == OK ? JNI_TRUE : JNI_FALSE; + return OK; +} + +jvmtiError Redefiner::GetClassRedefinitionError(art::Handle<art::mirror::Class> klass, + /*out*/std::string* error_msg) { + if (klass->IsPrimitive()) { + *error_msg = "Modification of primitive classes is not supported"; + return ERR(UNMODIFIABLE_CLASS); + } else if (klass->IsInterface()) { + *error_msg = "Modification of Interface classes is currently not supported"; + return ERR(UNMODIFIABLE_CLASS); + } else if (klass->IsArrayClass()) { + *error_msg = "Modification of Array classes is not supported"; + return ERR(UNMODIFIABLE_CLASS); + } else if (klass->IsProxyClass()) { + *error_msg = "Modification of proxy classes is not supported"; + return ERR(UNMODIFIABLE_CLASS); + } + + // TODO We should check if the class has non-obsoletable methods on the stack + LOG(WARNING) << "presence of non-obsoletable methods on stacks is not currently checked"; + return OK; +} + // Moves dex data to an anonymous, read-only mmap'd region. 
std::unique_ptr<art::MemMap> Redefiner::MoveDataToMemMap(const std::string& original_location, jint data_len, @@ -446,59 +457,38 @@ struct CallbackCtx { art::LinearAlloc* allocator; std::unordered_map<art::ArtMethod*, art::ArtMethod*> obsolete_map; std::unordered_set<art::ArtMethod*> obsolete_methods; - bool success; - std::string* error_msg; - CallbackCtx(Redefiner* self, art::LinearAlloc* alloc, std::string* error) - : r(self), allocator(alloc), success(true), error_msg(error) {} + CallbackCtx(Redefiner* self, art::LinearAlloc* alloc) + : r(self), allocator(alloc) {} }; -void DoRestoreObsoleteMethodsCallback(art::Thread* t, void* vdata) NO_THREAD_SAFETY_ANALYSIS { - CallbackCtx* data = reinterpret_cast<CallbackCtx*>(vdata); - ObsoleteMethodStackVisitor::RestoreFrames(t, data->obsolete_map, data->error_msg); -} - void DoAllocateObsoleteMethodsCallback(art::Thread* t, void* vdata) NO_THREAD_SAFETY_ANALYSIS { CallbackCtx* data = reinterpret_cast<CallbackCtx*>(vdata); - if (data->success) { - // Don't do anything if we already failed once. - data->success = ObsoleteMethodStackVisitor::UpdateObsoleteFrames(t, - data->allocator, - data->obsolete_methods, - &data->obsolete_map, - data->error_msg); - } + ObsoleteMethodStackVisitor::UpdateObsoleteFrames(t, + data->allocator, + data->obsolete_methods, + &data->obsolete_map); } // This creates any ArtMethod* structures needed for obsolete methods and ensures that the stack is // updated so they will be run. -bool Redefiner::FindAndAllocateObsoleteMethods(art::mirror::Class* art_klass) { +void Redefiner::FindAndAllocateObsoleteMethods(art::mirror::Class* art_klass) { art::ScopedAssertNoThreadSuspension ns("No thread suspension during thread stack walking"); art::mirror::ClassExt* ext = art_klass->GetExtData(); CHECK(ext->GetObsoleteMethods() != nullptr); - CallbackCtx ctx(this, art_klass->GetClassLoader()->GetAllocator(), error_msg_); + CallbackCtx ctx(this, art_klass->GetClassLoader()->GetAllocator()); // Add all the declared methods to the map for (auto& m : art_klass->GetDeclaredMethods(art::kRuntimePointerSize)) { ctx.obsolete_methods.insert(&m); - } - for (art::ArtMethod* old_method : ctx.obsolete_methods) { - if (old_method->IsIntrinsic()) { - *error_msg_ = StringPrintf("Method '%s' is intrinsic and cannot be made obsolete!", - old_method->PrettyMethod().c_str()); - return false; - } + // TODO Allow this or check in IsModifiableClass. + DCHECK(!m.IsIntrinsic()); } { art::MutexLock mu(self_, *art::Locks::thread_list_lock_); art::ThreadList* list = art::Runtime::Current()->GetThreadList(); list->ForEach(DoAllocateObsoleteMethodsCallback, static_cast<void*>(&ctx)); - if (!ctx.success) { - list->ForEach(DoRestoreObsoleteMethodsCallback, static_cast<void*>(&ctx)); - return false; - } } FillObsoleteMethodMap(art_klass, ctx.obsolete_map); - return true; } // Fills the obsolete method map in the art_klass's extData. This is so obsolete methods are able to @@ -542,6 +532,107 @@ void Redefiner::EnsureObsoleteMethodsAreDeoptimized() { i->ReJitEverything("libOpenJkdJvmti - Class Redefinition"); } +bool Redefiner::CheckClass() { + // TODO Might just want to put it in a ObjPtr and NoSuspend assert. + art::StackHandleScope<1> hs(self_); + // Easy check that only 1 class def is present. + if (dex_file_->NumClassDefs() != 1) { + RecordFailure(ERR(ILLEGAL_ARGUMENT), + StringPrintf("Expected 1 class def in dex file but found %d", + dex_file_->NumClassDefs())); + return false; + } + // Get the ClassDef from the new DexFile. 
+ // Since the dex file has only a single class def the index is always 0. + const art::DexFile::ClassDef& def = dex_file_->GetClassDef(0); + // Get the class as it is now. + art::Handle<art::mirror::Class> current_class(hs.NewHandle(GetMirrorClass())); + + // Check the access flags didn't change. + if (def.GetJavaAccessFlags() != (current_class->GetAccessFlags() & art::kAccValidClassFlags)) { + RecordFailure(ERR(UNSUPPORTED_REDEFINITION_CLASS_MODIFIERS_CHANGED), + "Cannot change modifiers of class by redefinition"); + return false; + } + + // Check class name. + // These should have been checked by the dexfile verifier on load. + DCHECK_NE(def.class_idx_, art::dex::TypeIndex::Invalid()) << "Invalid type index"; + const char* descriptor = dex_file_->StringByTypeIdx(def.class_idx_); + DCHECK(descriptor != nullptr) << "Invalid dex file structure!"; + if (!current_class->DescriptorEquals(descriptor)) { + std::string storage; + RecordFailure(ERR(NAMES_DONT_MATCH), + StringPrintf("expected file to contain class called '%s' but found '%s'!", + current_class->GetDescriptor(&storage), + descriptor)); + return false; + } + if (current_class->IsObjectClass()) { + if (def.superclass_idx_ != art::dex::TypeIndex::Invalid()) { + RecordFailure(ERR(UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED), "Superclass added!"); + return false; + } + } else { + const char* super_descriptor = dex_file_->StringByTypeIdx(def.superclass_idx_); + DCHECK(descriptor != nullptr) << "Invalid dex file structure!"; + if (!current_class->GetSuperClass()->DescriptorEquals(super_descriptor)) { + RecordFailure(ERR(UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED), "Superclass changed"); + return false; + } + } + const art::DexFile::TypeList* interfaces = dex_file_->GetInterfacesList(def); + if (interfaces == nullptr) { + if (current_class->NumDirectInterfaces() != 0) { + RecordFailure(ERR(UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED), "Interfaces added"); + return false; + } + } else { + DCHECK(!current_class->IsProxyClass()); + const art::DexFile::TypeList* current_interfaces = current_class->GetInterfaceTypeList(); + if (current_interfaces == nullptr || current_interfaces->Size() != interfaces->Size()) { + RecordFailure(ERR(UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED), "Interfaces added or removed"); + return false; + } + // The order of interfaces is (barely) meaningful so we error if it changes. + const art::DexFile& orig_dex_file = current_class->GetDexFile(); + for (uint32_t i = 0; i < interfaces->Size(); i++) { + if (strcmp( + dex_file_->StringByTypeIdx(interfaces->GetTypeItem(i).type_idx_), + orig_dex_file.StringByTypeIdx(current_interfaces->GetTypeItem(i).type_idx_)) != 0) { + RecordFailure(ERR(UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED), + "Interfaces changed or re-ordered"); + return false; + } + } + } + LOG(WARNING) << "No verification is done on annotations of redefined classes."; + + return true; +} + +// TODO Move this to use IsRedefinable when that function is made. 
+bool Redefiner::CheckRedefinable() { + std::string err; + art::StackHandleScope<1> hs(self_); + + art::Handle<art::mirror::Class> h_klass(hs.NewHandle(GetMirrorClass())); + jvmtiError res = Redefiner::GetClassRedefinitionError(h_klass, &err); + if (res != OK) { + RecordFailure(res, err); + return false; + } else { + return true; + } +} + +bool Redefiner::CheckRedefinitionIsValid() { + return CheckRedefinable() && + CheckClass() && + CheckSameFields() && + CheckSameMethods(); +} + jvmtiError Redefiner::Run() { art::StackHandleScope<5> hs(self_); // TODO We might want to have a global lock (or one based on the class being redefined at least) @@ -552,7 +643,7 @@ jvmtiError Redefiner::Run() { // doing a try loop. The other allocations we need to ensure that nothing has changed in the time // between allocating them and pausing all threads before we can update them so we need to do a // try loop. - if (!EnsureRedefinitionIsValid() || !EnsureClassAllocationsFinished()) { + if (!CheckRedefinitionIsValid() || !EnsureClassAllocationsFinished()) { return result_; } art::MutableHandle<art::mirror::ClassLoader> source_class_loader( @@ -601,31 +692,9 @@ jvmtiError Redefiner::Run() { // TODO We should really Retry if this fails instead of simply aborting. // Set the new DexFileCookie returns the original so we can fix it back up if redefinition fails art::ObjPtr<art::mirror::LongArray> original_dex_file_cookie(nullptr); - if (!UpdateJavaDexFile(java_dex_file.Get(), - new_dex_file_cookie.Get(), - &original_dex_file_cookie) || - !FindAndAllocateObsoleteMethods(art_class.Get())) { - // Release suspendAll - runtime_->GetThreadList()->ResumeAll(); - // Get back shared mutator lock as expected for return. - self_->TransitionFromSuspendedToRunnable(); - if (heap->IsGcConcurrentAndMoving()) { - heap->DecrementDisableMovingGC(self_); - } - return result_; - } - if (!UpdateClass(art_class.Get(), new_dex_cache.Get())) { - // TODO Should have some form of scope to do this. - RestoreJavaDexFile(java_dex_file.Get(), original_dex_file_cookie); - // Release suspendAll - runtime_->GetThreadList()->ResumeAll(); - // Get back shared mutator lock as expected for return. - self_->TransitionFromSuspendedToRunnable(); - if (heap->IsGcConcurrentAndMoving()) { - heap->DecrementDisableMovingGC(self_); - } - return result_; - } + UpdateJavaDexFile(java_dex_file.Get(), new_dex_file_cookie.Get(), &original_dex_file_cookie); + FindAndAllocateObsoleteMethods(art_class.Get()); + UpdateClass(art_class.Get(), new_dex_cache.Get()); // Ensure that obsolete methods are deoptimized. This is needed since optimized methods may have // pointers to their ArtMethod's stashed in registers that they then use to attempt to hit the // DexCache. 
@@ -652,21 +721,7 @@ jvmtiError Redefiner::Run() { return OK; } -void Redefiner::RestoreJavaDexFile(art::ObjPtr<art::mirror::Object> java_dex_file, - art::ObjPtr<art::mirror::LongArray> orig_cookie) { - art::ArtField* internal_cookie_field = java_dex_file->GetClass()->FindDeclaredInstanceField( - "mInternalCookie", "Ljava/lang/Object;"); - art::ArtField* cookie_field = java_dex_file->GetClass()->FindDeclaredInstanceField( - "mCookie", "Ljava/lang/Object;"); - art::ObjPtr<art::mirror::LongArray> new_cookie( - cookie_field->GetObject(java_dex_file)->AsLongArray()); - internal_cookie_field->SetObject<false>(java_dex_file, orig_cookie); - if (!new_cookie.IsNull()) { - cookie_field->SetObject<false>(java_dex_file, orig_cookie); - } -} - -bool Redefiner::UpdateMethods(art::ObjPtr<art::mirror::Class> mclass, +void Redefiner::UpdateMethods(art::ObjPtr<art::mirror::Class> mclass, art::ObjPtr<art::mirror::DexCache> new_dex_cache, const art::DexFile::ClassDef& class_def) { art::ClassLinker* linker = runtime_->GetClassLinker(); @@ -709,10 +764,9 @@ bool Redefiner::UpdateMethods(art::ObjPtr<art::mirror::Class> mclass, jit->GetCodeCache()->NotifyMethodRedefined(&method); } } - return true; } -bool Redefiner::UpdateFields(art::ObjPtr<art::mirror::Class> mclass) { +void Redefiner::UpdateFields(art::ObjPtr<art::mirror::Class> mclass) { // TODO The IFields & SFields pointers should be combined like the methods_ arrays were. for (auto fields_iter : {mclass->GetIFields(), mclass->GetSFields()}) { for (art::ArtField& field : fields_iter) { @@ -730,28 +784,16 @@ bool Redefiner::UpdateFields(art::ObjPtr<art::mirror::Class> mclass) { field.SetDexFieldIndex(dex_file_->GetIndexForFieldId(*new_field_id)); } } - return true; } // Performs updates to class that will allow us to verify it. -bool Redefiner::UpdateClass(art::ObjPtr<art::mirror::Class> mclass, +void Redefiner::UpdateClass(art::ObjPtr<art::mirror::Class> mclass, art::ObjPtr<art::mirror::DexCache> new_dex_cache) { const art::DexFile::ClassDef* class_def = art::OatFile::OatDexFile::FindClassDef( *dex_file_, class_sig_, art::ComputeModifiedUtf8Hash(class_sig_)); - if (class_def == nullptr) { - RecordFailure(ERR(INVALID_CLASS_FORMAT), "Unable to find ClassDef!"); - return false; - } - if (!UpdateMethods(mclass, new_dex_cache, *class_def)) { - // TODO Investigate appropriate error types. - RecordFailure(ERR(INTERNAL), "Unable to update class methods."); - return false; - } - if (!UpdateFields(mclass)) { - // TODO Investigate appropriate error types. - RecordFailure(ERR(INTERNAL), "Unable to update class fields."); - return false; - } + DCHECK(class_def != nullptr); + UpdateMethods(mclass, new_dex_cache, *class_def); + UpdateFields(mclass); // Update the class fields. 
// Need to update class last since the ArtMethod gets its DexFile from the class (which is needed @@ -759,10 +801,9 @@ bool Redefiner::UpdateClass(art::ObjPtr<art::mirror::Class> mclass, mclass->SetDexCache(new_dex_cache.Ptr()); mclass->SetDexClassDefIndex(dex_file_->GetIndexForClassDef(*class_def)); mclass->SetDexTypeIndex(dex_file_->GetIndexForTypeId(*dex_file_->FindTypeId(class_sig_))); - return true; } -bool Redefiner::UpdateJavaDexFile(art::ObjPtr<art::mirror::Object> java_dex_file, +void Redefiner::UpdateJavaDexFile(art::ObjPtr<art::mirror::Object> java_dex_file, art::ObjPtr<art::mirror::LongArray> new_cookie, /*out*/art::ObjPtr<art::mirror::LongArray>* original_cookie) { art::ArtField* internal_cookie_field = java_dex_file->GetClass()->FindDeclaredInstanceField( @@ -779,7 +820,6 @@ bool Redefiner::UpdateJavaDexFile(art::ObjPtr<art::mirror::Object> java_dex_file if (!orig_cookie.IsNull()) { cookie_field->SetObject<false>(java_dex_file, new_cookie); } - return true; } // This function does all (java) allocations we need to do for the Class being redefined. diff --git a/runtime/openjdkjvmti/ti_redefine.h b/runtime/openjdkjvmti/ti_redefine.h index 9d23ce445f..5852309291 100644 --- a/runtime/openjdkjvmti/ti_redefine.h +++ b/runtime/openjdkjvmti/ti_redefine.h @@ -80,6 +80,8 @@ class Redefiner { unsigned char* dex_data, std::string* error_msg); + static jvmtiError IsModifiableClass(jvmtiEnv* env, jclass klass, jboolean* is_redefinable); + private: jvmtiError result_; art::Runtime* runtime_; @@ -106,6 +108,10 @@ class Redefiner { error_msg_(error_msg), class_sig_(class_sig) { } + static jvmtiError GetClassRedefinitionError(art::Handle<art::mirror::Class> klass, + /*out*/std::string* error_msg) + REQUIRES_SHARED(art::Locks::mutator_lock_); + static std::unique_ptr<art::MemMap> MoveDataToMemMap(const std::string& original_location, jint data_len, unsigned char* dex_data, @@ -152,36 +158,47 @@ class Redefiner { void RecordFailure(jvmtiError result, const std::string& error_msg); - // TODO Actually write this. // This will check that no constraints are violated (more than 1 class in dex file, any changes in // number/declaration of methods & fields, changes in access flags, etc.) - bool EnsureRedefinitionIsValid() { - LOG(WARNING) << "Redefinition is not checked for validity currently"; + bool CheckRedefinitionIsValid() REQUIRES_SHARED(art::Locks::mutator_lock_); + + // Checks that the class can even be redefined. + bool CheckRedefinable() REQUIRES_SHARED(art::Locks::mutator_lock_); + + // Checks that the dex file does not add/remove methods. + bool CheckSameMethods() REQUIRES_SHARED(art::Locks::mutator_lock_) { + LOG(WARNING) << "methods are not checked for modification currently"; return true; } - bool UpdateJavaDexFile(art::ObjPtr<art::mirror::Object> java_dex_file, + // Checks that the dex file does not modify fields + bool CheckSameFields() REQUIRES_SHARED(art::Locks::mutator_lock_) { + LOG(WARNING) << "Fields are not checked for modification currently"; + return true; + } + + // Checks that the dex file contains only the single expected class and that the top-level class + // data has not been modified in an incompatible manner. 
+ bool CheckClass() REQUIRES_SHARED(art::Locks::mutator_lock_); + + void UpdateJavaDexFile(art::ObjPtr<art::mirror::Object> java_dex_file, art::ObjPtr<art::mirror::LongArray> new_cookie, /*out*/art::ObjPtr<art::mirror::LongArray>* original_cookie) REQUIRES(art::Locks::mutator_lock_); - void RestoreJavaDexFile(art::ObjPtr<art::mirror::Object> java_dex_file, - art::ObjPtr<art::mirror::LongArray> original_cookie) - REQUIRES(art::Locks::mutator_lock_); - - bool UpdateFields(art::ObjPtr<art::mirror::Class> mclass) + void UpdateFields(art::ObjPtr<art::mirror::Class> mclass) REQUIRES(art::Locks::mutator_lock_); - bool UpdateMethods(art::ObjPtr<art::mirror::Class> mclass, + void UpdateMethods(art::ObjPtr<art::mirror::Class> mclass, art::ObjPtr<art::mirror::DexCache> new_dex_cache, const art::DexFile::ClassDef& class_def) REQUIRES(art::Locks::mutator_lock_); - bool UpdateClass(art::ObjPtr<art::mirror::Class> mclass, + void UpdateClass(art::ObjPtr<art::mirror::Class> mclass, art::ObjPtr<art::mirror::DexCache> new_dex_cache) REQUIRES(art::Locks::mutator_lock_); - bool FindAndAllocateObsoleteMethods(art::mirror::Class* art_klass) + void FindAndAllocateObsoleteMethods(art::mirror::Class* art_klass) REQUIRES(art::Locks::mutator_lock_); void FillObsoleteMethodMap(art::mirror::Class* art_klass, diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc index a81458fded..b809c3eb56 100644 --- a/runtime/quick_exception_handler.cc +++ b/runtime/quick_exception_handler.cc @@ -140,7 +140,7 @@ class CatchBlockStackVisitor FINAL : public StackVisitor { DISALLOW_COPY_AND_ASSIGN(CatchBlockStackVisitor); }; -void QuickExceptionHandler::FindCatch(mirror::Throwable* exception) { +void QuickExceptionHandler::FindCatch(ObjPtr<mirror::Throwable> exception) { DCHECK(!is_deoptimization_); if (kDebugExceptionDelivery) { mirror::String* msg = exception->GetDetailMessage(); diff --git a/runtime/quick_exception_handler.h b/runtime/quick_exception_handler.h index 5592126a61..3ead7dbe64 100644 --- a/runtime/quick_exception_handler.h +++ b/runtime/quick_exception_handler.h @@ -46,7 +46,7 @@ class QuickExceptionHandler { } // Find the catch handler for the given exception. - void FindCatch(mirror::Throwable* exception) REQUIRES_SHARED(Locks::mutator_lock_); + void FindCatch(ObjPtr<mirror::Throwable> exception) REQUIRES_SHARED(Locks::mutator_lock_); // Deoptimize the stack to the upcall/some code that's not deoptimizeable. 
For // every compiled frame, we create a "copy" shadow frame that will be executed diff --git a/runtime/thread.cc b/runtime/thread.cc index aff12ff4c1..bdd4ca6721 100644 --- a/runtime/thread.cc +++ b/runtime/thread.cc @@ -154,18 +154,18 @@ class DeoptimizationContextRecord { DeoptimizationContextRecord(const JValue& ret_val, bool is_reference, bool from_code, - mirror::Throwable* pending_exception, + ObjPtr<mirror::Throwable> pending_exception, DeoptimizationContextRecord* link) : ret_val_(ret_val), is_reference_(is_reference), from_code_(from_code), - pending_exception_(pending_exception), + pending_exception_(pending_exception.Ptr()), link_(link) {} JValue GetReturnValue() const { return ret_val_; } bool IsReference() const { return is_reference_; } bool GetFromCode() const { return from_code_; } - mirror::Throwable* GetPendingException() const { return pending_exception_; } + ObjPtr<mirror::Throwable> GetPendingException() const { return pending_exception_; } DeoptimizationContextRecord* GetLink() const { return link_; } mirror::Object** GetReturnValueAsGCRoot() { DCHECK(is_reference_); @@ -219,7 +219,7 @@ class StackedShadowFrameRecord { void Thread::PushDeoptimizationContext(const JValue& return_value, bool is_reference, bool from_code, - mirror::Throwable* exception) { + ObjPtr<mirror::Throwable> exception) { DeoptimizationContextRecord* record = new DeoptimizationContextRecord( return_value, is_reference, @@ -230,7 +230,7 @@ void Thread::PushDeoptimizationContext(const JValue& return_value, } void Thread::PopDeoptimizationContext(JValue* result, - mirror::Throwable** exception, + ObjPtr<mirror::Throwable>* exception, bool* from_code) { AssertHasDeoptimizationContext(); DeoptimizationContextRecord* record = tlsPtr_.deoptimization_context_stack; @@ -434,7 +434,7 @@ void* Thread::CreateCallback(void* arg) { Dbg::PostThreadStart(self); // Invoke the 'run' method of our java.lang.Thread. 
- mirror::Object* receiver = self->tlsPtr_.opeer; + ObjPtr<mirror::Object> receiver = self->tlsPtr_.opeer; jmethodID mid = WellKnownClasses::java_lang_Thread_run; ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(receiver)); InvokeVirtualOrInterfaceWithJValues(soa, ref.get(), mid, nullptr); @@ -446,7 +446,7 @@ void* Thread::CreateCallback(void* arg) { } Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa, - mirror::Object* thread_peer) { + ObjPtr<mirror::Object> thread_peer) { ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_nativePeer); Thread* result = reinterpret_cast<Thread*>(static_cast<uintptr_t>(f->GetLong(thread_peer))); // Sanity check that if we have a result it is either suspended or we hold the thread_list_lock_ @@ -1573,8 +1573,8 @@ struct StackDumpVisitor : public StackVisitor { } m = m->GetInterfaceMethodIfProxy(kRuntimePointerSize); const int kMaxRepetition = 3; - mirror::Class* c = m->GetDeclaringClass(); - mirror::DexCache* dex_cache = c->GetDexCache(); + ObjPtr<mirror::Class> c = m->GetDeclaringClass(); + ObjPtr<mirror::DexCache> dex_cache = c->GetDexCache(); int line_number = -1; if (dex_cache != nullptr) { // be tolerant of bad input const DexFile* dex_file = dex_cache->GetDexFile(); @@ -1860,17 +1860,15 @@ void Thread::AssertPendingOOMException() const { void Thread::AssertNoPendingException() const { if (UNLIKELY(IsExceptionPending())) { ScopedObjectAccess soa(Thread::Current()); - mirror::Throwable* exception = GetException(); - LOG(FATAL) << "No pending exception expected: " << exception->Dump(); + LOG(FATAL) << "No pending exception expected: " << GetException()->Dump(); } } void Thread::AssertNoPendingExceptionForNewException(const char* msg) const { if (UNLIKELY(IsExceptionPending())) { ScopedObjectAccess soa(Thread::Current()); - mirror::Throwable* exception = GetException(); LOG(FATAL) << "Throwing new exception '" << msg << "' with unexpected pending exception: " - << exception->Dump(); + << GetException()->Dump(); } } @@ -2213,7 +2211,7 @@ class BuildInternalStackTraceVisitor : public StackVisitor { // class of the ArtMethod pointers. ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); StackHandleScope<1> hs(self_); - mirror::Class* array_class = class_linker->GetClassRoot(ClassLinker::kObjectArrayClass); + ObjPtr<mirror::Class> array_class = class_linker->GetClassRoot(ClassLinker::kObjectArrayClass); // The first element is the methods and dex pc array, the other elements are declaring classes // for the methods to ensure classes in the stack trace don't get unloaded. Handle<mirror::ObjectArray<mirror::Object>> trace( @@ -2225,7 +2223,8 @@ class BuildInternalStackTraceVisitor : public StackVisitor { self_->AssertPendingOOMException(); return false; } - mirror::PointerArray* methods_and_pcs = class_linker->AllocPointerArray(self_, depth * 2); + ObjPtr<mirror::PointerArray> methods_and_pcs = + class_linker->AllocPointerArray(self_, depth * 2); const char* last_no_suspend_cause = self_->StartAssertNoThreadSuspension("Building internal stack trace"); if (methods_and_pcs == nullptr) { @@ -2255,7 +2254,7 @@ class BuildInternalStackTraceVisitor : public StackVisitor { if (m->IsRuntimeMethod()) { return true; // Ignore runtime frames (in particular callee save). 
} - mirror::PointerArray* trace_methods_and_pcs = GetTraceMethodsAndPCs(); + ObjPtr<mirror::PointerArray> trace_methods_and_pcs = GetTraceMethodsAndPCs(); trace_methods_and_pcs->SetElementPtrSize<kTransactionActive>(count_, m, pointer_size_); trace_methods_and_pcs->SetElementPtrSize<kTransactionActive>( trace_methods_and_pcs->GetLength() / 2 + count_, @@ -2268,8 +2267,8 @@ class BuildInternalStackTraceVisitor : public StackVisitor { return true; } - mirror::PointerArray* GetTraceMethodsAndPCs() const REQUIRES_SHARED(Locks::mutator_lock_) { - return down_cast<mirror::PointerArray*>(trace_->Get(0)); + ObjPtr<mirror::PointerArray> GetTraceMethodsAndPCs() const REQUIRES_SHARED(Locks::mutator_lock_) { + return ObjPtr<mirror::PointerArray>::DownCast(MakeObjPtr(trace_->Get(0))); } mirror::ObjectArray<mirror::Object>* GetInternalStackTrace() const { @@ -2311,7 +2310,7 @@ jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable build_trace_visitor.WalkStack(); mirror::ObjectArray<mirror::Object>* trace = build_trace_visitor.GetInternalStackTrace(); if (kIsDebugBuild) { - mirror::PointerArray* trace_methods = build_trace_visitor.GetTraceMethodsAndPCs(); + ObjPtr<mirror::PointerArray> trace_methods = build_trace_visitor.GetTraceMethodsAndPCs(); // Second half of trace_methods is dex PCs. for (uint32_t i = 0; i < static_cast<uint32_t>(trace_methods->GetLength() / 2); ++i) { auto* method = trace_methods->GetElementPtrSize<ArtMethod*>( @@ -2326,7 +2325,7 @@ template jobject Thread::CreateInternalStackTrace<false>( template jobject Thread::CreateInternalStackTrace<true>( const ScopedObjectAccessAlreadyRunnable& soa) const; -bool Thread::IsExceptionThrownByCurrentMethod(mirror::Throwable* exception) const { +bool Thread::IsExceptionThrownByCurrentMethod(ObjPtr<mirror::Throwable> exception) const { CountStackDepthVisitor count_visitor(const_cast<Thread*>(this)); count_visitor.WalkStack(); return count_visitor.GetDepth() == exception->GetStackDepth(); @@ -2368,12 +2367,12 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray( } for (int32_t i = 0; i < depth; ++i) { - mirror::ObjectArray<mirror::Object>* decoded_traces = + ObjPtr<mirror::ObjectArray<mirror::Object>> decoded_traces = soa.Decode<mirror::Object>(internal)->AsObjectArray<mirror::Object>(); // Methods and dex PC trace is element 0. 
DCHECK(decoded_traces->Get(0)->IsIntArray() || decoded_traces->Get(0)->IsLongArray()); - mirror::PointerArray* const method_trace = - down_cast<mirror::PointerArray*>(decoded_traces->Get(0)); + ObjPtr<mirror::PointerArray> const method_trace = + ObjPtr<mirror::PointerArray>::DownCast(MakeObjPtr(decoded_traces->Get(0))); // Prepare parameters for StackTraceElement(String cls, String method, String file, int line) ArtMethod* method = method_trace->GetElementPtrSize<ArtMethod*>(i, kRuntimePointerSize); uint32_t dex_pc = method_trace->GetElementPtrSize<uint32_t>( @@ -2415,8 +2414,11 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray( if (method_name_object.Get() == nullptr) { return nullptr; } - mirror::StackTraceElement* obj = mirror::StackTraceElement::Alloc( - soa.Self(), class_name_object, method_name_object, source_name_object, line_number); + ObjPtr<mirror::StackTraceElement> obj =mirror::StackTraceElement::Alloc(soa.Self(), + class_name_object, + method_name_object, + source_name_object, + line_number); if (obj == nullptr) { return nullptr; } @@ -2447,7 +2449,7 @@ void Thread::ThrowNewException(const char* exception_class_descriptor, ThrowNewWrappedException(exception_class_descriptor, msg); } -static mirror::ClassLoader* GetCurrentClassLoader(Thread* self) +static ObjPtr<mirror::ClassLoader> GetCurrentClassLoader(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) { ArtMethod* method = self->GetCurrentMethod(nullptr); return method != nullptr @@ -2627,10 +2629,9 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) { QUICK_ENTRY_POINT_INFO(pAllocArray) QUICK_ENTRY_POINT_INFO(pAllocArrayResolved) QUICK_ENTRY_POINT_INFO(pAllocArrayWithAccessCheck) - QUICK_ENTRY_POINT_INFO(pAllocObject) QUICK_ENTRY_POINT_INFO(pAllocObjectResolved) QUICK_ENTRY_POINT_INFO(pAllocObjectInitialized) - QUICK_ENTRY_POINT_INFO(pAllocObjectWithAccessCheck) + QUICK_ENTRY_POINT_INFO(pAllocObjectWithChecks) QUICK_ENTRY_POINT_INFO(pCheckAndAllocArray) QUICK_ENTRY_POINT_INFO(pCheckAndAllocArrayWithAccessCheck) QUICK_ENTRY_POINT_INFO(pAllocStringFromBytes) @@ -2794,7 +2795,7 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) { void Thread::QuickDeliverException() { // Get exception from thread. - mirror::Throwable* exception = GetException(); + ObjPtr<mirror::Throwable> exception = GetException(); CHECK(exception != nullptr); if (exception == GetDeoptimizationException()) { artDeoptimize(this); @@ -2807,8 +2808,8 @@ void Thread::QuickDeliverException() { IsExceptionThrownByCurrentMethod(exception)) { // Instrumentation may cause GC so keep the exception object safe. StackHandleScope<1> hs(this); - HandleWrapper<mirror::Throwable> h_exception(hs.NewHandleWrapper(&exception)); - instrumentation->ExceptionCaughtEvent(this, exception); + HandleWrapperObjPtr<mirror::Throwable> h_exception(hs.NewHandleWrapper(&exception)); + instrumentation->ExceptionCaughtEvent(this, exception.Ptr()); } // Does instrumentation need to deoptimize the stack? 
// Note: we do this *after* reporting the exception to instrumentation in case it @@ -2870,7 +2871,7 @@ struct CurrentMethodVisitor FINAL : public StackVisitor { dex_pc_ = GetDexPc(abort_on_error_); return false; } - mirror::Object* this_object_; + ObjPtr<mirror::Object> this_object_; ArtMethod* method_; uint32_t dex_pc_; const bool abort_on_error_; @@ -2885,11 +2886,8 @@ ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error) const return visitor.method_; } -bool Thread::HoldsLock(mirror::Object* object) const { - if (object == nullptr) { - return false; - } - return object->GetLockOwnerThreadId() == GetThreadId(); +bool Thread::HoldsLock(ObjPtr<mirror::Object> object) const { + return object != nullptr && object->GetLockOwnerThreadId() == GetThreadId(); } // RootVisitor parameters are: (const Object* obj, size_t vreg, const StackVisitor* visitor). @@ -2945,7 +2943,7 @@ class ReferenceMapVisitor : public StackVisitor { void VisitDeclaringClass(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS { - mirror::Class* klass = method->GetDeclaringClassUnchecked<kWithoutReadBarrier>(); + ObjPtr<mirror::Class> klass = method->GetDeclaringClassUnchecked<kWithoutReadBarrier>(); // klass can be null for runtime methods. if (klass != nullptr) { if (kVerifyImageObjectsMarked) { @@ -2954,10 +2952,10 @@ class ReferenceMapVisitor : public StackVisitor { /*fail_ok*/true); if (space != nullptr && space->IsImageSpace()) { bool failed = false; - if (!space->GetLiveBitmap()->Test(klass)) { + if (!space->GetLiveBitmap()->Test(klass.Ptr())) { failed = true; LOG(FATAL_WITHOUT_ABORT) << "Unmarked object in image " << *space; - } else if (!heap->GetLiveBitmap()->Test(klass)) { + } else if (!heap->GetLiveBitmap()->Test(klass.Ptr())) { failed = true; LOG(FATAL_WITHOUT_ABORT) << "Unmarked object in image through live bitmap " << *space; } @@ -2965,17 +2963,17 @@ class ReferenceMapVisitor : public StackVisitor { GetThread()->Dump(LOG_STREAM(FATAL_WITHOUT_ABORT)); space->AsImageSpace()->DumpSections(LOG_STREAM(FATAL_WITHOUT_ABORT)); LOG(FATAL_WITHOUT_ABORT) << "Method@" << method->GetDexMethodIndex() << ":" << method - << " klass@" << klass; + << " klass@" << klass.Ptr(); // Pretty info last in case it crashes. 
LOG(FATAL) << "Method " << method->PrettyMethod() << " klass " << klass->PrettyClass(); } } } - mirror::Object* new_ref = klass; + mirror::Object* new_ref = klass.Ptr(); visitor_(&new_ref, -1, this); if (new_ref != klass) { - method->CASDeclaringClass(klass, new_ref->AsClass()); + method->CASDeclaringClass(klass.Ptr(), new_ref->AsClass()); } } } @@ -3367,7 +3365,7 @@ void Thread::DeoptimizeWithDeoptimizationException(JValue* result) { ClearException(); ShadowFrame* shadow_frame = PopStackedShadowFrame(StackedShadowFrameType::kDeoptimizationShadowFrame); - mirror::Throwable* pending_exception = nullptr; + ObjPtr<mirror::Throwable> pending_exception; bool from_code = false; PopDeoptimizationContext(result, &pending_exception, &from_code); SetTopOfStack(nullptr); diff --git a/runtime/thread.h b/runtime/thread.h index 411d85f015..a3ef9bc0a3 100644 --- a/runtime/thread.h +++ b/runtime/thread.h @@ -177,7 +177,7 @@ class Thread { void CheckEmptyCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_); static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts, - mirror::Object* thread_peer) + ObjPtr<mirror::Object> thread_peer) REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_) REQUIRES_SHARED(Locks::mutator_lock_); static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts, jobject thread) @@ -312,7 +312,7 @@ class Thread { size_t NumberOfHeldMutexes() const; - bool HoldsLock(mirror::Object*) const REQUIRES_SHARED(Locks::mutator_lock_); + bool HoldsLock(ObjPtr<mirror::Object> object) const REQUIRES_SHARED(Locks::mutator_lock_); /* * Changes the priority of this thread to match that of the java.lang.Thread object. @@ -413,7 +413,7 @@ class Thread { // Returns whether the given exception was thrown by the current Java method being executed // (Note that this includes native Java methods). - bool IsExceptionThrownByCurrentMethod(mirror::Throwable* exception) const + bool IsExceptionThrownByCurrentMethod(ObjPtr<mirror::Throwable> exception) const REQUIRES_SHARED(Locks::mutator_lock_); void SetTopOfStack(ArtMethod** top_method) { @@ -925,9 +925,11 @@ class Thread { void PushDeoptimizationContext(const JValue& return_value, bool is_reference, bool from_code, - mirror::Throwable* exception) + ObjPtr<mirror::Throwable> exception) REQUIRES_SHARED(Locks::mutator_lock_); - void PopDeoptimizationContext(JValue* result, mirror::Throwable** exception, bool* from_code) + void PopDeoptimizationContext(JValue* result, + ObjPtr<mirror::Throwable>* exception, + bool* from_code) REQUIRES_SHARED(Locks::mutator_lock_); void AssertHasDeoptimizationContext() REQUIRES_SHARED(Locks::mutator_lock_); @@ -1416,7 +1418,7 @@ class Thread { stacked_shadow_frame_record(nullptr), deoptimization_context_stack(nullptr), frame_id_to_shadow_frame(nullptr), name(nullptr), pthread_self(0), last_no_thread_suspension_cause(nullptr), checkpoint_function(nullptr), - thread_local_pos(nullptr), thread_local_end(nullptr), thread_local_start(nullptr), + thread_local_start(nullptr), thread_local_pos(nullptr), thread_local_end(nullptr), thread_local_objects(0), mterp_current_ibase(nullptr), mterp_default_ibase(nullptr), mterp_alt_ibase(nullptr), thread_local_alloc_stack_top(nullptr), thread_local_alloc_stack_end(nullptr), nested_signal_state(nullptr), @@ -1540,12 +1542,12 @@ class Thread { JniEntryPoints jni_entrypoints; QuickEntryPoints quick_entrypoints; + // Thread-local allocation pointer. Moved here to force alignment for thread_local_pos on ARM. 
+ uint8_t* thread_local_start; // thread_local_pos and thread_local_end must be consecutive for ldrd and are 8 byte aligned for // potentially better performance. uint8_t* thread_local_pos; uint8_t* thread_local_end; - // Thread-local allocation pointer. - uint8_t* thread_local_start; size_t thread_local_objects; diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc index bf9eef8370..c5c7e2cc16 100644 --- a/runtime/thread_list.cc +++ b/runtime/thread_list.cc @@ -653,8 +653,9 @@ void ThreadList::SuspendAllInternal(Thread* self, // is done with a timeout so that we can detect problems. #if ART_USE_FUTEXES timespec wait_timeout; - InitTimeSpec(true, CLOCK_MONOTONIC, 10000, 0, &wait_timeout); + InitTimeSpec(false, CLOCK_MONOTONIC, kIsDebugBuild ? 50000 : 10000, 0, &wait_timeout); #endif + const uint64_t start_time = NanoTime(); while (true) { int32_t cur_val = pending_threads.LoadRelaxed(); if (LIKELY(cur_val > 0)) { @@ -664,7 +665,8 @@ void ThreadList::SuspendAllInternal(Thread* self, if ((errno != EAGAIN) && (errno != EINTR)) { if (errno == ETIMEDOUT) { LOG(kIsDebugBuild ? ::android::base::FATAL : ::android::base::ERROR) - << "Unexpected time out during suspend all."; + << "Timed out waiting for threads to suspend, waited for " + << PrettyDuration(NanoTime() - start_time); } else { PLOG(FATAL) << "futex wait failed for SuspendAllInternal()"; } @@ -672,6 +674,7 @@ void ThreadList::SuspendAllInternal(Thread* self, } // else re-check pending_threads in the next iteration (this may be a spurious wake-up). #else // Spin wait. This is likely to be slow, but on most architecture ART_USE_FUTEXES is set. + UNUSED(start_time); #endif } else { CHECK_EQ(cur_val, 0); diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc index 7b5ced19dc..a5b275cc3b 100644 --- a/runtime/well_known_classes.cc +++ b/runtime/well_known_classes.cc @@ -337,7 +337,6 @@ void WellKnownClasses::Init(JNIEnv* env) { java_lang_ref_ReferenceQueue_add = CacheMethod(env, java_lang_ref_ReferenceQueue.get(), true, "add", "(Ljava/lang/ref/Reference;)V"); java_lang_reflect_Parameter_init = CacheMethod(env, java_lang_reflect_Parameter, false, "<init>", "(Ljava/lang/String;ILjava/lang/reflect/Executable;I)V"); - java_lang_reflect_Proxy_invoke = CacheMethod(env, java_lang_reflect_Proxy, true, "invoke", "(Ljava/lang/reflect/Proxy;Ljava/lang/reflect/Method;[Ljava/lang/Object;)Ljava/lang/Object;"); java_lang_Thread_dispatchUncaughtException = CacheMethod(env, java_lang_Thread, false, "dispatchUncaughtException", "(Ljava/lang/Throwable;)V"); java_lang_Thread_init = CacheMethod(env, java_lang_Thread, false, "<init>", "(Ljava/lang/ThreadGroup;Ljava/lang/String;IZ)V"); java_lang_Thread_run = CacheMethod(env, java_lang_Thread, false, "run", "()V"); @@ -371,7 +370,6 @@ void WellKnownClasses::Init(JNIEnv* env) { java_lang_Throwable_stackState = CacheField(env, java_lang_Throwable, false, "backtrace", "Ljava/lang/Object;"); java_lang_Throwable_suppressedExceptions = CacheField(env, java_lang_Throwable, false, "suppressedExceptions", "Ljava/util/List;"); java_lang_reflect_Executable_artMethod = CacheField(env, java_lang_reflect_Executable, false, "artMethod", "J"); - java_lang_reflect_Proxy_h = CacheField(env, java_lang_reflect_Proxy, false, "h", "Ljava/lang/reflect/InvocationHandler;"); java_nio_DirectByteBuffer_capacity = CacheField(env, java_nio_DirectByteBuffer, false, "capacity", "I"); java_nio_DirectByteBuffer_effectiveDirectAddress = CacheField(env, java_nio_DirectByteBuffer, false, "address", "J"); 
java_util_ArrayList_array = CacheField(env, java_util_ArrayList, false, "elementData", "[Ljava/lang/Object;"); @@ -398,10 +396,20 @@ void WellKnownClasses::Init(JNIEnv* env) { void WellKnownClasses::LateInit(JNIEnv* env) { ScopedLocalRef<jclass> java_lang_Runtime(env, env->FindClass("java/lang/Runtime")); + // CacheField and CacheMethod will initialize their classes. Classes below + // have clinit sections that call JNI methods. Late init is required + // to make sure these JNI methods are available. java_lang_Runtime_nativeLoad = CacheMethod(env, java_lang_Runtime.get(), true, "nativeLoad", "(Ljava/lang/String;Ljava/lang/ClassLoader;Ljava/lang/String;)" "Ljava/lang/String;"); + java_lang_reflect_Proxy_invoke = + CacheMethod(env, java_lang_reflect_Proxy, true, "invoke", + "(Ljava/lang/reflect/Proxy;Ljava/lang/reflect/Method;" + "[Ljava/lang/Object;)Ljava/lang/Object;"); + java_lang_reflect_Proxy_h = + CacheField(env, java_lang_reflect_Proxy, false, "h", + "Ljava/lang/reflect/InvocationHandler;"); } ObjPtr<mirror::Class> WellKnownClasses::ToClass(jclass global_jclass) { diff --git a/test/529-checker-unresolved/src/Main.java b/test/529-checker-unresolved/src/Main.java index 5fd51e1dca..89b9cb45c3 100644 --- a/test/529-checker-unresolved/src/Main.java +++ b/test/529-checker-unresolved/src/Main.java @@ -192,13 +192,13 @@ public class Main extends UnresolvedSuperClass { /// CHECK-START: void Main.testLicm(int) licm (before) /// CHECK: <<Class:l\d+>> LoadClass loop:B2 /// CHECK-NEXT: <<Clinit:l\d+>> ClinitCheck [<<Class>>] loop:B2 - /// CHECK-NEXT: <<New:l\d+>> NewInstance [<<Clinit>>,<<Method:[i|j]\d+>>] loop:B2 + /// CHECK-NEXT: <<New:l\d+>> NewInstance [<<Clinit>>] loop:B2 /// CHECK-NEXT: InvokeUnresolved [<<New>>] loop:B2 /// CHECK-START: void Main.testLicm(int) licm (after) /// CHECK: <<Class:l\d+>> LoadClass loop:none /// CHECK-NEXT: <<Clinit:l\d+>> ClinitCheck [<<Class>>] loop:none - /// CHECK: <<New:l\d+>> NewInstance [<<Clinit>>,<<Method:[i|j]\d+>>] loop:B2 + /// CHECK: <<New:l\d+>> NewInstance [<<Clinit>>] loop:B2 /// CHECK-NEXT: InvokeUnresolved [<<New>>] loop:B2 static public void testLicm(int count) { // Test to make sure we keep the initialization check after loading an unresolved class. 
diff --git a/test/536-checker-intrinsic-optimization/src/Main.java b/test/536-checker-intrinsic-optimization/src/Main.java index 26475ae55c..ed7524c7ad 100644 --- a/test/536-checker-intrinsic-optimization/src/Main.java +++ b/test/536-checker-intrinsic-optimization/src/Main.java @@ -153,8 +153,8 @@ public class Main { /// CHECK-DAG: <<Pos:i\d+>> ParameterValue /// CHECK-DAG: <<NullCk:l\d+>> NullCheck [<<String>>] /// CHECK-DAG: <<Length:i\d+>> ArrayLength [<<NullCk>>] is_string_length:true - /// CHECK-DAG: BoundsCheck [<<Pos>>,<<Length>>] is_string_char_at:true - /// CHECK-DAG: <<Char:c\d+>> ArrayGet [<<NullCk>>,<<Pos>>] is_string_char_at:true + /// CHECK-DAG: <<Bounds:i\d+>> BoundsCheck [<<Pos>>,<<Length>>] is_string_char_at:true + /// CHECK-DAG: <<Char:c\d+>> ArrayGet [<<NullCk>>,<<Bounds>>] is_string_char_at:true /// CHECK-DAG: Return [<<Char>>] /// CHECK-START: char Main.$opt$noinline$stringCharAt(java.lang.String, int) instruction_simplifier (after) @@ -174,8 +174,8 @@ public class Main { /// CHECK-DAG: <<Pos:i\d+>> ParameterValue /// CHECK-DAG: <<NullCk:l\d+>> NullCheck [<<String>>] /// CHECK-DAG: <<Length:i\d+>> ArrayLength [<<NullCk>>] is_string_length:true - /// CHECK-DAG: BoundsCheck [<<Pos>>,<<Length>>] is_string_char_at:true - /// CHECK-DAG: <<Char:c\d+>> ArrayGet [<<NullCk>>,<<Pos>>] is_string_char_at:true + /// CHECK-DAG: <<Bounds:i\d+>> BoundsCheck [<<Pos>>,<<Length>>] is_string_char_at:true + /// CHECK-DAG: <<Char:c\d+>> ArrayGet [<<NullCk>>,<<Bounds>>] is_string_char_at:true /// CHECK-DAG: Return [<<Char>>] /// CHECK-START: char Main.$opt$noinline$stringCharAtCatch(java.lang.String, int) instruction_simplifier (after) diff --git a/test/621-checker-new-instance/info.txt b/test/621-checker-new-instance/info.txt deleted file mode 100644 index c27c45ca7f..0000000000 --- a/test/621-checker-new-instance/info.txt +++ /dev/null @@ -1 +0,0 @@ -Tests for removing useless load class. diff --git a/test/621-checker-new-instance/src/Main.java b/test/621-checker-new-instance/src/Main.java deleted file mode 100644 index 68a46449f0..0000000000 --- a/test/621-checker-new-instance/src/Main.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright (C) 2016 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -public class Main { - /// CHECK-START: java.lang.Object Main.newObject() prepare_for_register_allocation (before) - /// CHECK: LoadClass - /// CHECK: NewInstance - - /// CHECK-START: java.lang.Object Main.newObject() prepare_for_register_allocation (after) - /// CHECK-NOT: LoadClass - /// CHECK: NewInstance - public static Object newObject() { - return new Object(); - } - - /// CHECK-START: java.lang.Object Main.newFinalizableMayThrow() prepare_for_register_allocation (after) - /// CHECK: LoadClass - /// CHECK: NewInstance - public static Object newFinalizableMayThrow() { - return $inline$newFinalizableMayThrow(); - } - - public static Object $inline$newFinalizableMayThrow() { - return new FinalizableMayThrow(); - } - - public static void main(String[] args) { - newFinalizableMayThrow(); - newObject(); - } -} - -class FinalizableMayThrow { - // clinit may throw OOME. - static Object o = new Object(); - static String s; - public void finalize() { - s = "Test"; - } -} diff --git a/test/621-checker-new-instance/expected.txt b/test/631-checker-get-class/expected.txt index e69de29bb2..e69de29bb2 100644 --- a/test/621-checker-new-instance/expected.txt +++ b/test/631-checker-get-class/expected.txt diff --git a/test/631-checker-get-class/info.txt b/test/631-checker-get-class/info.txt new file mode 100644 index 0000000000..f236a22e1f --- /dev/null +++ b/test/631-checker-get-class/info.txt @@ -0,0 +1,4 @@ +Checker test to make sure we recognize the pattern: +if (foo.getClass() == Foo.class) + +For doing better type propagation. diff --git a/test/631-checker-get-class/src/Main.java b/test/631-checker-get-class/src/Main.java new file mode 100644 index 0000000000..61c0adf624 --- /dev/null +++ b/test/631-checker-get-class/src/Main.java @@ -0,0 +1,99 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +public class Main { + + /// CHECK-START: int Main.bar(Main) inliner (before) + /// CHECK: InvokeVirtual + /// CHECK: InvokeVirtual + + /// CHECK-START: int Main.bar(Main) inliner (after) + /// CHECK-NOT: InvokeVirtual + public static int bar(Main m) { + if (m.getClass() == Main.class) { + return m.foo(); + } + return 4; + } + + public int foo() { + return 42; + } + + /// CHECK-START: boolean Main.classEquality1() instruction_simplifier$after_inlining (before) + /// CHECK-DAG: <<Eq:z\d+>> {{Equal|NotEqual}} + /// CHECK-DAG: <<Select:i\d+>> Select [{{i\d+}},{{i\d+}},<<Eq>>] + /// CHECK-DAG: Return [<<Select>>] + + /// CHECK-START: boolean Main.classEquality1() instruction_simplifier$after_inlining (after) + /// CHECK-DAG: <<Constant:i\d+>> IntConstant 1 + /// CHECK-DAG: Return [<<Constant>>] + public static boolean classEquality1() { + return new Main().getClass() == Main.class; + } + + /// CHECK-START: boolean Main.classEquality2() instruction_simplifier$after_inlining (before) + /// CHECK-DAG: <<Eq:z\d+>> {{Equal|NotEqual}} + /// CHECK-DAG: <<Select:i\d+>> Select [{{i\d+}},{{i\d+}},<<Eq>>] + /// CHECK-DAG: Return [<<Select>>] + + /// CHECK-START: boolean Main.classEquality2() instruction_simplifier$after_inlining (after) + /// CHECK-DAG: <<Constant:i\d+>> IntConstant 0 + /// CHECK-DAG: Return [<<Constant>>] + public static boolean classEquality2() { + Object o = new SubMain(); + return o.getClass() == Main.class; + } + + /// CHECK-START: boolean Main.classEquality3() instruction_simplifier$after_inlining (before) + /// CHECK-DAG: <<Eq:z\d+>> {{Equal|NotEqual}} + /// CHECK-DAG: <<Select:i\d+>> Select [{{i\d+}},{{i\d+}},<<Eq>>] + /// CHECK-DAG: Return [<<Select>>] + + /// CHECK-START: boolean Main.classEquality3() instruction_simplifier$after_inlining (after) + /// CHECK-DAG: <<Constant:i\d+>> IntConstant 0 + /// CHECK-DAG: Return [<<Constant>>] + public static boolean classEquality3() { + return new Main().getClass() != Main.class; + } + + /// CHECK-START: boolean Main.classEquality4() instruction_simplifier$after_inlining (before) + /// CHECK-DAG: <<Eq:z\d+>> {{Equal|NotEqual}} + /// CHECK-DAG: <<Select:i\d+>> Select [{{i\d+}},{{i\d+}},<<Eq>>] + /// CHECK-DAG: Return [<<Select>>] + + /// CHECK-START: boolean Main.classEquality4() instruction_simplifier$after_inlining (after) + /// CHECK-DAG: <<Constant:i\d+>> IntConstant 1 + /// CHECK-DAG: Return [<<Constant>>] + public static boolean classEquality4() { + Object o = new SubMain(); + return o.getClass() != Main.class; + } + + public static void main(String[] args) { + int actual = bar(new Main()); + if (actual != 42) { + throw new Error("Expected 42, got " + actual); + } + actual = bar(new SubMain()); + if (actual != 4) { + throw new Error("Expected 4, got " + actual); + } + } +} + +class SubMain extends Main { +} diff --git a/test/632-checker-char-at-bounds/expected.txt b/test/632-checker-char-at-bounds/expected.txt new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/test/632-checker-char-at-bounds/expected.txt diff --git a/test/632-checker-char-at-bounds/info.txt b/test/632-checker-char-at-bounds/info.txt new file mode 100644 index 0000000000..10b9a447d4 --- /dev/null +++ b/test/632-checker-char-at-bounds/info.txt @@ -0,0 +1,2 @@ +Regression test for the optimization of String.charAt, which +had its SSA dependency incorrect with its corresponding bound check. 
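Editor's note on the regression described in the info.txt above: String.charAt is intrinsified into an ArrayLength / BoundsCheck / ArrayGet sequence, and the updated expectations in 536-checker-intrinsic-optimization show the fix — the ArrayGet now consumes the BoundsCheck result (<<Bounds>>) instead of the raw position (<<Pos>>). Without that SSA dependency, LICM could hoist the unchecked read out of a loop while leaving the check behind. The following sketch is illustrative only; the class and method names (CharAtHoisting, lastChar) are hypothetical and are not part of the patch or of the regression test that follows. It shows, at the Java level, why the read must stay behind its check: when the loop never executes, the out-of-range access must not happen at all.

    public class CharAtHoisting {
      // With n == 0 the loop body never runs: the out-of-range charAt must not
      // execute and no StringIndexOutOfBoundsException may be thrown.
      static char lastChar(String s, int n) {
        char c = 0;
        for (int i = 0; i < n; i++) {
          c = s.charAt(100);  // index is out of range for the string used below
        }
        return c;
      }

      public static void main(String[] args) {
        // Must print 0 and must not throw, even though "foo" has only 3 chars.
        System.out.println((int) lastChar("foo", 0));
      }
    }

The regression test added below covers the complementary case where the loop does run: the charAt may be hoisted, but only together with its bounds check, so the expected StringIndexOutOfBoundsException is still thrown.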
diff --git a/test/632-checker-char-at-bounds/src/Main.java b/test/632-checker-char-at-bounds/src/Main.java new file mode 100644 index 0000000000..65022d0af9 --- /dev/null +++ b/test/632-checker-char-at-bounds/src/Main.java @@ -0,0 +1,40 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +public class Main { + + /// CHECK-START: void Main.main(java.lang.String[]) licm (after) + /// CHECK-DAG: <<NullCheck:l\d+>> NullCheck + /// CHECK-DAG: <<BoundsCheck:i\d+>> BoundsCheck + /// CHECK-DAG: ArrayGet [<<NullCheck>>,<<BoundsCheck>>] + public static void main(String[] args) { + try { + String foo = myString; + foo.getClass(); // Make sure the null check is not in the loop. + char c = 0; + for (int i = 0; i < 10; i++) { + // The charAt may be licm'ed, but it has to be licm'ed with its + // bounds check. + c = foo.charAt(10000000); + } + System.out.println(c); + } catch (StringIndexOutOfBoundsException e) { + // Expected + } + } + + static String myString = "foo"; +} diff --git a/test/910-methods/expected.txt b/test/910-methods/expected.txt index 9a747994fe..c913b3ffe5 100644 --- a/test/910-methods/expected.txt +++ b/test/910-methods/expected.txt @@ -1,15 +1,59 @@ [toString, ()Ljava/lang/String;, null] class java.lang.Object 1 +Max locals: 3 +Argument size: 1 +Location start: 0 +Location end: 40 +Is native: false +Is obsolete: false +Is synthetic: false [charAt, (I)C, null] class java.lang.String 257 +Max locals: JVMTI_ERROR_NATIVE_METHOD +Argument size: JVMTI_ERROR_NATIVE_METHOD +Location start: JVMTI_ERROR_NATIVE_METHOD +Location end: JVMTI_ERROR_NATIVE_METHOD +Is native: true +Is obsolete: false +Is synthetic: false [sqrt, (D)D, null] class java.lang.Math 265 +Max locals: JVMTI_ERROR_NATIVE_METHOD +Argument size: JVMTI_ERROR_NATIVE_METHOD +Location start: JVMTI_ERROR_NATIVE_METHOD +Location end: JVMTI_ERROR_NATIVE_METHOD +Is native: true +Is obsolete: false +Is synthetic: false [add, (Ljava/lang/Object;)Z, null] interface java.util.List 1025 +Max locals: 0 +Argument size: 0 +Location start: 0 +Location end: 0 +Is native: false +Is obsolete: false +Is synthetic: false [run, ()V, null] class $Proxy0 17 +Max locals: 0 +Argument size: 0 +Location start: 0 +Location end: 0 +Is native: false +Is obsolete: false +Is synthetic: false +class Main$NestedSynthetic +4104 +Max locals: 1 +Argument size: 0 +Location start: 0 +Location end: 2 +Is native: false +Is obsolete: false +Is synthetic: true diff --git a/test/910-methods/methods.cc b/test/910-methods/methods.cc index b64952d62b..fa9679db4b 100644 --- a/test/910-methods/methods.cc +++ b/test/910-methods/methods.cc @@ -114,6 +114,99 @@ extern "C" JNIEXPORT jint JNICALL Java_Main_getMethodModifiers( return modifiers; } +extern "C" JNIEXPORT jint JNICALL Java_Main_getMaxLocals( + JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject method) { + jmethodID id = env->FromReflectedMethod(method); + + jint max_locals; + jvmtiError result = jvmti_env->GetMaxLocals(id, &max_locals); + if 
(JvmtiErrorToException(env, result)) { + return -1; + } + + return max_locals; +} + +extern "C" JNIEXPORT jint JNICALL Java_Main_getArgumentsSize( + JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject method) { + jmethodID id = env->FromReflectedMethod(method); + + jint arguments; + jvmtiError result = jvmti_env->GetArgumentsSize(id, &arguments); + if (JvmtiErrorToException(env, result)) { + return -1; + } + + return arguments; +} + +extern "C" JNIEXPORT jlong JNICALL Java_Main_getMethodLocationStart( + JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject method) { + jmethodID id = env->FromReflectedMethod(method); + + jlong start; + jlong end; + jvmtiError result = jvmti_env->GetMethodLocation(id, &start, &end); + if (JvmtiErrorToException(env, result)) { + return -1; + } + + return start; +} + +extern "C" JNIEXPORT jlong JNICALL Java_Main_getMethodLocationEnd( + JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject method) { + jmethodID id = env->FromReflectedMethod(method); + + jlong start; + jlong end; + jvmtiError result = jvmti_env->GetMethodLocation(id, &start, &end); + if (JvmtiErrorToException(env, result)) { + return -1; + } + + return end; +} + +extern "C" JNIEXPORT jboolean JNICALL Java_Main_isMethodNative( + JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject method) { + jmethodID id = env->FromReflectedMethod(method); + + jboolean is_native; + jvmtiError result = jvmti_env->IsMethodNative(id, &is_native); + if (JvmtiErrorToException(env, result)) { + return JNI_FALSE; + } + + return is_native; +} + +extern "C" JNIEXPORT jboolean JNICALL Java_Main_isMethodObsolete( + JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject method) { + jmethodID id = env->FromReflectedMethod(method); + + jboolean is_obsolete; + jvmtiError result = jvmti_env->IsMethodObsolete(id, &is_obsolete); + if (JvmtiErrorToException(env, result)) { + return JNI_FALSE; + } + + return is_obsolete; +} + +extern "C" JNIEXPORT jboolean JNICALL Java_Main_isMethodSynthetic( + JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject method) { + jmethodID id = env->FromReflectedMethod(method); + + jboolean is_synthetic; + jvmtiError result = jvmti_env->IsMethodSynthetic(id, &is_synthetic); + if (JvmtiErrorToException(env, result)) { + return JNI_FALSE; + } + + return is_synthetic; +} + // Don't do anything jint OnLoad(JavaVM* vm, char* options ATTRIBUTE_UNUSED, diff --git a/test/910-methods/src/Main.java b/test/910-methods/src/Main.java index 3459134ea2..bf25a0d028 100644 --- a/test/910-methods/src/Main.java +++ b/test/910-methods/src/Main.java @@ -32,6 +32,10 @@ public class Main { testMethod("java.util.List", "add", Object.class); testMethod(getProxyClass(), "run"); + + // Find a synthetic method in the dummy inner class. Do not print the name. Javac and Jack + // disagree on the naming of synthetic accessors. + testMethod(findSyntheticMethod(), NestedSynthetic.class, false); } private static Class<?> proxyClass = null; @@ -54,8 +58,17 @@ public class Main { private static void testMethod(Class<?> base, String methodName, Class<?>... 
types) throws Exception { Method m = base.getDeclaredMethod(methodName, types); + testMethod(m, base, true); + } + + private static void testMethod(Method m, Class<?> base, boolean printName) { String[] result = getMethodName(m); - System.out.println(Arrays.toString(result)); + if (!result[0].equals(m.getName())) { + throw new RuntimeException("Name not equal: " + m.getName() + " vs " + result[0]); + } + if (printName) { + System.out.println(Arrays.toString(result)); + } Class<?> declClass = getMethodDeclaringClass(m); if (base != declClass) { @@ -68,9 +81,67 @@ public class Main { throw new RuntimeException("Modifiers not equal: " + m.getModifiers() + " vs " + modifiers); } System.out.println(modifiers); + + System.out.print("Max locals: "); + try { + System.out.println(getMaxLocals(m)); + } catch (RuntimeException e) { + System.out.println(e.getMessage()); + } + + System.out.print("Argument size: "); + try { + System.out.println(getArgumentsSize(m)); + } catch (RuntimeException e) { + System.out.println(e.getMessage()); + } + + System.out.print("Location start: "); + try { + System.out.println(getMethodLocationStart(m)); + } catch (RuntimeException e) { + System.out.println(e.getMessage()); + } + + System.out.print("Location end: "); + try { + System.out.println(getMethodLocationEnd(m)); + } catch (RuntimeException e) { + System.out.println(e.getMessage()); + } + + System.out.println("Is native: " + isMethodNative(m)); + System.out.println("Is obsolete: " + isMethodObsolete(m)); + System.out.println("Is synthetic: " + isMethodSynthetic(m)); + } + + private static class NestedSynthetic { + // Accessing this private field will create a synthetic accessor method; + private static String dummy; + } + + private static void dummyAccess() { + System.out.println(NestedSynthetic.dummy); + } + + private static Method findSyntheticMethod() throws Exception { + Method methods[] = NestedSynthetic.class.getDeclaredMethods(); + for (Method m : methods) { + if (m.isSynthetic()) { + return m; + } + } + throw new RuntimeException("Could not find synthetic method"); } private static native String[] getMethodName(Method m); private static native Class<?> getMethodDeclaringClass(Method m); private static native int getMethodModifiers(Method m); + private static native int getMaxLocals(Method m); + private static native int getArgumentsSize(Method m); + private static native long getMethodLocationStart(Method m); + private static native long getMethodLocationEnd(Method m); + private static native boolean isMethodNative(Method m); + private static native boolean isMethodObsolete(Method m); + private static native boolean isMethodSynthetic(Method m); } diff --git a/test/912-classes/classes.cc b/test/912-classes/classes.cc index 19d82c544c..38a4f0e337 100644 --- a/test/912-classes/classes.cc +++ b/test/912-classes/classes.cc @@ -29,6 +29,20 @@ namespace art { namespace Test912Classes { +extern "C" JNIEXPORT jboolean JNICALL Java_Main_isModifiableClass( + JNIEnv* env ATTRIBUTE_UNUSED, jclass Main_klass ATTRIBUTE_UNUSED, jclass klass) { + jboolean res = JNI_FALSE; + jvmtiError result = jvmti_env->IsModifiableClass(klass, &res); + if (result != JVMTI_ERROR_NONE) { + char* err; + jvmti_env->GetErrorName(result, &err); + printf("Failure running IsModifiableClass: %s\n", err); + jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(err)); + return JNI_FALSE; + } + return res; +} + extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getClassSignature( JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jclass klass) { 
char* sig; diff --git a/test/912-classes/expected.txt b/test/912-classes/expected.txt index 3507a1a676..44c861a3b7 100644 --- a/test/912-classes/expected.txt +++ b/test/912-classes/expected.txt @@ -12,13 +12,13 @@ 411 [[D, null] 411 -int interface=false array=false -$Proxy0 interface=false array=false -java.lang.Runnable interface=true array=false -java.lang.String interface=false array=false -[I interface=false array=true -[Ljava.lang.Runnable; interface=false array=true -[Ljava.lang.String; interface=false array=true +int interface=false array=false modifiable=false +$Proxy0 interface=false array=false modifiable=false +java.lang.Runnable interface=true array=false modifiable=false +java.lang.String interface=false array=false modifiable=true +[I interface=false array=true modifiable=false +[Ljava.lang.Runnable; interface=false array=true modifiable=false +[Ljava.lang.String; interface=false array=true modifiable=false [public static final int java.lang.Integer.BYTES, static final char[] java.lang.Integer.DigitOnes, static final char[] java.lang.Integer.DigitTens, public static final int java.lang.Integer.MAX_VALUE, public static final int java.lang.Integer.MIN_VALUE, public static final int java.lang.Integer.SIZE, private static final java.lang.String[] java.lang.Integer.SMALL_NEG_VALUES, private static final java.lang.String[] java.lang.Integer.SMALL_NONNEG_VALUES, public static final java.lang.Class java.lang.Integer.TYPE, static final char[] java.lang.Integer.digits, private static final long java.lang.Integer.serialVersionUID, static final int[] java.lang.Integer.sizeTable, private final int java.lang.Integer.value] [] [] diff --git a/test/912-classes/src/Main.java b/test/912-classes/src/Main.java index 69e5a4cc58..e627d4227a 100644 --- a/test/912-classes/src/Main.java +++ b/test/912-classes/src/Main.java @@ -107,7 +107,9 @@ public class Main { private static void testClassType(Class<?> c) throws Exception { boolean isInterface = isInterface(c); boolean isArray = isArrayClass(c); - System.out.println(c.getName() + " interface=" + isInterface + " array=" + isArray); + boolean isModifiable = isModifiableClass(c); + System.out.println(c.getName() + " interface=" + isInterface + " array=" + isArray + + " modifiable=" + isModifiable); } private static void testClassFields(Class<?> c) throws Exception { @@ -149,6 +151,7 @@ public class Main { } } + private static native boolean isModifiableClass(Class<?> c); private static native String[] getClassSignature(Class<?> c); private static native boolean isInterface(Class<?> c); diff --git a/test/920-objects/build b/test/920-objects/build new file mode 100755 index 0000000000..898e2e54a2 --- /dev/null +++ b/test/920-objects/build @@ -0,0 +1,17 @@ +#!/bin/bash +# +# Copyright 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +./default-build "$@" --experimental agents diff --git a/test/920-objects/expected.txt b/test/920-objects/expected.txt new file mode 100644 index 0000000000..80feeb93e4 --- /dev/null +++ b/test/920-objects/expected.txt @@ -0,0 +1,10 @@ +class java.lang.Object 8 +class java.lang.Object 8 +class [I 12 +class [I 16 +class [I 20 +class [D 16 +class [D 24 +class [D 32 +class java.lang.String 24 +class java.lang.String 24 diff --git a/test/920-objects/info.txt b/test/920-objects/info.txt new file mode 100644 index 0000000000..875a5f6ec1 --- /dev/null +++ b/test/920-objects/info.txt @@ -0,0 +1 @@ +Tests basic functions in the jvmti plugin. diff --git a/test/920-objects/objects.cc b/test/920-objects/objects.cc new file mode 100644 index 0000000000..886dd0e673 --- /dev/null +++ b/test/920-objects/objects.cc @@ -0,0 +1,77 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "objects.h" + +#include <stdio.h> + +#include "base/macros.h" +#include "jni.h" +#include "openjdkjvmti/jvmti.h" +#include "ScopedLocalRef.h" + +#include "ti-agent/common_helper.h" +#include "ti-agent/common_load.h" + +namespace art { +namespace Test920Objects { + +extern "C" JNIEXPORT jlong JNICALL Java_Main_getObjectSize( + JNIEnv* env ATTRIBUTE_UNUSED, jclass klass ATTRIBUTE_UNUSED, jobject object) { + jlong size; + + jvmtiError result = jvmti_env->GetObjectSize(object, &size); + if (result != JVMTI_ERROR_NONE) { + char* err; + jvmti_env->GetErrorName(result, &err); + printf("Failure running GetObjectSize: %s\n", err); + jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(err)); + return -1; + } + + return size; +} + +extern "C" JNIEXPORT jint JNICALL Java_Main_getObjectHashCode( + JNIEnv* env ATTRIBUTE_UNUSED, jclass klass ATTRIBUTE_UNUSED, jobject object) { + jint hash; + + jvmtiError result = jvmti_env->GetObjectHashCode(object, &hash); + if (result != JVMTI_ERROR_NONE) { + char* err; + jvmti_env->GetErrorName(result, &err); + printf("Failure running GetObjectHashCode: %s\n", err); + jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(err)); + return -1; + } + + return hash; +} + +// Don't do anything +jint OnLoad(JavaVM* vm, + char* options ATTRIBUTE_UNUSED, + void* reserved ATTRIBUTE_UNUSED) { + if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) { + printf("Unable to get jvmti env!\n"); + return 1; + } + SetAllCapabilities(jvmti_env); + return 0; +} + +} // namespace Test920Objects +} // namespace art diff --git a/test/920-objects/objects.h b/test/920-objects/objects.h new file mode 100644 index 0000000000..5f21e7b7cb --- /dev/null +++ b/test/920-objects/objects.h @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_TEST_920_OBJECTS_OBJECTS_H_ +#define ART_TEST_920_OBJECTS_OBJECTS_H_ + +#include <jni.h> + +namespace art { +namespace Test920Objects { + +jint OnLoad(JavaVM* vm, char* options, void* reserved); + +} // namespace Test920Objects +} // namespace art + +#endif // ART_TEST_920_OBJECTS_OBJECTS_H_ diff --git a/test/920-objects/run b/test/920-objects/run new file mode 100755 index 0000000000..4379349cb2 --- /dev/null +++ b/test/920-objects/run @@ -0,0 +1,19 @@ +#!/bin/bash +# +# Copyright 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +./default-run "$@" --experimental agents \ + --experimental runtime-plugins \ + --jvmti diff --git a/test/920-objects/src/Main.java b/test/920-objects/src/Main.java new file mode 100644 index 0000000000..5dbe1a7e58 --- /dev/null +++ b/test/920-objects/src/Main.java @@ -0,0 +1,98 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.lang.reflect.Field; +import java.util.Arrays; + +public class Main { + public static void main(String[] args) throws Exception { + System.loadLibrary(args[1]); + + doTest(); + } + + public static void doTest() throws Exception { + testObjectSize(new Object()); + testObjectSize(new Object()); + + testObjectSize(new int[0]); + testObjectSize(new int[1]); + testObjectSize(new int[2]); + + testObjectSize(new double[0]); + testObjectSize(new double[1]); + testObjectSize(new double[2]); + + testObjectSize(new String("abc")); + testObjectSize(new String("wxyz")); + + testObjectHash(); + } + + private static void testObjectSize(Object o) { + System.out.println(o.getClass() + " " + getObjectSize(o)); + } + + private static void testObjectHash() { + Object[] objects = new Object[] { + new Object(), + new Object(), + + new MyHash(1), + new MyHash(1), + new MyHash(2) + }; + + int hashes[] = new int[objects.length]; + + for (int i = 0; i < objects.length; i++) { + hashes[i] = getObjectHashCode(objects[i]); + } + + // Implementation detail: we use the identity hashcode, for simplicity. 
+ for (int i = 0; i < objects.length; i++) { + int ihash = System.identityHashCode(objects[i]); + if (hashes[i] != ihash) { + throw new RuntimeException(objects[i] + ": " + hashes[i] + " vs " + ihash); + } + } + + Runtime.getRuntime().gc(); + Runtime.getRuntime().gc(); + + for (int i = 0; i < objects.length; i++) { + int newhash = getObjectHashCode(objects[i]); + if (hashes[i] != newhash) { + throw new RuntimeException(objects[i] + ": " + hashes[i] + " vs " + newhash); + } + } + } + + private static native long getObjectSize(Object o); + private static native int getObjectHashCode(Object o); + + private static class MyHash { + private int hash; + + public MyHash(int h) { + hash = h; + } + + public int hashCode() { + return hash; + } + } +} diff --git a/test/921-hello-failure/build b/test/921-hello-failure/build new file mode 100755 index 0000000000..898e2e54a2 --- /dev/null +++ b/test/921-hello-failure/build @@ -0,0 +1,17 @@ +#!/bin/bash +# +# Copyright 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +./default-build "$@" --experimental agents diff --git a/test/921-hello-failure/expected.txt b/test/921-hello-failure/expected.txt new file mode 100644 index 0000000000..e2665ef30b --- /dev/null +++ b/test/921-hello-failure/expected.txt @@ -0,0 +1,15 @@ +hello - NewName +Transformation error : java.lang.Exception(Failed to redefine class <LTransform;> due to JVMTI_ERROR_NAMES_DONT_MATCH) +hello - NewName +hello - DifferentAccess +Transformation error : java.lang.Exception(Failed to redefine class <LTransform;> due to JVMTI_ERROR_UNSUPPORTED_REDEFINITION_CLASS_MODIFIERS_CHANGED) +hello - DifferentAccess +hello2 - NewInterface +Transformation error : java.lang.Exception(Failed to redefine class <LTransform2;> due to JVMTI_ERROR_UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED) +hello2 - NewInterface +hello2 - MissingInterface +Transformation error : java.lang.Exception(Failed to redefine class <LTransform2;> due to JVMTI_ERROR_UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED) +hello2 - MissingInterface +hello2 - ReorderInterface +Transformation error : java.lang.Exception(Failed to redefine class <LTransform2;> due to JVMTI_ERROR_UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED) +hello2 - ReorderInterface diff --git a/test/921-hello-failure/info.txt b/test/921-hello-failure/info.txt new file mode 100644 index 0000000000..9d241c77b1 --- /dev/null +++ b/test/921-hello-failure/info.txt @@ -0,0 +1,7 @@ +Tests for redefinition failure modes. + +Tests +---- + +- NewName: The name of the class is changed +- DifferentAccess: Class access is changed from <default> to 'public' diff --git a/test/921-hello-failure/run b/test/921-hello-failure/run new file mode 100755 index 0000000000..3ef4832da2 --- /dev/null +++ b/test/921-hello-failure/run @@ -0,0 +1,20 @@ +#!/bin/bash +# +# Copyright 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +./default-run "$@" --experimental agents \ + --experimental runtime-plugins \ + --jvmti diff --git a/test/921-hello-failure/src/DifferentAccess.java b/test/921-hello-failure/src/DifferentAccess.java new file mode 100644 index 0000000000..d4b16e0522 --- /dev/null +++ b/test/921-hello-failure/src/DifferentAccess.java @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.util.Base64; + +class DifferentAccess { + // The following is a base64 encoding of the following class. + // public class NotTransform { + // public void sayHi(String name) { + // throw new Error("Should not be called!"); + // } + // } + private static final byte[] CLASS_BYTES = Base64.getDecoder().decode( + "yv66vgAAADQAFQoABgAPBwAQCAARCgACABIHABMHABQBAAY8aW5pdD4BAAMoKVYBAARDb2RlAQAP" + + "TGluZU51bWJlclRhYmxlAQAFc2F5SGkBABUoTGphdmEvbGFuZy9TdHJpbmc7KVYBAApTb3VyY2VG" + + "aWxlAQAOVHJhbnNmb3JtLmphdmEMAAcACAEAD2phdmEvbGFuZy9FcnJvcgEAFVNob3VsZCBub3Qg" + + "YmUgY2FsbGVkIQwABwAMAQAJVHJhbnNmb3JtAQAQamF2YS9sYW5nL09iamVjdAAhAAUABgAAAAAA" + + "AgABAAcACAABAAkAAAAdAAEAAQAAAAUqtwABsQAAAAEACgAAAAYAAQAAAAEAAQALAAwAAQAJAAAA" + + "IgADAAIAAAAKuwACWRIDtwAEvwAAAAEACgAAAAYAAQAAAAMAAQANAAAAAgAO"); + private static final byte[] DEX_BYTES = Base64.getDecoder().decode( + "ZGV4CjAzNQANVRT7zleRLG4E5DhtK7OtoDxZlUQMI5eQAgAAcAAAAHhWNBIAAAAAAAAAAPwBAAAL" + + "AAAAcAAAAAUAAACcAAAAAgAAALAAAAAAAAAAAAAAAAQAAADIAAAAAQAAAOgAAACIAQAACAEAAEoB" + + "AABSAQAAXwEAAHIBAACGAQAAmgEAALEBAADBAQAAxAEAAMgBAADcAQAAAQAAAAIAAAADAAAABAAA" + + "AAcAAAAHAAAABAAAAAAAAAAIAAAABAAAAEQBAAAAAAAAAAAAAAAAAQAKAAAAAQABAAAAAAACAAAA" + + "AAAAAAAAAAABAAAAAgAAAAAAAAAGAAAAAAAAAO4BAAAAAAAAAQABAAEAAADjAQAABAAAAHAQAwAA" + + "AA4ABAACAAIAAADoAQAACQAAACIAAQAbAQUAAABwIAIAEAAnAAAAAQAAAAMABjxpbml0PgALTFRy" + + "YW5zZm9ybTsAEUxqYXZhL2xhbmcvRXJyb3I7ABJMamF2YS9sYW5nL09iamVjdDsAEkxqYXZhL2xh" + + "bmcvU3RyaW5nOwAVU2hvdWxkIG5vdCBiZSBjYWxsZWQhAA5UcmFuc2Zvcm0uamF2YQABVgACVkwA" + + "EmVtaXR0ZXI6IGphY2stNC4yMAAFc2F5SGkAAQAHDgADAQAHDgAAAAEBAIGABIgCAQGgAgwAAAAA" + + "AAAAAQAAAAAAAAABAAAACwAAAHAAAAACAAAABQAAAJwAAAADAAAAAgAAALAAAAAFAAAABAAAAMgA" + + "AAAGAAAAAQAAAOgAAAABIAAAAgAAAAgBAAABEAAAAQAAAEQBAAACIAAACwAAAEoBAAADIAAAAgAA" + + "AOMBAAAAIAAAAQAAAO4BAAAAEAAAAQAAAPwBAAA="); + + public static void doTest(Transform t) { + t.sayHi("DifferentAccess"); + try { + Main.doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES); + } catch (Exception e) { + System.out.println( + "Transformation error : " + e.getClass().getName() + "(" + e.getMessage() + ")"); + } + t.sayHi("DifferentAccess"); + } +} 
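DifferentAccess above, and the other redefinition-failure sources that follow (NewName, NewInterface, MissingInterface, ReorderInterface), each embed the replacement class as base64 CLASS_BYTES and DEX_BYTES constants, preceded by a comment showing the source that was compiled. A minimal sketch of how such a constant could be regenerated, assuming the commented class has already been compiled to a class file (and, for DEX_BYTES, to a classes.dex) by whatever toolchain is in use; the class name and file paths below are illustrative and not part of the patch:

    import java.nio.file.Files;
    import java.nio.file.Paths;
    import java.util.Base64;

    public class EncodeClassBytes {
      public static void main(String[] args) throws Exception {
        // args[0]: path to a compiled artifact, e.g. Transform.class or classes.dex.
        byte[] raw = Files.readAllBytes(Paths.get(args[0]));
        // The MIME encoder wraps at 76 characters, roughly matching the chunked
        // string literals used for CLASS_BYTES / DEX_BYTES in these tests.
        System.out.println(Base64.getMimeEncoder().encodeToString(raw));
      }
    }

Note that the tests decode with the basic Base64.getDecoder(), which rejects line separators, so regenerated output would be folded back into concatenated "..." string literals rather than pasted with its line breaks.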
diff --git a/test/921-hello-failure/src/Iface1.java b/test/921-hello-failure/src/Iface1.java new file mode 100644 index 0000000000..f53275a656 --- /dev/null +++ b/test/921-hello-failure/src/Iface1.java @@ -0,0 +1,19 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +interface Iface1 { + void sayHi(String s); +} diff --git a/test/921-hello-failure/src/Iface2.java b/test/921-hello-failure/src/Iface2.java new file mode 100644 index 0000000000..54cdd901c9 --- /dev/null +++ b/test/921-hello-failure/src/Iface2.java @@ -0,0 +1,19 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +interface Iface2 { + void sayHi(String s); +} diff --git a/test/921-hello-failure/src/Iface3.java b/test/921-hello-failure/src/Iface3.java new file mode 100644 index 0000000000..819134d30d --- /dev/null +++ b/test/921-hello-failure/src/Iface3.java @@ -0,0 +1,19 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +interface Iface3 { + void sayHi(String s); +} diff --git a/test/921-hello-failure/src/Main.java b/test/921-hello-failure/src/Main.java new file mode 100644 index 0000000000..69c48e26cc --- /dev/null +++ b/test/921-hello-failure/src/Main.java @@ -0,0 +1,32 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +public class Main { + + public static void main(String[] args) { + System.loadLibrary(args[1]); + NewName.doTest(new Transform()); + DifferentAccess.doTest(new Transform()); + NewInterface.doTest(new Transform2()); + MissingInterface.doTest(new Transform2()); + ReorderInterface.doTest(new Transform2()); + } + + // Transforms the class. This throws an exception if something goes wrong. + public static native void doCommonClassRedefinition(Class<?> target, + byte[] classfile, + byte[] dexfile) throws Exception; +} diff --git a/test/921-hello-failure/src/MissingInterface.java b/test/921-hello-failure/src/MissingInterface.java new file mode 100644 index 0000000000..d17a6defbe --- /dev/null +++ b/test/921-hello-failure/src/MissingInterface.java @@ -0,0 +1,58 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.util.Base64; + +class MissingInterface { + // The following is a base64 encoding of the following class. + // class Transform2 implements Iface1 { + // public void sayHi(String name) { + // throw new Error("Should not be called!"); + // } + // } + private static final byte[] CLASS_BYTES = Base64.getDecoder().decode( + "yv66vgAAADQAFwoABgAQBwARCAASCgACABMHABQHABUHABYBAAY8aW5pdD4BAAMoKVYBAARDb2Rl" + + "AQAPTGluZU51bWJlclRhYmxlAQAFc2F5SGkBABUoTGphdmEvbGFuZy9TdHJpbmc7KVYBAApTb3Vy" + + "Y2VGaWxlAQAPVHJhbnNmb3JtMi5qYXZhDAAIAAkBAA9qYXZhL2xhbmcvRXJyb3IBABVTaG91bGQg" + + "bm90IGJlIGNhbGxlZCEMAAgADQEAClRyYW5zZm9ybTIBABBqYXZhL2xhbmcvT2JqZWN0AQAGSWZh" + + "Y2UxACAABQAGAAEABwAAAAIAAAAIAAkAAQAKAAAAHQABAAEAAAAFKrcAAbEAAAABAAsAAAAGAAEA" + + "AAABAAEADAANAAEACgAAACIAAwACAAAACrsAAlkSA7cABL8AAAABAAsAAAAGAAEAAAADAAEADgAA" + + "AAIADw=="); + private static final byte[] DEX_BYTES = Base64.getDecoder().decode( + "ZGV4CjAzNQDiWVay8/Z0/tXQaTTI+QtwTM65gRJVMOusAgAAcAAAAHhWNBIAAAAAAAAAABgCAAAM" + + "AAAAcAAAAAYAAACgAAAAAgAAALgAAAAAAAAAAAAAAAQAAADQAAAAAQAAAPAAAACcAQAAEAEAAFoB" + + "AABiAQAAbAEAAHoBAACNAQAAoQEAALUBAADMAQAA3QEAAOABAADkAQAA+AEAAAEAAAACAAAAAwAA" + + "AAQAAAAFAAAACAAAAAgAAAAFAAAAAAAAAAkAAAAFAAAAVAEAAAEAAAAAAAAAAQABAAsAAAACAAEA" + + "AAAAAAMAAAAAAAAAAQAAAAAAAAADAAAATAEAAAcAAAAAAAAACgIAAAAAAAABAAEAAQAAAP8BAAAE" + + "AAAAcBADAAAADgAEAAIAAgAAAAQCAAAJAAAAIgACABsBBgAAAHAgAgAQACcAAAABAAAAAAAAAAEA" + + "AAAEAAY8aW5pdD4ACExJZmFjZTE7AAxMVHJhbnNmb3JtMjsAEUxqYXZhL2xhbmcvRXJyb3I7ABJM" + + "amF2YS9sYW5nL09iamVjdDsAEkxqYXZhL2xhbmcvU3RyaW5nOwAVU2hvdWxkIG5vdCBiZSBjYWxs" + + "ZWQhAA9UcmFuc2Zvcm0yLmphdmEAAVYAAlZMABJlbWl0dGVyOiBqYWNrLTQuMjAABXNheUhpAAEA" + + "Bw4AAwEABw4AAAABAQCAgASQAgEBqAIMAAAAAAAAAAEAAAAAAAAAAQAAAAwAAABwAAAAAgAAAAYA" + + "AACgAAAAAwAAAAIAAAC4AAAABQAAAAQAAADQAAAABgAAAAEAAADwAAAAASAAAAIAAAAQAQAAARAA" + + "AAIAAABMAQAAAiAAAAwAAABaAQAAAyAAAAIAAAD/AQAAACAAAAEAAAAKAgAAABAAAAEAAAAYAgAA"); + + public static void doTest(Transform2 t) { + t.sayHi("MissingInterface"); + try { + Main.doCommonClassRedefinition(Transform2.class, CLASS_BYTES, DEX_BYTES); + } catch (Exception e) { + System.out.println( + "Transformation error : " 
+ e.getClass().getName() + "(" + e.getMessage() + ")"); + } + t.sayHi("MissingInterface"); + } +} diff --git a/test/921-hello-failure/src/NewInterface.java b/test/921-hello-failure/src/NewInterface.java new file mode 100644 index 0000000000..fe7722261a --- /dev/null +++ b/test/921-hello-failure/src/NewInterface.java @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.util.Base64; + +class NewInterface { + // The following is a base64 encoding of the following class. + // class Transform2 implements Iface1, Iface2, Iface3 { + // public void sayHi(String name) { + // throw new Error("Should not be called!"); + // } + // } + private static final byte[] CLASS_BYTES = Base64.getDecoder().decode( + "yv66vgAAADQAGwoABgASBwATCAAUCgACABUHABYHABcHABgHABkHABoBAAY8aW5pdD4BAAMoKVYB" + + "AARDb2RlAQAPTGluZU51bWJlclRhYmxlAQAFc2F5SGkBABUoTGphdmEvbGFuZy9TdHJpbmc7KVYB" + + "AApTb3VyY2VGaWxlAQAPVHJhbnNmb3JtMi5qYXZhDAAKAAsBAA9qYXZhL2xhbmcvRXJyb3IBABVT" + + "aG91bGQgbm90IGJlIGNhbGxlZCEMAAoADwEAClRyYW5zZm9ybTIBABBqYXZhL2xhbmcvT2JqZWN0" + + "AQAGSWZhY2UxAQAGSWZhY2UyAQAGSWZhY2UzACAABQAGAAMABwAIAAkAAAACAAAACgALAAEADAAA" + + "AB0AAQABAAAABSq3AAGxAAAAAQANAAAABgABAAAAAQABAA4ADwABAAwAAAAiAAMAAgAAAAq7AAJZ" + + "EgO3AAS/AAAAAQANAAAABgABAAAAAwABABAAAAACABE="); + private static final byte[] DEX_BYTES = Base64.getDecoder().decode( + "ZGV4CjAzNQCBWnko4SMXeuXSO3fGJBp0WSlc0HLRr63UAgAAcAAAAHhWNBIAAAAAAAAAAEACAAAO" + + "AAAAcAAAAAgAAACoAAAAAgAAAMgAAAAAAAAAAAAAAAQAAADgAAAAAQAAAAABAAC0AQAAIAEAAG4B" + + "AAB2AQAAgAEAAIoBAACUAQAAogEAALUBAADJAQAA3QEAAPQBAAAFAgAACAIAAAwCAAAgAgAAAQAA" + + "AAIAAAADAAAABAAAAAUAAAAGAAAABwAAAAoAAAAKAAAABwAAAAAAAAALAAAABwAAAGgBAAADAAAA" + + "AAAAAAMAAQANAAAABAABAAAAAAAFAAAAAAAAAAMAAAAAAAAABQAAAFwBAAAJAAAAAAAAADICAAAA" + + "AAAAAQABAAEAAAAnAgAABAAAAHAQAwAAAA4ABAACAAIAAAAsAgAACQAAACIABAAbAQgAAABwIAIA" + + "EAAnAAAAAwAAAAAAAQACAAAAAQAAAAYABjxpbml0PgAITElmYWNlMTsACExJZmFjZTI7AAhMSWZh" + + "Y2UzOwAMTFRyYW5zZm9ybTI7ABFMamF2YS9sYW5nL0Vycm9yOwASTGphdmEvbGFuZy9PYmplY3Q7" + + "ABJMamF2YS9sYW5nL1N0cmluZzsAFVNob3VsZCBub3QgYmUgY2FsbGVkIQAPVHJhbnNmb3JtMi5q" + + "YXZhAAFWAAJWTAASZW1pdHRlcjogamFjay00LjIwAAVzYXlIaQABAAcOAAMBAAcOAAAAAQEAgIAE" + + "oAIBAbgCDAAAAAAAAAABAAAAAAAAAAEAAAAOAAAAcAAAAAIAAAAIAAAAqAAAAAMAAAACAAAAyAAA" + + "AAUAAAAEAAAA4AAAAAYAAAABAAAAAAEAAAEgAAACAAAAIAEAAAEQAAACAAAAXAEAAAIgAAAOAAAA" + + "bgEAAAMgAAACAAAAJwIAAAAgAAABAAAAMgIAAAAQAAABAAAAQAIAAA=="); + + public static void doTest(Transform2 t) { + t.sayHi("NewInterface"); + try { + Main.doCommonClassRedefinition(Transform2.class, CLASS_BYTES, DEX_BYTES); + } catch (Exception e) { + System.out.println( + "Transformation error : " + e.getClass().getName() + "(" + e.getMessage() + ")"); + } + t.sayHi("NewInterface"); + } +} diff --git a/test/921-hello-failure/src/NewName.java b/test/921-hello-failure/src/NewName.java new file mode 100644 index 0000000000..a6f249a2ef --- /dev/null +++ b/test/921-hello-failure/src/NewName.java @@ -0,0 +1,56 @@ +/* + * Copyright 
(C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.util.Base64; + +class NewName { + // class NotTransform { + // public void sayHi(String name) { + // throw new Error("Should not be called!"); + // } + // } + private static final byte[] CLASS_BYTES = Base64.getDecoder().decode( + "yv66vgAAADQAFQoABgAPBwAQCAARCgACABIHABMHABQBAAY8aW5pdD4BAAMoKVYBAARDb2RlAQAP" + + "TGluZU51bWJlclRhYmxlAQAFc2F5SGkBABUoTGphdmEvbGFuZy9TdHJpbmc7KVYBAApTb3VyY2VG" + + "aWxlAQARTm90VHJhbnNmb3JtLmphdmEMAAcACAEAD2phdmEvbGFuZy9FcnJvcgEAFVNob3VsZCBu" + + "b3QgYmUgY2FsbGVkIQwABwAMAQAMTm90VHJhbnNmb3JtAQAQamF2YS9sYW5nL09iamVjdAAgAAUA" + + "BgAAAAAAAgAAAAcACAABAAkAAAAdAAEAAQAAAAUqtwABsQAAAAEACgAAAAYAAQAAAAEAAQALAAwA" + + "AQAJAAAAIgADAAIAAAAKuwACWRIDtwAEvwAAAAEACgAAAAYAAQAAAAMAAQANAAAAAgAO"); + private static final byte[] DEX_BYTES = Base64.getDecoder().decode( + "ZGV4CjAzNQDLV95i5xnv6iUi6uIeDoY5jP5Xe9NP1AiYAgAAcAAAAHhWNBIAAAAAAAAAAAQCAAAL" + + "AAAAcAAAAAUAAACcAAAAAgAAALAAAAAAAAAAAAAAAAQAAADIAAAAAQAAAOgAAACQAQAACAEAAEoB" + + "AABSAQAAYgEAAHUBAACJAQAAnQEAALABAADHAQAAygEAAM4BAADiAQAAAQAAAAIAAAADAAAABAAA" + + "AAcAAAAHAAAABAAAAAAAAAAIAAAABAAAAEQBAAAAAAAAAAAAAAAAAQAKAAAAAQABAAAAAAACAAAA" + + "AAAAAAAAAAAAAAAAAgAAAAAAAAAFAAAAAAAAAPQBAAAAAAAAAQABAAEAAADpAQAABAAAAHAQAwAA" + + "AA4ABAACAAIAAADuAQAACQAAACIAAQAbAQYAAABwIAIAEAAnAAAAAQAAAAMABjxpbml0PgAOTE5v" + + "dFRyYW5zZm9ybTsAEUxqYXZhL2xhbmcvRXJyb3I7ABJMamF2YS9sYW5nL09iamVjdDsAEkxqYXZh" + + "L2xhbmcvU3RyaW5nOwARTm90VHJhbnNmb3JtLmphdmEAFVNob3VsZCBub3QgYmUgY2FsbGVkIQAB" + + "VgACVkwAEmVtaXR0ZXI6IGphY2stNC4yMAAFc2F5SGkAAQAHDgADAQAHDgAAAAEBAICABIgCAQGg" + + "AgAADAAAAAAAAAABAAAAAAAAAAEAAAALAAAAcAAAAAIAAAAFAAAAnAAAAAMAAAACAAAAsAAAAAUA" + + "AAAEAAAAyAAAAAYAAAABAAAA6AAAAAEgAAACAAAACAEAAAEQAAABAAAARAEAAAIgAAALAAAASgEA" + + "AAMgAAACAAAA6QEAAAAgAAABAAAA9AEAAAAQAAABAAAABAIAAA=="); + + public static void doTest(Transform t) { + t.sayHi("NewName"); + try { + Main.doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES); + } catch (Exception e) { + System.out.println( + "Transformation error : " + e.getClass().getName() + "(" + e.getMessage() + ")"); + } + t.sayHi("NewName"); + } +} diff --git a/test/921-hello-failure/src/ReorderInterface.java b/test/921-hello-failure/src/ReorderInterface.java new file mode 100644 index 0000000000..ce78dbc241 --- /dev/null +++ b/test/921-hello-failure/src/ReorderInterface.java @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.util.Base64; + +class ReorderInterface { + // The following is a base64 encoding of the following class. + // class Transform2 implements Iface2, Iface1 { + // public void sayHi(String name) { + // throw new Error("Should not be called!"); + // } + // } + private static final byte[] CLASS_BYTES = Base64.getDecoder().decode( + "yv66vgAAADQAGQoABgARBwASCAATCgACABQHABUHABYHABcHABgBAAY8aW5pdD4BAAMoKVYBAARD" + + "b2RlAQAPTGluZU51bWJlclRhYmxlAQAFc2F5SGkBABUoTGphdmEvbGFuZy9TdHJpbmc7KVYBAApT" + + "b3VyY2VGaWxlAQAPVHJhbnNmb3JtMi5qYXZhDAAJAAoBAA9qYXZhL2xhbmcvRXJyb3IBABVTaG91" + + "bGQgbm90IGJlIGNhbGxlZCEMAAkADgEAClRyYW5zZm9ybTIBABBqYXZhL2xhbmcvT2JqZWN0AQAG" + + "SWZhY2UyAQAGSWZhY2UxACAABQAGAAIABwAIAAAAAgAAAAkACgABAAsAAAAdAAEAAQAAAAUqtwAB" + + "sQAAAAEADAAAAAYAAQAAAAEAAQANAA4AAQALAAAAIgADAAIAAAAKuwACWRIDtwAEvwAAAAEADAAA" + + "AAYAAQAAAAMAAQAPAAAAAgAQ"); + private static final byte[] DEX_BYTES = Base64.getDecoder().decode( + "ZGV4CjAzNQChWfUC02YEHJZLC4V4pHrGMdqwD8NnzXvAAgAAcAAAAHhWNBIAAAAAAAAAACwCAAAN" + + "AAAAcAAAAAcAAACkAAAAAgAAAMAAAAAAAAAAAAAAAAQAAADYAAAAAQAAAPgAAACoAQAAGAEAAGIB" + + "AABqAQAAdAEAAH4BAACMAQAAnwEAALMBAADHAQAA3gEAAO8BAADyAQAA9gEAAAoCAAABAAAAAgAA" + + "AAMAAAAEAAAABQAAAAYAAAAJAAAACQAAAAYAAAAAAAAACgAAAAYAAABcAQAAAgAAAAAAAAACAAEA" + + "DAAAAAMAAQAAAAAABAAAAAAAAAACAAAAAAAAAAQAAABUAQAACAAAAAAAAAAcAgAAAAAAAAEAAQAB" + + "AAAAEQIAAAQAAABwEAMAAAAOAAQAAgACAAAAFgIAAAkAAAAiAAMAGwEHAAAAcCACABAAJwAAAAIA" + + "AAABAAAAAQAAAAUABjxpbml0PgAITElmYWNlMTsACExJZmFjZTI7AAxMVHJhbnNmb3JtMjsAEUxq" + + "YXZhL2xhbmcvRXJyb3I7ABJMamF2YS9sYW5nL09iamVjdDsAEkxqYXZhL2xhbmcvU3RyaW5nOwAV" + + "U2hvdWxkIG5vdCBiZSBjYWxsZWQhAA9UcmFuc2Zvcm0yLmphdmEAAVYAAlZMABJlbWl0dGVyOiBq" + + "YWNrLTQuMjAABXNheUhpAAEABw4AAwEABw4AAAABAQCAgASYAgEBsAIAAAwAAAAAAAAAAQAAAAAA" + + "AAABAAAADQAAAHAAAAACAAAABwAAAKQAAAADAAAAAgAAAMAAAAAFAAAABAAAANgAAAAGAAAAAQAA" + + "APgAAAABIAAAAgAAABgBAAABEAAAAgAAAFQBAAACIAAADQAAAGIBAAADIAAAAgAAABECAAAAIAAA" + + "AQAAABwCAAAAEAAAAQAAACwCAAA="); + + public static void doTest(Transform2 t) { + t.sayHi("ReorderInterface"); + try { + Main.doCommonClassRedefinition(Transform2.class, CLASS_BYTES, DEX_BYTES); + } catch (Exception e) { + System.out.println( + "Transformation error : " + e.getClass().getName() + "(" + e.getMessage() + ")"); + } + t.sayHi("ReorderInterface"); + } +} diff --git a/test/921-hello-failure/src/Transform.java b/test/921-hello-failure/src/Transform.java new file mode 100644 index 0000000000..ee444f0606 --- /dev/null +++ b/test/921-hello-failure/src/Transform.java @@ -0,0 +1,21 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +class Transform { + public void sayHi(String name) { + System.out.println("hello - " + name); + } +} diff --git a/test/921-hello-failure/src/Transform2.java b/test/921-hello-failure/src/Transform2.java new file mode 100644 index 0000000000..9d949f323f --- /dev/null +++ b/test/921-hello-failure/src/Transform2.java @@ -0,0 +1,21 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +class Transform2 implements Iface1, Iface2 { + public void sayHi(String name) { + System.out.println("hello2 - " + name); + } +} diff --git a/test/922-properties/build b/test/922-properties/build new file mode 100755 index 0000000000..898e2e54a2 --- /dev/null +++ b/test/922-properties/build @@ -0,0 +1,17 @@ +#!/bin/bash +# +# Copyright 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +./default-build "$@" --experimental agents diff --git a/test/922-properties/expected.txt b/test/922-properties/expected.txt new file mode 100644 index 0000000000..0be939be3d --- /dev/null +++ b/test/922-properties/expected.txt @@ -0,0 +1,59 @@ +Recommended properties: + "java.class.path": OK + Setting value to "abc": !!!JVMTI_ERROR_NOT_AVAILABLE + "java.library.path": OK + Setting value to "abc": !!!JVMTI_ERROR_NOT_AVAILABLE + "java.vm.info": OK !!!JVMTI_ERROR_NOT_AVAILABLE + Setting value to "abc": !!!JVMTI_ERROR_NOT_AVAILABLE + "java.vm.name": OK + Setting value to "abc": !!!JVMTI_ERROR_NOT_AVAILABLE + "java.vm.vendor": OK + Setting value to "abc": !!!JVMTI_ERROR_NOT_AVAILABLE + "java.vm.version": OK + Setting value to "abc": !!!JVMTI_ERROR_NOT_AVAILABLE +Missing recommended properties: [java.vm.info] +Other properties: + "file.encoding": OK + Setting value to "abc": !!!JVMTI_ERROR_NOT_AVAILABLE + "file.separator": OK + Setting value to "abc": !!!JVMTI_ERROR_NOT_AVAILABLE + "java.class.version": OK + Setting value to "abc": !!!JVMTI_ERROR_NOT_AVAILABLE + "java.compiler": OK + Setting value to "abc": !!!JVMTI_ERROR_NOT_AVAILABLE + "java.ext.dirs": OK + Setting value to "abc": !!!JVMTI_ERROR_NOT_AVAILABLE + "java.net.preferIPv6Addresses": OK + Setting value to "abc": !!!JVMTI_ERROR_NOT_AVAILABLE + "java.specification.name": OK + Setting value to "abc": !!!JVMTI_ERROR_NOT_AVAILABLE + "java.specification.vendor": OK + Setting value to "abc": !!!JVMTI_ERROR_NOT_AVAILABLE + "java.specification.version": OK + Setting value to "abc": !!!JVMTI_ERROR_NOT_AVAILABLE + "java.vendor": OK + Setting value to "abc": !!!JVMTI_ERROR_NOT_AVAILABLE + "java.vendor.url": OK + Setting value to "abc": !!!JVMTI_ERROR_NOT_AVAILABLE + "java.version": OK + Setting value to "abc": !!!JVMTI_ERROR_NOT_AVAILABLE + "java.vm.specification.name": OK + Setting value to "abc": !!!JVMTI_ERROR_NOT_AVAILABLE + "java.vm.specification.vendor": OK + Setting value to "abc": !!!JVMTI_ERROR_NOT_AVAILABLE + "java.vm.specification.version": OK + Setting value to "abc": !!!JVMTI_ERROR_NOT_AVAILABLE + "java.vm.vendor.url": OK + Setting value to "abc": !!!JVMTI_ERROR_NOT_AVAILABLE + "line.separator": OK + Setting value to "abc": !!!JVMTI_ERROR_NOT_AVAILABLE + "os.name": OK + Setting value to "abc": !!!JVMTI_ERROR_NOT_AVAILABLE + "path.separator": OK + Setting value to "abc": !!!JVMTI_ERROR_NOT_AVAILABLE +Non-specified property: + "java.boot.class.path": ERROR !!!JVMTI_ERROR_NOT_AVAILABLE + Setting value to "abc": !!!JVMTI_ERROR_NOT_AVAILABLE +Non-specified property (2): + "a": OK !!!JVMTI_ERROR_NOT_AVAILABLE + Setting value to "abc": !!!JVMTI_ERROR_NOT_AVAILABLE diff --git a/test/922-properties/info.txt b/test/922-properties/info.txt new file mode 100644 index 0000000000..875a5f6ec1 --- /dev/null +++ b/test/922-properties/info.txt @@ -0,0 +1 @@ +Tests basic functions in the jvmti plugin. diff --git a/test/922-properties/properties.cc b/test/922-properties/properties.cc new file mode 100644 index 0000000000..b1e7fce3b5 --- /dev/null +++ b/test/922-properties/properties.cc @@ -0,0 +1,107 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "properties.h" + +#include <stdio.h> + +#include "base/macros.h" +#include "jni.h" +#include "openjdkjvmti/jvmti.h" +#include "ScopedUtfChars.h" + +#include "ti-agent/common_helper.h" +#include "ti-agent/common_load.h" + +namespace art { +namespace Test922Properties { + +extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getSystemProperties( + JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED) { + jint count; + char** properties; + jvmtiError result = jvmti_env->GetSystemProperties(&count, &properties); + if (JvmtiErrorToException(env, result)) { + return nullptr; + } + + auto callback = [&](jint i) -> jstring { + char* data = properties[i]; + if (data == nullptr) { + return nullptr; + } + jstring ret = env->NewStringUTF(data); + jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(data)); + return ret; + }; + jobjectArray ret = CreateObjectArray(env, count, "java/lang/String", callback); + + jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(properties)); + + return ret; +} + +extern "C" JNIEXPORT jstring JNICALL Java_Main_getSystemProperty( + JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jstring key) { + ScopedUtfChars string(env, key); + if (string.c_str() == nullptr) { + return nullptr; + } + + char* value = nullptr; + jvmtiError result = jvmti_env->GetSystemProperty(string.c_str(), &value); + if (JvmtiErrorToException(env, result)) { + return nullptr; + } + + jstring ret = (value == nullptr) ? nullptr : env->NewStringUTF(value); + + jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(value)); + + return ret; +} + +extern "C" JNIEXPORT void JNICALL Java_Main_setSystemProperty( + JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jstring key, jstring value) { + ScopedUtfChars key_string(env, key); + if (key_string.c_str() == nullptr) { + return; + } + ScopedUtfChars value_string(env, value); + if (value_string.c_str() == nullptr) { + return; + } + + jvmtiError result = jvmti_env->SetSystemProperty(key_string.c_str(), value_string.c_str()); + if (JvmtiErrorToException(env, result)) { + return; + } +} + +// Don't do anything +jint OnLoad(JavaVM* vm, + char* options ATTRIBUTE_UNUSED, + void* reserved ATTRIBUTE_UNUSED) { + if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) { + printf("Unable to get jvmti env!\n"); + return 1; + } + SetAllCapabilities(jvmti_env); + return 0; +} + +} // namespace Test922Properties +} // namespace art diff --git a/test/922-properties/properties.h b/test/922-properties/properties.h new file mode 100644 index 0000000000..84feb10758 --- /dev/null +++ b/test/922-properties/properties.h @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_TEST_922_PROPERTIES_PROPERTIES_H_ +#define ART_TEST_922_PROPERTIES_PROPERTIES_H_ + +#include <jni.h> + +namespace art { +namespace Test922Properties { + +jint OnLoad(JavaVM* vm, char* options, void* reserved); + +} // namespace Test922Properties +} // namespace art + +#endif // ART_TEST_922_PROPERTIES_PROPERTIES_H_ diff --git a/test/922-properties/run b/test/922-properties/run new file mode 100755 index 0000000000..4379349cb2 --- /dev/null +++ b/test/922-properties/run @@ -0,0 +1,19 @@ +#!/bin/bash +# +# Copyright 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +./default-run "$@" --experimental agents \ + --experimental runtime-plugins \ + --jvmti diff --git a/test/922-properties/src/Main.java b/test/922-properties/src/Main.java new file mode 100644 index 0000000000..6cec6e97f2 --- /dev/null +++ b/test/922-properties/src/Main.java @@ -0,0 +1,155 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import java.util.Set; +import java.util.TreeSet; + +public class Main { + public static void main(String[] args) throws Exception { + System.loadLibrary(args[1]); + + doTest(); + } + + public static void doTest() throws Exception { + Set<String> recommendedProperties = getRecommendedProperties(); + + System.out.println("Recommended properties:"); + for (String key : recommendedProperties) { + checkProperty(key); + } + + Set<String> allProperties = getAllProperties(); + + Set<String> retained = new TreeSet<String>(recommendedProperties); + retained.retainAll(allProperties); + if (!retained.equals(recommendedProperties)) { + Set<String> missing = new TreeSet<String>(recommendedProperties); + missing.removeAll(retained); + System.out.println("Missing recommended properties: " + missing); + } + + Set<String> nonRecommended = new TreeSet<String>(allProperties); + nonRecommended.removeAll(recommendedProperties); + + System.out.println("Other properties:"); + for (String key : nonRecommended) { + checkProperty(key); + } + + System.out.println("Non-specified property:"); + String key = generate(allProperties); + checkProperty(key); + + System.out.println("Non-specified property (2):"); + String key2 = generateUnique(allProperties); + checkProperty(key2); + } + + private static Set<String> getRecommendedProperties() { + Set<String> keys = new TreeSet<String>(); + keys.add("java.vm.vendor"); + keys.add("java.vm.version"); + keys.add("java.vm.name"); + keys.add("java.vm.info"); + keys.add("java.library.path"); + keys.add("java.class.path"); + return keys; + } + + private static Set<String> getAllProperties() { + Set<String> keys = new TreeSet<String>(); + String[] props = getSystemProperties(); + for (String p : props) { + keys.add(p); + } + return keys; + } + + private static boolean equals(String s1, String s2) { + if (s1 == null && s2 == null) { + return true; + } else if (s1 != null) { + return s1.equals(s2); + } else { + return false; + } + } + + private static void checkProperty(String key) { + System.out.print(" \"" + key + "\": "); + String err = null; + String value = null; + try { + value = getSystemProperty(key); + } catch (RuntimeException e) { + err = e.getMessage(); + } + String sysValue = System.getProperty(key); + if (equals(value, sysValue)) { + System.out.print("OK"); + if (err != null) { + System.out.println(" !!!" + err); + } else { + System.out.println(); + } + } else { + System.out.println("ERROR !!!" + err); + } + + System.out.print(" Setting value to \"abc\": "); + try { + setSystemProperty(key, "abc"); + System.out.println("SUCCEEDED"); + } catch (RuntimeException e) { + System.out.println("!!!" + e.getMessage()); + } + } + + private static String generateUnique(Set<String> others) { + // Construct something. To be deterministic, just use "a+". + StringBuilder sb = new StringBuilder("a"); + for (;;) { + String key = sb.toString(); + if (!others.contains(key)) { + return key; + } + sb.append('a'); + } + } + + private static String generate(Set<String> others) { + // First check for something in the overall System properties. + TreeSet<String> sysProps = new TreeSet<String>(System.getProperties().stringPropertyNames()); + sysProps.removeAll(others); + if (!sysProps.isEmpty()) { + // Find something that starts with "java" or "os," trying to be platform-independent. + for (String s: sysProps) { + if (s.startsWith("java.") || s.startsWith("os.")) { + return s; + } + } + // Just return the first thing. 
+ return sysProps.iterator().next(); + } + + return generateUnique(others); + } + + private static native String[] getSystemProperties(); + private static native String getSystemProperty(String key); + private static native void setSystemProperty(String key, String value); +} diff --git a/test/Android.bp b/test/Android.bp index 26c4f93d9a..f6648d1cdc 100644 --- a/test/Android.bp +++ b/test/Android.bp @@ -261,6 +261,8 @@ art_cc_defaults { "912-classes/classes.cc", "913-heaps/heaps.cc", "918-fields/fields.cc", + "920-objects/objects.cc", + "922-properties/properties.cc", ], shared_libs: [ "libbase", diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk index 670b1033e4..a3f6864883 100644 --- a/test/Android.run-test.mk +++ b/test/Android.run-test.mk @@ -292,6 +292,9 @@ TEST_ART_BROKEN_TARGET_TESTS += \ 917-fields-transformation \ 918-fields \ 919-obsolete-fields \ + 920-objects \ + 921-hello-failure \ + 922-properties \ ifneq (,$(filter target,$(TARGET_TYPES))) ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \ @@ -1023,10 +1026,11 @@ define define-test-art-run-test test_groups += ART_RUN_TEST_$$(uc_host_or_target)_NO_IMAGE_RULES run_test_options += --no-image # Add the core dependency. This is required for pre-building. + # Use the PIC image, as it is the default in run-test, to match dependencies. ifeq ($(1),host) - prereq_rule += $$(HOST_CORE_IMAGE_$$(image_suffix)_no-pic_$(13)) + prereq_rule += $$(HOST_CORE_IMAGE_$$(image_suffix)_pic_$(13)) else - prereq_rule += $$(TARGET_CORE_IMAGE_$$(image_suffix)_no-pic_$(13)) + prereq_rule += $$(TARGET_CORE_IMAGE_$$(image_suffix)_pic_$(13)) endif else ifeq ($(9),npicimage) diff --git a/test/ti-agent/common_helper.cc b/test/ti-agent/common_helper.cc index ebf1e4621c..6f98f10072 100644 --- a/test/ti-agent/common_helper.cc +++ b/test/ti-agent/common_helper.cc @@ -17,6 +17,7 @@ #include "ti-agent/common_helper.h" #include <stdio.h> +#include <sstream> #include "art_method.h" #include "jni.h" @@ -39,10 +40,46 @@ void SetAllCapabilities(jvmtiEnv* env) { env->AddCapabilities(&caps); } +bool JvmtiErrorToException(JNIEnv* env, jvmtiError error) { + if (error == JVMTI_ERROR_NONE) { + return false; + } + + ScopedLocalRef<jclass> rt_exception(env, env->FindClass("java/lang/RuntimeException")); + if (rt_exception.get() == nullptr) { + // CNFE should be pending. 
+ return true; + } + + char* err; + jvmti_env->GetErrorName(error, &err); + + env->ThrowNew(rt_exception.get(), err); + + jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(err)); + return true; +} + namespace common_redefine { +static void throwRedefinitionError(jvmtiEnv* jvmti, JNIEnv* env, jclass target, jvmtiError res) { + std::stringstream err; + char* signature = nullptr; + char* generic = nullptr; + jvmti->GetClassSignature(target, &signature, &generic); + char* error = nullptr; + jvmti->GetErrorName(res, &error); + err << "Failed to redefine class <" << signature << "> due to " << error; + std::string message = err.str(); + jvmti->Deallocate(reinterpret_cast<unsigned char*>(signature)); + jvmti->Deallocate(reinterpret_cast<unsigned char*>(generic)); + jvmti->Deallocate(reinterpret_cast<unsigned char*>(error)); + env->ThrowNew(env->FindClass("java/lang/Exception"), message.c_str()); +} + using RedefineDirectFunction = jvmtiError (*)(jvmtiEnv*, jclass, jint, const unsigned char*); -static void DoClassTransformation(jvmtiEnv* jvmti_env, JNIEnv* env, +static void DoClassTransformation(jvmtiEnv* jvmti_env, + JNIEnv* env, jclass target, jbyteArray class_file_bytes, jbyteArray dex_file_bytes) { @@ -63,7 +100,7 @@ static void DoClassTransformation(jvmtiEnv* jvmti_env, JNIEnv* env, res = f(jvmti_env, target, len, redef_bytes); } if (res != JVMTI_ERROR_NONE) { - printf("Redefinition failed!"); + throwRedefinitionError(jvmti_env, env, target, res); } } diff --git a/test/ti-agent/common_helper.h b/test/ti-agent/common_helper.h index 76543fed23..642ca03274 100644 --- a/test/ti-agent/common_helper.h +++ b/test/ti-agent/common_helper.h @@ -65,6 +65,8 @@ static jobjectArray CreateObjectArray(JNIEnv* env, void SetAllCapabilities(jvmtiEnv* env); +bool JvmtiErrorToException(JNIEnv* env, jvmtiError error); + } // namespace art #endif // ART_TEST_TI_AGENT_COMMON_HELPER_H_ diff --git a/test/ti-agent/common_load.cc b/test/ti-agent/common_load.cc index d60cff364a..e309a8920b 100644 --- a/test/ti-agent/common_load.cc +++ b/test/ti-agent/common_load.cc @@ -38,6 +38,8 @@ #include "912-classes/classes.h" #include "913-heaps/heaps.h" #include "918-fields/fields.h" +#include "920-objects/objects.h" +#include "922-properties/properties.h" namespace art { @@ -73,6 +75,9 @@ AgentLib agents[] = { { "917-fields-transformation", common_redefine::OnLoad, nullptr }, { "918-fields", Test918Fields::OnLoad, nullptr }, { "919-obsolete-fields", common_redefine::OnLoad, nullptr }, + { "920-objects", Test920Objects::OnLoad, nullptr }, + { "921-hello-failure", common_redefine::OnLoad, nullptr }, + { "922-properties", Test922Properties::OnLoad, nullptr }, }; static AgentLib* FindAgent(char* name) { diff --git a/test/valgrind-target-suppressions.txt b/test/valgrind-target-suppressions.txt index fbc99b12ce..452a17412a 100644 --- a/test/valgrind-target-suppressions.txt +++ b/test/valgrind-target-suppressions.txt @@ -36,8 +36,7 @@ MemCpySelfAssign Memcheck:Overlap fun:memcpy - fun:je_tsd_set - fun:je_tsd_fetch + ... fun:je_malloc_tsd_boot0 } @@ -59,3 +58,12 @@ ... fun:_ZN3art7Runtime17InitNativeMethodsEv } + +# art::MemMap::MapInternal() uses msync() to check for the existence of memory mappings. +{ + art::MemMap::MapInternal() + Memcheck:Param + msync(start) + fun:msync + fun:_ZN3art6MemMap11MapInternalEPvmiiilb +} |
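The JvmtiErrorToException helper added to common_helper.cc is what lets the Java side of these tests surface JVMTI failures as ordinary exceptions whose message is the JVMTI error name, which is exactly what appears in the expected output (for example the JVMTI_ERROR_NATIVE_METHOD lines in 910-methods/expected.txt and the JVMTI_ERROR_NOT_AVAILABLE lines in 922-properties/expected.txt). A minimal sketch of that calling convention from the test side; the class is illustrative, and getMaxLocals stands in for any of the native wrappers above:

    import java.lang.reflect.Method;

    public class JvmtiCallSketch {
      // Backed by a native wrapper that calls jvmti_env->GetMaxLocals and funnels
      // any non-JVMTI_ERROR_NONE result through JvmtiErrorToException.
      private static native int getMaxLocals(Method m);

      static void printMaxLocals(Method m) {
        try {
          System.out.println("Max locals: " + getMaxLocals(m));
        } catch (RuntimeException e) {
          // For a native method the message is "JVMTI_ERROR_NATIVE_METHOD",
          // matching the 910-methods expected output.
          System.out.println("Max locals: " + e.getMessage());
        }
      }
    }

The same idea is what turns the redefinition failures in 921-hello-failure into the "Failed to redefine class <...> due to JVMTI_ERROR_..." lines of its expected.txt, via the throwRedefinitionError helper rather than the bare printf it replaces.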