54 files changed, 872 insertions, 498 deletions
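The central data-structure change in this diff (see the class_linker, interpreter_common.h, image_writer, and patchoat hunks below) replaces each dex cache's GcRoot<mirror::String>* array with a fixed-size, index-hashed array of mirror::StringDexCacheType slots: each slot atomically stores a string reference together with the dex string index it caches, so a lookup hits only when the stored index matches. What follows is a minimal, compilable sketch of that pattern, not ART's actual classes: the slot count value, the plain const char* standing in for GcRoot<mirror::String>, and the helper names are assumptions for illustration.

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// One cache slot: a string reference paired with the dex string index it
// holds. ART packs this into a single 64-bit atomic (hence the align-of-8
// asserts in the class_linker.cc hunk); a plain struct suffices here.
struct StringDexCachePair {
  const char* string_pointer;  // Stand-in for GcRoot<mirror::String>.
  uint32_t string_index;
};

using StringDexCacheType = std::atomic<StringDexCachePair>;

constexpr size_t kDexCacheStringCacheSize = 1024;  // Assumed slot count.

// A lookup hits only if the slot still caches exactly this index; a string
// whose index hashed to the same slot simply reads as a miss (nullptr).
const char* LookupString(StringDexCacheType* cache, uint32_t string_idx) {
  StringDexCachePair pair =
      cache[string_idx % kDexCacheStringCacheSize].load(std::memory_order_relaxed);
  return pair.string_index == string_idx ? pair.string_pointer : nullptr;
}

// Storing overwrites whatever previously hashed to the slot.
void StoreString(StringDexCacheType* cache, uint32_t string_idx, const char* s) {
  cache[string_idx % kDexCacheStringCacheSize].store(
      StringDexCachePair{s, string_idx}, std::memory_order_relaxed);
}

int main() {
  static StringDexCacheType cache[kDexCacheStringCacheSize];
  // Mirrors StringDexCachePair::Initialize() in the diff: empty slots.
  for (auto& slot : cache) {
    slot.store(StringDexCachePair{nullptr, 0u}, std::memory_order_relaxed);
  }
  StoreString(cache, 5, "hello");
  const char* s = LookupString(cache, 5);
  std::printf("%s\n", s != nullptr ? s : "(miss)");  // hit: "hello"
  std::printf("%s\n", LookupString(cache, 5 + kDexCacheStringCacheSize) != nullptr
                          ? "hit" : "(miss)");       // same slot, wrong index: miss
}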
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk index 33c2a8eb9e..1e2cfa3e97 100644 --- a/build/Android.common_build.mk +++ b/build/Android.common_build.mk @@ -157,19 +157,14 @@ ART_HOST_CODEGEN_ARCHS ?= all ifeq ($(ART_TARGET_CODEGEN_ARCHS),all) ART_TARGET_CODEGEN_ARCHS := $(sort $(ART_TARGET_SUPPORTED_ARCH) $(ART_HOST_SUPPORTED_ARCH)) - # We need to handle the fact that some compiler tests mix code from different architectures. - ART_TARGET_COMPILER_TESTS ?= true else - ART_TARGET_COMPILER_TESTS := false ifeq ($(ART_TARGET_CODEGEN_ARCHS),svelte) ART_TARGET_CODEGEN_ARCHS := $(sort $(ART_TARGET_ARCH_64) $(ART_TARGET_ARCH_32)) endif endif ifeq ($(ART_HOST_CODEGEN_ARCHS),all) ART_HOST_CODEGEN_ARCHS := $(sort $(ART_TARGET_SUPPORTED_ARCH) $(ART_HOST_SUPPORTED_ARCH)) - ART_HOST_COMPILER_TESTS ?= true else - ART_HOST_COMPILER_TESTS := false ifeq ($(ART_HOST_CODEGEN_ARCHS),svelte) ART_HOST_CODEGEN_ARCHS := $(sort $(ART_TARGET_CODEGEN_ARCHS) $(ART_HOST_ARCH_64) $(ART_HOST_ARCH_32)) endif diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk index 3d07fc0ca8..c538c4f03a 100644 --- a/build/Android.gtest.mk +++ b/build/Android.gtest.mk @@ -299,13 +299,7 @@ COMPILER_GTEST_COMMON_SRC_FILES := \ COMPILER_GTEST_COMMON_SRC_FILES_all := \ compiler/jni/jni_cfi_test.cc \ compiler/optimizing/codegen_test.cc \ - compiler/optimizing/constant_folding_test.cc \ - compiler/optimizing/dead_code_elimination_test.cc \ - compiler/optimizing/linearize_test.cc \ - compiler/optimizing/liveness_test.cc \ - compiler/optimizing/live_ranges_test.cc \ compiler/optimizing/optimizing_cfi_test.cc \ - compiler/optimizing/register_allocator_test.cc \ COMPILER_GTEST_COMMON_SRC_FILES_arm := \ compiler/linker/arm/relative_patcher_thumb2_test.cc \ @@ -325,6 +319,16 @@ COMPILER_GTEST_COMMON_SRC_FILES_x86 := \ compiler/linker/x86/relative_patcher_x86_test.cc \ compiler/utils/x86/managed_register_x86_test.cc \ +# These tests are testing architecture-independent functionality, but happen +# to use x86 codegen as part of the test. 
+COMPILER_GTEST_COMMON_SRC_FILES_x86 += \ + compiler/optimizing/constant_folding_test.cc \ + compiler/optimizing/dead_code_elimination_test.cc \ + compiler/optimizing/linearize_test.cc \ + compiler/optimizing/live_ranges_test.cc \ + compiler/optimizing/liveness_test.cc \ + compiler/optimizing/register_allocator_test.cc \ + COMPILER_GTEST_COMMON_SRC_FILES_x86_64 := \ compiler/linker/x86_64/relative_patcher_x86_64_test.cc \ @@ -359,9 +363,7 @@ COMPILER_GTEST_TARGET_SRC_FILES_x86_64 := \ $(COMPILER_GTEST_COMMON_SRC_FILES_x86_64) \ $(foreach arch,$(ART_TARGET_CODEGEN_ARCHS),$(eval COMPILER_GTEST_TARGET_SRC_FILES += $$(COMPILER_GTEST_TARGET_SRC_FILES_$(arch)))) -ifeq (true,$(ART_TARGET_COMPILER_TESTS)) - COMPILER_GTEST_TARGET_SRC_FILES += $(COMPILER_GTEST_TARGET_SRC_FILES_all) -endif +COMPILER_GTEST_TARGET_SRC_FILES += $(COMPILER_GTEST_TARGET_SRC_FILES_all) COMPILER_GTEST_HOST_SRC_FILES := \ $(COMPILER_GTEST_COMMON_SRC_FILES) \ @@ -396,9 +398,7 @@ COMPILER_GTEST_HOST_SRC_FILES_x86_64 := \ compiler/utils/x86_64/assembler_x86_64_test.cc $(foreach arch,$(ART_HOST_CODEGEN_ARCHS),$(eval COMPILER_GTEST_HOST_SRC_FILES += $$(COMPILER_GTEST_HOST_SRC_FILES_$(arch)))) -ifeq (true,$(ART_HOST_COMPILER_TESTS)) - COMPILER_GTEST_HOST_SRC_FILES += $(COMPILER_GTEST_HOST_SRC_FILES_all) -endif +COMPILER_GTEST_HOST_SRC_FILES += $(COMPILER_GTEST_HOST_SRC_FILES_all) ART_TEST_CFLAGS := diff --git a/compiler/Android.mk b/compiler/Android.mk index 6c6d99f616..410b2d05f2 100644 --- a/compiler/Android.mk +++ b/compiler/Android.mk @@ -47,7 +47,6 @@ LIBART_COMPILER_SRC_FILES := \ optimizing/code_generator_utils.cc \ optimizing/constant_folding.cc \ optimizing/dead_code_elimination.cc \ - optimizing/dex_cache_array_fixups_arm.cc \ optimizing/graph_checker.cc \ optimizing/graph_visualizer.cc \ optimizing/gvn.cc \ @@ -61,7 +60,6 @@ LIBART_COMPILER_SRC_FILES := \ optimizing/load_store_elimination.cc \ optimizing/locations.cc \ optimizing/nodes.cc \ - optimizing/nodes_arm64.cc \ optimizing/optimization.cc \ optimizing/optimizing_compiler.cc \ optimizing/parallel_move_resolver.cc \ @@ -78,7 +76,6 @@ LIBART_COMPILER_SRC_FILES := \ optimizing/ssa_liveness_analysis.cc \ optimizing/ssa_phi_elimination.cc \ optimizing/stack_map_stream.cc \ - optimizing/x86_memory_gen.cc \ trampolines/trampoline_compiler.cc \ utils/assembler.cc \ utils/jni_macro_assembler.cc \ @@ -94,6 +91,7 @@ LIBART_COMPILER_SRC_FILES_arm := \ linker/arm/relative_patcher_arm_base.cc \ linker/arm/relative_patcher_thumb2.cc \ optimizing/code_generator_arm.cc \ + optimizing/dex_cache_array_fixups_arm.cc \ optimizing/intrinsics_arm.cc \ utils/arm/assembler_arm.cc \ utils/arm/assembler_arm32.cc \ @@ -109,6 +107,7 @@ LIBART_COMPILER_SRC_FILES_arm64 := \ $(LIBART_COMPILER_SRC_FILES_arm) \ jni/quick/arm64/calling_convention_arm64.cc \ linker/arm64/relative_patcher_arm64.cc \ + optimizing/nodes_arm64.cc \ optimizing/code_generator_arm64.cc \ optimizing/instruction_simplifier_arm.cc \ optimizing/instruction_simplifier_arm64.cc \ @@ -144,6 +143,7 @@ LIBART_COMPILER_SRC_FILES_x86 := \ optimizing/code_generator_x86.cc \ optimizing/intrinsics_x86.cc \ optimizing/pc_relative_fixups_x86.cc \ + optimizing/x86_memory_gen.cc \ utils/x86/assembler_x86.cc \ utils/x86/jni_macro_assembler_x86.cc \ utils/x86/managed_register_x86.cc \ diff --git a/compiler/cfi_test.h b/compiler/cfi_test.h index f8b7460935..c754e5588c 100644 --- a/compiler/cfi_test.h +++ b/compiler/cfi_test.h @@ -22,11 +22,13 @@ #include <sstream> #include "arch/instruction_set.h" +#include "base/enums.h" #include 
"debug/dwarf/dwarf_constants.h" #include "debug/dwarf/dwarf_test.h" #include "debug/dwarf/headers.h" #include "disassembler/disassembler.h" #include "gtest/gtest.h" +#include "thread.h" namespace art { @@ -57,7 +59,13 @@ class CFITest : public dwarf::DwarfTest { // Pretty-print assembly. const uint8_t* asm_base = actual_asm.data(); const uint8_t* asm_end = asm_base + actual_asm.size(); - auto* opts = new DisassemblerOptions(false, asm_base, asm_end, true); + auto* opts = new DisassemblerOptions(false, + asm_base, + asm_end, + true, + is64bit + ? &Thread::DumpThreadOffset<PointerSize::k64> + : &Thread::DumpThreadOffset<PointerSize::k32>); std::unique_ptr<Disassembler> disasm(Disassembler::Create(isa, opts)); std::stringstream stream; const uint8_t* base = actual_asm.data() + (isa == kThumb2 ? 1 : 0); diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc index efae4d0583..bb459996e3 100644 --- a/compiler/image_writer.cc +++ b/compiler/image_writer.cc @@ -52,6 +52,7 @@ #include "mirror/array-inl.h" #include "mirror/class-inl.h" #include "mirror/class_loader.h" +#include "mirror/dex_cache.h" #include "mirror/dex_cache-inl.h" #include "mirror/method.h" #include "mirror/object-inl.h" @@ -1418,6 +1419,9 @@ void ImageWriter::CalculateNewObjectOffsets() { bin_offset = RoundUp(bin_offset, method_alignment); break; } + case kBinDexCacheArray: + bin_offset = RoundUp(bin_offset, DexCacheArraysLayout::Alignment()); + break; case kBinImTable: case kBinIMTConflictTable: { bin_offset = RoundUp(bin_offset, static_cast<size_t>(target_ptr_size_)); @@ -2034,7 +2038,7 @@ void ImageWriter::FixupDexCache(mirror::DexCache* orig_dex_cache, // 64-bit values here, clearing the top 32 bits for 32-bit targets. The zero-extension is // done by casting to the unsigned type uintptr_t before casting to int64_t, i.e. // static_cast<int64_t>(reinterpret_cast<uintptr_t>(image_begin_ + offset))). 
- GcRoot<mirror::String>* orig_strings = orig_dex_cache->GetStrings(); + mirror::StringDexCacheType* orig_strings = orig_dex_cache->GetStrings(); if (orig_strings != nullptr) { copy_dex_cache->SetFieldPtrWithSize<false>(mirror::DexCache::StringsOffset(), NativeLocationInImage(orig_strings), diff --git a/compiler/jni/jni_cfi_test.cc b/compiler/jni/jni_cfi_test.cc index 524ce4d34e..4b056f552a 100644 --- a/compiler/jni/jni_cfi_test.cc +++ b/compiler/jni/jni_cfi_test.cc @@ -104,12 +104,24 @@ class JNICFITest : public CFITest { TestImpl(isa, #isa, expected_asm, expected_cfi); \ } +#ifdef ART_ENABLE_CODEGEN_arm TEST_ISA(kThumb2) +#endif +#ifdef ART_ENABLE_CODEGEN_arm64 TEST_ISA(kArm64) +#endif +#ifdef ART_ENABLE_CODEGEN_x86 TEST_ISA(kX86) +#endif +#ifdef ART_ENABLE_CODEGEN_x86_64 TEST_ISA(kX86_64) +#endif +#ifdef ART_ENABLE_CODEGEN_mips TEST_ISA(kMips) +#endif +#ifdef ART_ENABLE_CODEGEN_mips64 TEST_ISA(kMips64) +#endif #endif // ART_TARGET_ANDROID diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc index 8273b15667..8a809822df 100644 --- a/compiler/oat_writer.cc +++ b/compiler/oat_writer.cc @@ -1189,8 +1189,13 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor { } mirror::String* GetTargetString(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) { - mirror::DexCache* dex_cache = GetDexCache(patch.TargetStringDexFile()); - mirror::String* string = dex_cache->GetResolvedString(patch.TargetStringIndex()); + ScopedObjectAccessUnchecked soa(Thread::Current()); + StackHandleScope<1> hs(soa.Self()); + ClassLinker* linker = Runtime::Current()->GetClassLinker(); + Handle<mirror::DexCache> dex_cache(hs.NewHandle(GetDexCache(patch.TargetStringDexFile()))); + mirror::String* string = linker->LookupString(*patch.TargetStringDexFile(), + patch.TargetStringIndex(), + dex_cache); DCHECK(string != nullptr); DCHECK(writer_->HasBootImage() || Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(string)); diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc index 2d95235d13..404f044cef 100644 --- a/compiler/optimizing/code_generator_arm.cc +++ b/compiler/optimizing/code_generator_arm.cc @@ -5589,55 +5589,15 @@ void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) { __ LoadLiteral(out, codegen_->DeduplicateBootImageAddressLiteral(address)); return; // No dex cache slow path. } - case HLoadString::LoadKind::kDexCacheAddress: { - DCHECK_NE(load->GetAddress(), 0u); - uint32_t address = dchecked_integral_cast<uint32_t>(load->GetAddress()); - // 16-bit LDR immediate has a 5-bit offset multiplied by the size and that gives - // a 128B range. To try and reduce the number of literals if we load multiple strings, - // simply split the dex cache address to a 128B aligned base loaded from a literal - // and the remaining offset embedded in the load. 
- static_assert(sizeof(GcRoot<mirror::String>) == 4u, "Expected GC root to be 4 bytes."); - DCHECK_ALIGNED(load->GetAddress(), 4u); - constexpr size_t offset_bits = /* encoded bits */ 5 + /* scale */ 2; - uint32_t base_address = address & ~MaxInt<uint32_t>(offset_bits); - uint32_t offset = address & MaxInt<uint32_t>(offset_bits); - __ LoadLiteral(out, codegen_->DeduplicateDexCacheAddressLiteral(base_address)); - // /* GcRoot<mirror::String> */ out = *(base_address + offset) - GenerateGcRootFieldLoad(load, out_loc, out, offset); - break; - } - case HLoadString::LoadKind::kDexCachePcRelative: { - Register base_reg = locations->InAt(0).AsRegister<Register>(); - HArmDexCacheArraysBase* base = load->InputAt(0)->AsArmDexCacheArraysBase(); - int32_t offset = load->GetDexCacheElementOffset() - base->GetElementOffset(); - // /* GcRoot<mirror::String> */ out = *(dex_cache_arrays_base + offset) - GenerateGcRootFieldLoad(load, out_loc, base_reg, offset); - break; - } - case HLoadString::LoadKind::kDexCacheViaMethod: { - Register current_method = locations->InAt(0).AsRegister<Register>(); - - // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_ - GenerateGcRootFieldLoad( - load, out_loc, current_method, ArtMethod::DeclaringClassOffset().Int32Value()); - // /* GcRoot<mirror::String>[] */ out = out->dex_cache_strings_ - __ LoadFromOffset(kLoadWord, out, out, mirror::Class::DexCacheStringsOffset().Int32Value()); - // /* GcRoot<mirror::String> */ out = out[string_index] - GenerateGcRootFieldLoad( - load, out_loc, out, CodeGenerator::GetCacheOffset(load->GetStringIndex())); - break; - } default: - LOG(FATAL) << "Unexpected load kind: " << load->GetLoadKind(); - UNREACHABLE(); + break; } - if (!load->IsInDexCache()) { - SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM(load); - codegen_->AddSlowPath(slow_path); - __ CompareAndBranchIfZero(out, slow_path->GetEntryLabel()); - __ Bind(slow_path->GetExitLabel()); - } + // TODO: Re-add the compiler code to do string dex cache lookup again. + SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM(load); + codegen_->AddSlowPath(slow_path); + __ b(slow_path->GetEntryLabel()); + __ Bind(slow_path->GetExitLabel()); } static int32_t GetExceptionTlsOffset() { diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc index 14f5b24494..122c174eae 100644 --- a/compiler/optimizing/code_generator_arm64.cc +++ b/compiler/optimizing/code_generator_arm64.cc @@ -4197,7 +4197,6 @@ void LocationsBuilderARM64::VisitLoadString(HLoadString* load) { } void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) { - Location out_loc = load->GetLocations()->Out(); Register out = OutputRegister(load); switch (load->GetLoadKind()) { @@ -4233,63 +4232,15 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) { __ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(load->GetAddress())); return; // No dex cache slow path. } - case HLoadString::LoadKind::kDexCacheAddress: { - DCHECK_NE(load->GetAddress(), 0u); - // LDR immediate has a 12-bit offset multiplied by the size and for 32-bit loads - // that gives a 16KiB range. To try and reduce the number of literals if we load - // multiple strings, simply split the dex cache address to a 16KiB aligned base - // loaded from a literal and the remaining offset embedded in the load. 
- static_assert(sizeof(GcRoot<mirror::String>) == 4u, "Expected GC root to be 4 bytes."); - DCHECK_ALIGNED(load->GetAddress(), 4u); - constexpr size_t offset_bits = /* encoded bits */ 12 + /* scale */ 2; - uint64_t base_address = load->GetAddress() & ~MaxInt<uint64_t>(offset_bits); - uint32_t offset = load->GetAddress() & MaxInt<uint64_t>(offset_bits); - __ Ldr(out.X(), codegen_->DeduplicateDexCacheAddressLiteral(base_address)); - // /* GcRoot<mirror::String> */ out = *(base_address + offset) - GenerateGcRootFieldLoad(load, out_loc, out.X(), offset); - break; - } - case HLoadString::LoadKind::kDexCachePcRelative: { - // Add ADRP with its PC-relative DexCache access patch. - const DexFile& dex_file = load->GetDexFile(); - uint32_t element_offset = load->GetDexCacheElementOffset(); - vixl::aarch64::Label* adrp_label = - codegen_->NewPcRelativeDexCacheArrayPatch(dex_file, element_offset); - { - SingleEmissionCheckScope guard(GetVIXLAssembler()); - __ Bind(adrp_label); - __ adrp(out.X(), /* offset placeholder */ 0); - } - // Add LDR with its PC-relative DexCache access patch. - vixl::aarch64::Label* ldr_label = - codegen_->NewPcRelativeDexCacheArrayPatch(dex_file, element_offset, adrp_label); - // /* GcRoot<mirror::String> */ out = *(base_address + offset) /* PC-relative */ - GenerateGcRootFieldLoad(load, out_loc, out.X(), /* offset placeholder */ 0, ldr_label); - break; - } - case HLoadString::LoadKind::kDexCacheViaMethod: { - Register current_method = InputRegisterAt(load, 0); - // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_ - GenerateGcRootFieldLoad( - load, out_loc, current_method, ArtMethod::DeclaringClassOffset().Int32Value()); - // /* GcRoot<mirror::String>[] */ out = out->dex_cache_strings_ - __ Ldr(out.X(), HeapOperand(out, mirror::Class::DexCacheStringsOffset().Uint32Value())); - // /* GcRoot<mirror::String> */ out = out[string_index] - GenerateGcRootFieldLoad( - load, out_loc, out.X(), CodeGenerator::GetCacheOffset(load->GetStringIndex())); - break; - } default: - LOG(FATAL) << "Unexpected load kind: " << load->GetLoadKind(); - UNREACHABLE(); + break; } - if (!load->IsInDexCache()) { - SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM64(load); - codegen_->AddSlowPath(slow_path); - __ Cbz(out, slow_path->GetEntryLabel()); - __ Bind(slow_path->GetExitLabel()); - } + // TODO: Re-add the compiler code to do string dex cache lookup again. + SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM64(load); + codegen_->AddSlowPath(slow_path); + __ B(slow_path->GetEntryLabel()); + __ Bind(slow_path->GetExitLabel()); } void LocationsBuilderARM64::VisitLongConstant(HLongConstant* constant) { diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc index 58879bc2f1..a7fbc84340 100644 --- a/compiler/optimizing/code_generator_mips.cc +++ b/compiler/optimizing/code_generator_mips.cc @@ -4580,11 +4580,6 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) { case HLoadString::LoadKind::kBootImageLinkTimePcRelative: base_or_current_method_reg = isR6 ? ZERO : locations->InAt(0).AsRegister<Register>(); break; - // We need an extra register for PC-relative dex cache accesses. 
- case HLoadString::LoadKind::kDexCachePcRelative: - case HLoadString::LoadKind::kDexCacheViaMethod: - base_or_current_method_reg = locations->InAt(0).AsRegister<Register>(); - break; default: base_or_current_method_reg = ZERO; break; @@ -4628,52 +4623,15 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) { codegen_->DeduplicateBootImageAddressLiteral(address)); return; // No dex cache slow path. } - case HLoadString::LoadKind::kDexCacheAddress: { - DCHECK_NE(load->GetAddress(), 0u); - uint32_t address = dchecked_integral_cast<uint32_t>(load->GetAddress()); - static_assert(sizeof(GcRoot<mirror::String>) == 4u, "Expected GC root to be 4 bytes."); - DCHECK_ALIGNED(load->GetAddress(), 4u); - int16_t offset = Low16Bits(address); - uint32_t base_address = address - offset; // This accounts for offset sign extension. - __ Lui(out, High16Bits(base_address)); - // /* GcRoot<mirror::String> */ out = *(base_address + offset) - GenerateGcRootFieldLoad(load, out_loc, out, offset); - break; - } - case HLoadString::LoadKind::kDexCachePcRelative: { - HMipsDexCacheArraysBase* base = load->InputAt(0)->AsMipsDexCacheArraysBase(); - int32_t offset = - load->GetDexCacheElementOffset() - base->GetElementOffset() - kDexCacheArrayLwOffset; - // /* GcRoot<mirror::String> */ out = *(dex_cache_arrays_base + offset) - GenerateGcRootFieldLoad(load, out_loc, base_or_current_method_reg, offset); - break; - } - case HLoadString::LoadKind::kDexCacheViaMethod: { - // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_ - GenerateGcRootFieldLoad(load, - out_loc, - base_or_current_method_reg, - ArtMethod::DeclaringClassOffset().Int32Value()); - // /* GcRoot<mirror::String>[] */ out = out->dex_cache_strings_ - __ LoadFromOffset(kLoadWord, out, out, mirror::Class::DexCacheStringsOffset().Int32Value()); - // /* GcRoot<mirror::String> */ out = out[string_index] - GenerateGcRootFieldLoad(load, - out_loc, - out, - CodeGenerator::GetCacheOffset(load->GetStringIndex())); - break; - } default: - LOG(FATAL) << "Unexpected load kind: " << load->GetLoadKind(); - UNREACHABLE(); + break; } - if (!load->IsInDexCache()) { - SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS(load); - codegen_->AddSlowPath(slow_path); - __ Beqz(out, slow_path->GetEntryLabel()); - __ Bind(slow_path->GetExitLabel()); - } + // TODO: Re-add the compiler code to do string dex cache lookup again. 
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS(load); + codegen_->AddSlowPath(slow_path); + __ B(slow_path->GetEntryLabel()); + __ Bind(slow_path->GetExitLabel()); } void LocationsBuilderMIPS::VisitLongConstant(HLongConstant* constant) { diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc index 4e7a2728b1..4a5755c925 100644 --- a/compiler/optimizing/code_generator_mips64.cc +++ b/compiler/optimizing/code_generator_mips64.cc @@ -3261,22 +3261,11 @@ void LocationsBuilderMIPS64::VisitLoadString(HLoadString* load) { } void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) { - LocationSummary* locations = load->GetLocations(); - GpuRegister out = locations->Out().AsRegister<GpuRegister>(); - GpuRegister current_method = locations->InAt(0).AsRegister<GpuRegister>(); - __ LoadFromOffset(kLoadUnsignedWord, out, current_method, - ArtMethod::DeclaringClassOffset().Int32Value()); - __ LoadFromOffset(kLoadDoubleword, out, out, mirror::Class::DexCacheStringsOffset().Int32Value()); - __ LoadFromOffset( - kLoadUnsignedWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex())); - // TODO: We will need a read barrier here. - - if (!load->IsInDexCache()) { - SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS64(load); - codegen_->AddSlowPath(slow_path); - __ Beqzc(out, slow_path->GetEntryLabel()); - __ Bind(slow_path->GetExitLabel()); - } + // TODO: Re-add the compiler code to do string dex cache lookup again. + SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS64(load); + codegen_->AddSlowPath(slow_path); + __ Bc(slow_path->GetEntryLabel()); + __ Bind(slow_path->GetExitLabel()); } void LocationsBuilderMIPS64::VisitLongConstant(HLongConstant* constant) { diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc index 87853a6cca..7aca16f867 100644 --- a/compiler/optimizing/code_generator_x86.cc +++ b/compiler/optimizing/code_generator_x86.cc @@ -6231,48 +6231,15 @@ void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) { codegen_->RecordSimplePatch(); return; // No dex cache slow path. 
} - case HLoadString::LoadKind::kDexCacheAddress: { - DCHECK_NE(load->GetAddress(), 0u); - uint32_t address = dchecked_integral_cast<uint32_t>(load->GetAddress()); - // /* GcRoot<mirror::String> */ out = *address - GenerateGcRootFieldLoad(load, out_loc, Address::Absolute(address)); - break; - } - case HLoadString::LoadKind::kDexCachePcRelative: { - Register base_reg = locations->InAt(0).AsRegister<Register>(); - uint32_t offset = load->GetDexCacheElementOffset(); - Label* fixup_label = codegen_->NewPcRelativeDexCacheArrayPatch(load->GetDexFile(), offset); - // /* GcRoot<mirror::String> */ out = *(base + offset) /* PC-relative */ - GenerateGcRootFieldLoad( - load, out_loc, Address(base_reg, CodeGeneratorX86::kDummy32BitOffset), fixup_label); - break; - } - case HLoadString::LoadKind::kDexCacheViaMethod: { - Register current_method = locations->InAt(0).AsRegister<Register>(); - - // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_ - GenerateGcRootFieldLoad( - load, out_loc, Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value())); - - // /* GcRoot<mirror::String>[] */ out = out->dex_cache_strings_ - __ movl(out, Address(out, mirror::Class::DexCacheStringsOffset().Int32Value())); - // /* GcRoot<mirror::String> */ out = out[string_index] - GenerateGcRootFieldLoad( - load, out_loc, Address(out, CodeGenerator::GetCacheOffset(load->GetStringIndex()))); - break; - } default: - LOG(FATAL) << "Unexpected load kind: " << load->GetLoadKind(); - UNREACHABLE(); + break; } - if (!load->IsInDexCache()) { - SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathX86(load); - codegen_->AddSlowPath(slow_path); - __ testl(out, out); - __ j(kEqual, slow_path->GetEntryLabel()); - __ Bind(slow_path->GetExitLabel()); - } + // TODO: Re-add the compiler code to do string dex cache lookup again. + SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathX86(load); + codegen_->AddSlowPath(slow_path); + __ jmp(slow_path->GetEntryLabel()); + __ Bind(slow_path->GetExitLabel()); } static Address GetExceptionTlsAddress() { diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc index 253eae66f3..0c55ae44de 100644 --- a/compiler/optimizing/code_generator_x86_64.cc +++ b/compiler/optimizing/code_generator_x86_64.cc @@ -5636,53 +5636,15 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) { codegen_->RecordSimplePatch(); return; // No dex cache slow path. } - case HLoadString::LoadKind::kDexCacheAddress: { - DCHECK_NE(load->GetAddress(), 0u); - // /* GcRoot<mirror::String> */ out = *address - if (IsUint<32>(load->GetAddress())) { - Address address = Address::Absolute(load->GetAddress(), /* no_rip */ true); - GenerateGcRootFieldLoad(load, out_loc, address); - } else { - // TODO: Consider using opcode A1, i.e. movl eax, moff32 (with 64-bit address). 
- __ movq(out, Immediate(load->GetAddress())); - GenerateGcRootFieldLoad(load, out_loc, Address(out, 0)); - } - break; - } - case HLoadString::LoadKind::kDexCachePcRelative: { - uint32_t offset = load->GetDexCacheElementOffset(); - Label* fixup_label = codegen_->NewPcRelativeDexCacheArrayPatch(load->GetDexFile(), offset); - Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, - /* no_rip */ false); - // /* GcRoot<mirror::String> */ out = *address /* PC-relative */ - GenerateGcRootFieldLoad(load, out_loc, address, fixup_label); - break; - } - case HLoadString::LoadKind::kDexCacheViaMethod: { - CpuRegister current_method = locations->InAt(0).AsRegister<CpuRegister>(); - - // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_ - GenerateGcRootFieldLoad( - load, out_loc, Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value())); - // /* GcRoot<mirror::String>[] */ out = out->dex_cache_strings_ - __ movq(out, Address(out, mirror::Class::DexCacheStringsOffset().Uint32Value())); - // /* GcRoot<mirror::String> */ out = out[string_index] - GenerateGcRootFieldLoad( - load, out_loc, Address(out, CodeGenerator::GetCacheOffset(load->GetStringIndex()))); - break; - } default: - LOG(FATAL) << "Unexpected load kind: " << load->GetLoadKind(); - UNREACHABLE(); + break; } - if (!load->IsInDexCache()) { - SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathX86_64(load); - codegen_->AddSlowPath(slow_path); - __ testl(out, out); - __ j(kEqual, slow_path->GetEntryLabel()); - __ Bind(slow_path->GetExitLabel()); - } + // TODO: Re-add the compiler code to do string dex cache lookup again. + SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathX86_64(load); + codegen_->AddSlowPath(slow_path); + __ jmp(slow_path->GetEntryLabel()); + __ Bind(slow_path->GetExitLabel()); } static Address GetExceptionTlsAddress() { diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc index 18db507c48..fe6c0a305e 100644 --- a/compiler/optimizing/codegen_test.cc +++ b/compiler/optimizing/codegen_test.cc @@ -29,12 +29,6 @@ #include "arch/x86_64/instruction_set_features_x86_64.h" #include "base/macros.h" #include "builder.h" -#include "code_generator_arm.h" -#include "code_generator_arm64.h" -#include "code_generator_mips.h" -#include "code_generator_mips64.h" -#include "code_generator_x86.h" -#include "code_generator_x86_64.h" #include "code_simulator_container.h" #include "common_compiler_test.h" #include "dex_file.h" @@ -52,10 +46,35 @@ #include "utils/mips64/managed_register_mips64.h" #include "utils/x86/managed_register_x86.h" +#ifdef ART_ENABLE_CODEGEN_arm +#include "code_generator_arm.h" +#endif + +#ifdef ART_ENABLE_CODEGEN_arm64 +#include "code_generator_arm64.h" +#endif + +#ifdef ART_ENABLE_CODEGEN_x86 +#include "code_generator_x86.h" +#endif + +#ifdef ART_ENABLE_CODEGEN_x86_64 +#include "code_generator_x86_64.h" +#endif + +#ifdef ART_ENABLE_CODEGEN_mips +#include "code_generator_mips.h" +#endif + +#ifdef ART_ENABLE_CODEGEN_mips64 +#include "code_generator_mips64.h" +#endif + #include "gtest/gtest.h" namespace art { +#ifdef ART_ENABLE_CODEGEN_arm // Provide our own codegen, that ensures the C calling conventions // are preserved. Currently, ART and C do not match as R4 is caller-save // in ART, and callee-save in C. 
Alternatively, we could use or write @@ -80,7 +99,9 @@ class TestCodeGeneratorARM : public arm::CodeGeneratorARM { blocked_register_pairs_[arm::R6_R7] = false; } }; +#endif +#ifdef ART_ENABLE_CODEGEN_x86 class TestCodeGeneratorX86 : public x86::CodeGeneratorX86 { public: TestCodeGeneratorX86(HGraph* graph, @@ -105,6 +126,7 @@ class TestCodeGeneratorX86 : public x86::CodeGeneratorX86 { blocked_register_pairs_[x86::ECX_EDI] = false; } }; +#endif class InternalCodeAllocator : public CodeAllocator { public: @@ -234,37 +256,54 @@ static void RunCode(InstructionSet target_isa, bool has_result, Expected expected) { CompilerOptions compiler_options; +#ifdef ART_ENABLE_CODEGEN_arm if (target_isa == kArm || target_isa == kThumb2) { std::unique_ptr<const ArmInstructionSetFeatures> features_arm( ArmInstructionSetFeatures::FromCppDefines()); TestCodeGeneratorARM codegenARM(graph, *features_arm.get(), compiler_options); RunCode(&codegenARM, graph, hook_before_codegen, has_result, expected); - } else if (target_isa == kArm64) { + } +#endif +#ifdef ART_ENABLE_CODEGEN_arm64 + if (target_isa == kArm64) { std::unique_ptr<const Arm64InstructionSetFeatures> features_arm64( Arm64InstructionSetFeatures::FromCppDefines()); arm64::CodeGeneratorARM64 codegenARM64(graph, *features_arm64.get(), compiler_options); RunCode(&codegenARM64, graph, hook_before_codegen, has_result, expected); - } else if (target_isa == kX86) { + } +#endif +#ifdef ART_ENABLE_CODEGEN_x86 + if (target_isa == kX86) { std::unique_ptr<const X86InstructionSetFeatures> features_x86( X86InstructionSetFeatures::FromCppDefines()); TestCodeGeneratorX86 codegenX86(graph, *features_x86.get(), compiler_options); RunCode(&codegenX86, graph, hook_before_codegen, has_result, expected); - } else if (target_isa == kX86_64) { + } +#endif +#ifdef ART_ENABLE_CODEGEN_x86_64 + if (target_isa == kX86_64) { std::unique_ptr<const X86_64InstructionSetFeatures> features_x86_64( X86_64InstructionSetFeatures::FromCppDefines()); x86_64::CodeGeneratorX86_64 codegenX86_64(graph, *features_x86_64.get(), compiler_options); RunCode(&codegenX86_64, graph, hook_before_codegen, has_result, expected); - } else if (target_isa == kMips) { + } +#endif +#ifdef ART_ENABLE_CODEGEN_mips + if (target_isa == kMips) { std::unique_ptr<const MipsInstructionSetFeatures> features_mips( MipsInstructionSetFeatures::FromCppDefines()); mips::CodeGeneratorMIPS codegenMIPS(graph, *features_mips.get(), compiler_options); RunCode(&codegenMIPS, graph, hook_before_codegen, has_result, expected); - } else if (target_isa == kMips64) { + } +#endif +#ifdef ART_ENABLE_CODEGEN_mips64 + if (target_isa == kMips64) { std::unique_ptr<const Mips64InstructionSetFeatures> features_mips64( Mips64InstructionSetFeatures::FromCppDefines()); mips64::CodeGeneratorMIPS64 codegenMIPS64(graph, *features_mips64.get(), compiler_options); RunCode(&codegenMIPS64, graph, hook_before_codegen, has_result, expected); } +#endif } static ::std::vector<InstructionSet> GetTargetISAs() { diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc index 89d80cc281..b3d5341de0 100644 --- a/compiler/optimizing/graph_visualizer.cc +++ b/compiler/optimizing/graph_visualizer.cc @@ -122,7 +122,10 @@ class HGraphVisualizerDisassembler { new DisassemblerOptions(/* absolute_addresses */ false, base_address, end_address, - /* can_read_literals */ true))); + /* can_read_literals */ true, + Is64BitInstructionSet(instruction_set) + ? 
&Thread::DumpThreadOffset<PointerSize::k64> + : &Thread::DumpThreadOffset<PointerSize::k32>))); } ~HGraphVisualizerDisassembler() { diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc index a6d234d739..8c0231e1aa 100644 --- a/compiler/optimizing/optimizing_cfi_test.cc +++ b/compiler/optimizing/optimizing_cfi_test.cc @@ -157,13 +157,26 @@ class OptimizingCFITest : public CFITest { TestImpl(isa, #isa, expected_asm, expected_cfi); \ } +#ifdef ART_ENABLE_CODEGEN_arm TEST_ISA(kThumb2) +#endif +#ifdef ART_ENABLE_CODEGEN_arm64 TEST_ISA(kArm64) +#endif +#ifdef ART_ENABLE_CODEGEN_x86 TEST_ISA(kX86) +#endif +#ifdef ART_ENABLE_CODEGEN_x86_64 TEST_ISA(kX86_64) +#endif +#ifdef ART_ENABLE_CODEGEN_mips TEST_ISA(kMips) +#endif +#ifdef ART_ENABLE_CODEGEN_mips64 TEST_ISA(kMips64) +#endif +#ifdef ART_ENABLE_CODEGEN_arm TEST_F(OptimizingCFITest, kThumb2Adjust) { std::vector<uint8_t> expected_asm( expected_asm_kThumb2_adjust, @@ -184,7 +197,9 @@ TEST_F(OptimizingCFITest, kThumb2Adjust) { Finish(); Check(kThumb2, "kThumb2_adjust", expected_asm, expected_cfi); } +#endif +#ifdef ART_ENABLE_CODEGEN_mips TEST_F(OptimizingCFITest, kMipsAdjust) { // One NOP in delay slot, 1 << 15 NOPS have size 1 << 17 which exceeds 18-bit signed maximum. static constexpr size_t kNumNops = 1u + (1u << 15); @@ -212,7 +227,9 @@ TEST_F(OptimizingCFITest, kMipsAdjust) { Finish(); Check(kMips, "kMips_adjust", expected_asm, expected_cfi); } +#endif +#ifdef ART_ENABLE_CODEGEN_mips64 TEST_F(OptimizingCFITest, kMips64Adjust) { // One NOP in forbidden slot, 1 << 15 NOPS have size 1 << 17 which exceeds 18-bit signed maximum. static constexpr size_t kNumNops = 1u + (1u << 15); @@ -240,6 +257,7 @@ TEST_F(OptimizingCFITest, kMips64Adjust) { Finish(); Check(kMips64, "kMips64_adjust", expected_asm, expected_cfi); } +#endif #endif // ART_TARGET_ANDROID diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc index b73f73893c..6effc306dc 100644 --- a/compiler/optimizing/sharpening.cc +++ b/compiler/optimizing/sharpening.cc @@ -279,8 +279,7 @@ void HSharpening::ProcessLoadString(HLoadString* load_string) { const DexFile& dex_file = load_string->GetDexFile(); uint32_t string_index = load_string->GetStringIndex(); - bool is_in_dex_cache = false; - HLoadString::LoadKind desired_load_kind; + HLoadString::LoadKind desired_load_kind = HLoadString::LoadKind::kDexCacheViaMethod; uint64_t address = 0u; // String or dex cache element address. { Runtime* runtime = Runtime::Current(); @@ -296,33 +295,14 @@ void HSharpening::ProcessLoadString(HLoadString* load_string) { DCHECK(!runtime->UseJitCompilation()); mirror::String* string = class_linker->ResolveString(dex_file, string_index, dex_cache); CHECK(string != nullptr); - if (!compiler_driver_->GetSupportBootImageFixup()) { - // MIPS/MIPS64 or compiler_driver_test. Do not sharpen. - desired_load_kind = HLoadString::LoadKind::kDexCacheViaMethod; - } else { - DCHECK(ContainsElement(compiler_driver_->GetDexFilesForOatFile(), &dex_file)); - is_in_dex_cache = true; - desired_load_kind = codegen_->GetCompilerOptions().GetCompilePic() - ? HLoadString::LoadKind::kBootImageLinkTimePcRelative - : HLoadString::LoadKind::kBootImageLinkTimeAddress; - } + // TODO: In follow up CL, add PcRelative and Address back in. } else if (runtime->UseJitCompilation()) { // TODO: Make sure we don't set the "compile PIC" flag for JIT as that's bogus. 
// DCHECK(!codegen_->GetCompilerOptions().GetCompilePic()); mirror::String* string = dex_cache->GetResolvedString(string_index); - is_in_dex_cache = (string != nullptr); if (string != nullptr && runtime->GetHeap()->ObjectIsInBootImageSpace(string)) { - // TODO: Use direct pointers for all non-moving spaces, not just boot image. Bug: 29530787 desired_load_kind = HLoadString::LoadKind::kBootImageAddress; address = reinterpret_cast64<uint64_t>(string); - } else { - // Note: If the string is not in the dex cache, the instruction needs environment - // and will not be inlined across dex files. Within a dex file, the slow-path helper - // loads the correct string and inlined frames are used correctly for OOM stack trace. - // TODO: Write a test for this. Bug: 29416588 - desired_load_kind = HLoadString::LoadKind::kDexCacheAddress; - void* dex_cache_element_address = &dex_cache->GetStrings()[string_index]; - address = reinterpret_cast64<uint64_t>(dex_cache_element_address); } } else { // AOT app compilation. Try to lookup the string without allocating if not found. @@ -332,19 +312,9 @@ void HSharpening::ProcessLoadString(HLoadString* load_string) { !codegen_->GetCompilerOptions().GetCompilePic()) { desired_load_kind = HLoadString::LoadKind::kBootImageAddress; address = reinterpret_cast64<uint64_t>(string); - } else { - // Not JIT and either the string is not in boot image or we are compiling in PIC mode. - // Use PC-relative load from the dex cache if the dex file belongs - // to the oat file that we're currently compiling. - desired_load_kind = ContainsElement(compiler_driver_->GetDexFilesForOatFile(), &dex_file) - ? HLoadString::LoadKind::kDexCachePcRelative - : HLoadString::LoadKind::kDexCacheViaMethod; } } } - if (is_in_dex_cache) { - load_string->MarkInDexCache(); - } HLoadString::LoadKind load_kind = codegen_->GetSupportedLoadStringKind(desired_load_kind); switch (load_kind) { diff --git a/compiler/utils/jni_macro_assembler.cc b/compiler/utils/jni_macro_assembler.cc index 797a98cfd5..1b743134ed 100644 --- a/compiler/utils/jni_macro_assembler.cc +++ b/compiler/utils/jni_macro_assembler.cc @@ -99,6 +99,7 @@ MacroAsm64UniquePtr JNIMacroAssembler<PointerSize::k64>::Create( return MacroAsm64UniquePtr(new (arena) x86_64::X86_64JNIMacroAssembler(arena)); #endif default: + UNUSED(arena); LOG(FATAL) << "Unknown/unsupported 8B InstructionSet: " << instruction_set; UNREACHABLE(); } diff --git a/disassembler/disassembler.cc b/disassembler/disassembler.cc index e604c1f629..bcd0d1630a 100644 --- a/disassembler/disassembler.cc +++ b/disassembler/disassembler.cc @@ -32,10 +32,8 @@ Disassembler* Disassembler::Create(InstructionSet instruction_set, DisassemblerO return new arm::DisassemblerArm(options); } else if (instruction_set == kArm64) { return new arm64::DisassemblerArm64(options); - } else if (instruction_set == kMips) { - return new mips::DisassemblerMips(options, false); - } else if (instruction_set == kMips64) { - return new mips::DisassemblerMips(options, true); + } else if (instruction_set == kMips || instruction_set == kMips64) { + return new mips::DisassemblerMips(options); } else if (instruction_set == kX86) { return new x86::DisassemblerX86(options, false); } else if (instruction_set == kX86_64) { diff --git a/disassembler/disassembler.h b/disassembler/disassembler.h index b08031587f..86793ccb19 100644 --- a/disassembler/disassembler.h +++ b/disassembler/disassembler.h @@ -28,8 +28,9 @@ namespace art { class DisassemblerOptions { public: - // Should the disassembler print absolute or relative 
addresses. - const bool absolute_addresses_; + using ThreadOffsetNameFunction = void (*)(std::ostream& os, uint32_t offset); + + ThreadOffsetNameFunction thread_offset_name_function_; // Base address for calculating relative code offsets when absolute_addresses_ is false. const uint8_t* const base_address_; @@ -37,6 +38,9 @@ class DisassemblerOptions { // End address (exclusive); const uint8_t* const end_address_; + // Should the disassembler print absolute or relative addresses. + const bool absolute_addresses_; + // If set, the disassembler is allowed to look at load targets in literal // pools. const bool can_read_literals_; @@ -44,10 +48,12 @@ class DisassemblerOptions { DisassemblerOptions(bool absolute_addresses, const uint8_t* base_address, const uint8_t* end_address, - bool can_read_literals) - : absolute_addresses_(absolute_addresses), + bool can_read_literals, + ThreadOffsetNameFunction fn) + : thread_offset_name_function_(fn), base_address_(base_address), end_address_(end_address), + absolute_addresses_(absolute_addresses), can_read_literals_(can_read_literals) {} private: diff --git a/disassembler/disassembler_arm.cc b/disassembler/disassembler_arm.cc index 4f0e144aa8..a47b6adcc9 100644 --- a/disassembler/disassembler_arm.cc +++ b/disassembler/disassembler_arm.cc @@ -25,7 +25,6 @@ #include "base/bit_utils.h" #include "base/logging.h" #include "base/stringprintf.h" -#include "thread.h" namespace art { namespace arm { @@ -329,7 +328,7 @@ void DisassemblerArm::DumpArm(std::ostream& os, const uint8_t* instr_ptr) { } if (rn.r == 9) { args << " ; "; - Thread::DumpThreadOffset<kArmPointerSize>(args, offset); + GetDisassemblerOptions()->thread_offset_name_function_(args, offset); } } } @@ -1401,7 +1400,7 @@ size_t DisassemblerArm::DumpThumb32(std::ostream& os, const uint8_t* instr_ptr) args << Rt << ", [" << Rn << ", #" << (U != 0u ? 
"" : "-") << imm12 << "]"; if (Rn.r == TR && is_load) { args << " ; "; - Thread::DumpThreadOffset<kArmPointerSize>(args, imm12); + GetDisassemblerOptions()->thread_offset_name_function_(args, imm12); } else if (Rn.r == PC) { T2LitType lit_type[] = { kT2LitUByte, kT2LitUHalf, kT2LitHexWord, kT2LitInvalid, diff --git a/disassembler/disassembler_arm64.cc b/disassembler/disassembler_arm64.cc index 0ef9025cd4..80bacb2be3 100644 --- a/disassembler/disassembler_arm64.cc +++ b/disassembler/disassembler_arm64.cc @@ -22,7 +22,6 @@ #include "base/logging.h" #include "base/stringprintf.h" -#include "thread.h" using namespace vixl::aarch64; // NOLINT(build/namespaces) @@ -102,7 +101,7 @@ void CustomDisassembler::VisitLoadStoreUnsignedOffset(const Instruction* instr) if (instr->GetRn() == TR) { int64_t offset = instr->GetImmLSUnsigned() << instr->GetSizeLS(); std::ostringstream tmp_stream; - Thread::DumpThreadOffset<kArm64PointerSize>(tmp_stream, static_cast<uint32_t>(offset)); + options_->thread_offset_name_function_(tmp_stream, static_cast<uint32_t>(offset)); AppendToOutput(" ; %s", tmp_stream.str().c_str()); } } diff --git a/disassembler/disassembler_arm64.h b/disassembler/disassembler_arm64.h index 7c64792b13..19e4dfb486 100644 --- a/disassembler/disassembler_arm64.h +++ b/disassembler/disassembler_arm64.h @@ -35,7 +35,8 @@ class CustomDisassembler FINAL : public vixl::aarch64::Disassembler { : vixl::aarch64::Disassembler(), read_literals_(options->can_read_literals_), base_address_(options->base_address_), - end_address_(options->end_address_) { + end_address_(options->end_address_), + options_(options) { if (!options->absolute_addresses_) { MapCodeAddress(0, reinterpret_cast<const vixl::aarch64::Instruction*>(options->base_address_)); @@ -64,6 +65,8 @@ class CustomDisassembler FINAL : public vixl::aarch64::Disassembler { // Valid address range: [base_address_, end_address_) const void* const base_address_; const void* const end_address_; + + DisassemblerOptions* options_; }; class DisassemblerArm64 FINAL : public Disassembler { diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc index 3448878394..02c6d71510 100644 --- a/disassembler/disassembler_mips.cc +++ b/disassembler/disassembler_mips.cc @@ -21,7 +21,6 @@ #include "base/logging.h" #include "base/stringprintf.h" -#include "thread.h" namespace art { namespace mips { @@ -503,11 +502,7 @@ size_t DisassemblerMips::Dump(std::ostream& os, const uint8_t* instr_ptr) { args << StringPrintf("%+d(r%d)", offset, rs); if (rs == 17) { args << " ; "; - if (is64bit_) { - Thread::DumpThreadOffset<kMips64PointerSize>(args, offset); - } else { - Thread::DumpThreadOffset<kMipsPointerSize>(args, offset); - } + GetDisassemblerOptions()->thread_offset_name_function_(args, offset); } } break; diff --git a/disassembler/disassembler_mips.h b/disassembler/disassembler_mips.h index b0e49b3978..6342f22962 100644 --- a/disassembler/disassembler_mips.h +++ b/disassembler/disassembler_mips.h @@ -26,9 +26,8 @@ namespace mips { class DisassemblerMips FINAL : public Disassembler { public: - DisassemblerMips(DisassemblerOptions* options, bool is64bit) + explicit DisassemblerMips(DisassemblerOptions* options) : Disassembler(options), - is64bit_(is64bit), last_ptr_(nullptr), last_instr_(0) {} @@ -36,8 +35,6 @@ class DisassemblerMips FINAL : public Disassembler { void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) OVERRIDE; private: - const bool is64bit_; - // Address and encoding of the last disassembled instruction. 
// Needed to produce more readable disassembly of certain 2-instruction sequences. const uint8_t* last_ptr_; diff --git a/disassembler/disassembler_x86.cc b/disassembler/disassembler_x86.cc index 147e0b142d..2ca84e5e5b 100644 --- a/disassembler/disassembler_x86.cc +++ b/disassembler/disassembler_x86.cc @@ -23,7 +23,6 @@ #include "base/logging.h" #include "base/stringprintf.h" -#include "thread.h" namespace art { namespace x86 { @@ -1409,11 +1408,11 @@ DISASSEMBLER_ENTRY(cmp, } if (prefix[1] == kFs && !supports_rex_) { args << " ; "; - Thread::DumpThreadOffset<kX86PointerSize>(args, address_bits); + GetDisassemblerOptions()->thread_offset_name_function_(args, address_bits); } if (prefix[1] == kGs && supports_rex_) { args << " ; "; - Thread::DumpThreadOffset<kX86_64PointerSize>(args, address_bits); + GetDisassemblerOptions()->thread_offset_name_function_(args, address_bits); } const char* prefix_str; switch (prefix[0]) { diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc index 77730b9255..96c8e94d9b 100644 --- a/oatdump/oatdump.cc +++ b/oatdump/oatdump.cc @@ -335,10 +335,14 @@ class OatDumper { resolved_addr2instr_(0), instruction_set_(oat_file_.GetOatHeader().GetInstructionSet()), disassembler_(Disassembler::Create(instruction_set_, - new DisassemblerOptions(options_.absolute_addresses_, - oat_file.Begin(), - oat_file.End(), - true /* can_read_literals_ */))) { + new DisassemblerOptions( + options_.absolute_addresses_, + oat_file.Begin(), + oat_file.End(), + true /* can_read_literals_ */, + Is64BitInstructionSet(instruction_set_) + ? &Thread::DumpThreadOffset<PointerSize::k64> + : &Thread::DumpThreadOffset<PointerSize::k32>))) { CHECK(options_.class_loader_ != nullptr); CHECK(options_.class_filter_ != nullptr); CHECK(options_.method_filter_ != nullptr); @@ -1402,7 +1406,7 @@ class OatDumper { const std::vector<const OatFile::OatDexFile*> oat_dex_files_; const OatDumperOptions& options_; uint32_t resolved_addr2instr_; - InstructionSet instruction_set_; + const InstructionSet instruction_set_; std::set<uintptr_t> offsets_; Disassembler* disassembler_; }; diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc index 9432384561..3f6531b5b4 100644 --- a/patchoat/patchoat.cc +++ b/patchoat/patchoat.cc @@ -37,6 +37,7 @@ #include "gc/space/image_space.h" #include "image-inl.h" #include "mirror/abstract_method.h" +#include "mirror/dex_cache.h" #include "mirror/object-inl.h" #include "mirror/method.h" #include "mirror/reference.h" @@ -592,8 +593,8 @@ void PatchOat::PatchDexFileArrays(mirror::ObjectArray<mirror::Object>* img_roots // 64-bit values here, clearing the top 32 bits for 32-bit targets. The zero-extension is // done by casting to the unsigned type uintptr_t before casting to int64_t, i.e. // static_cast<int64_t>(reinterpret_cast<uintptr_t>(image_begin_ + offset))). 
- GcRoot<mirror::String>* orig_strings = orig_dex_cache->GetStrings(); - GcRoot<mirror::String>* relocated_strings = RelocatedAddressOfPointer(orig_strings); + mirror::StringDexCacheType* orig_strings = orig_dex_cache->GetStrings(); + mirror::StringDexCacheType* relocated_strings = RelocatedAddressOfPointer(orig_strings); copy_dex_cache->SetField64<false>( mirror::DexCache::StringsOffset(), static_cast<int64_t>(reinterpret_cast<uintptr_t>(relocated_strings))); diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S index c4ec72685f..11357b5596 100644 --- a/runtime/arch/arm/quick_entrypoints_arm.S +++ b/runtime/arch/arm/quick_entrypoints_arm.S @@ -191,7 +191,7 @@ .cfi_rel_offset r11, 44 .cfi_rel_offset ip, 48 .cfi_rel_offset lr, 52 - vpush {d0-d15} @ 32 words of float args. + vpush {d0-d15} @ 32 words, 2 for each of the 16 saved doubles. .cfi_adjust_cfa_offset 128 sub sp, #8 @ 2 words of space, alignment padding and Method* .cfi_adjust_cfa_offset 8 diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S index 4289cabbc6..3e6fbaf64b 100644 --- a/runtime/arch/arm64/quick_entrypoints_arm64.S +++ b/runtime/arch/arm64/quick_entrypoints_arm64.S @@ -331,6 +331,7 @@ #endif // Save FP registers. + // For better performance, store d0 and d31 separately, so that all STPs are 16-byte aligned. str d0, [sp, #8] stp d1, d2, [sp, #16] stp d3, d4, [sp, #32] @@ -431,6 +432,7 @@ .macro RESTORE_SAVE_EVERYTHING_FRAME // Restore FP registers. + // For better performance, load d0 and d31 separately, so that all LDPs are 16-byte aligned. ldr d0, [sp, #8] ldp d1, d2, [sp, #16] ldp d3, d4, [sp, #32] diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc index b84e29f7ce..aeb990cae8 100644 --- a/runtime/base/arena_allocator.cc +++ b/runtime/base/arena_allocator.cc @@ -163,6 +163,7 @@ Arena::Arena() : bytes_allocated_(0), next_(nullptr) { MallocArena::MallocArena(size_t size) { memory_ = reinterpret_cast<uint8_t*>(calloc(1, size)); CHECK(memory_ != nullptr); // Abort on OOM. + DCHECK_ALIGNED(memory_, ArenaAllocator::kAlignment); size_ = size; } @@ -370,6 +371,7 @@ uint8_t* ArenaAllocator::AllocFromNewArena(size_t bytes) { arena_head_ = new_arena; // Update our internal data structures. begin_ = new_arena->Begin(); + DCHECK_ALIGNED(begin_, kAlignment); ptr_ = begin_ + bytes; end_ = new_arena->End(); } diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h index 6c1a8984cd..3fad96b39b 100644 --- a/runtime/base/arena_allocator.h +++ b/runtime/base/arena_allocator.h @@ -310,6 +310,7 @@ class ArenaAllocator return AllocFromNewArena(bytes); } uint8_t* ret = ptr_; + DCHECK_ALIGNED(ret, kAlignment); ptr_ += bytes; return ret; } @@ -319,20 +320,24 @@ class ArenaAllocator ArenaAllocKind kind = kArenaAllocMisc) ALWAYS_INLINE { DCHECK_GE(new_size, ptr_size); DCHECK_EQ(ptr == nullptr, ptr_size == 0u); - auto* end = reinterpret_cast<uint8_t*>(ptr) + ptr_size; + // We always allocate aligned. + const size_t aligned_ptr_size = RoundUp(ptr_size, kAlignment); + auto* end = reinterpret_cast<uint8_t*>(ptr) + aligned_ptr_size; // If we haven't allocated anything else, we can safely extend. if (end == ptr_) { DCHECK(!IsRunningOnMemoryTool()); // Red zone prevents end == ptr_. - const size_t size_delta = new_size - ptr_size; + const size_t aligned_new_size = RoundUp(new_size, kAlignment); + const size_t size_delta = aligned_new_size - aligned_ptr_size; // Check remain space. 
const size_t remain = end_ - ptr_; if (remain >= size_delta) { ptr_ += size_delta; ArenaAllocatorStats::RecordAlloc(size_delta, kind); + DCHECK_ALIGNED(ptr_, kAlignment); return ptr; } } - auto* new_ptr = Alloc(new_size, kind); + auto* new_ptr = Alloc(new_size, kind); // Note: Alloc will take care of aligning new_size. memcpy(new_ptr, ptr, ptr_size); // TODO: Call free on ptr if linear alloc supports free. return new_ptr; @@ -362,11 +367,12 @@ class ArenaAllocator bool Contains(const void* ptr) const; + static constexpr size_t kAlignment = 8; + private: void* AllocWithMemoryTool(size_t bytes, ArenaAllocKind kind); uint8_t* AllocFromNewArena(size_t bytes); - static constexpr size_t kAlignment = 8; void UpdateBytesAllocated(); diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h index f2575f702f..97aa499b29 100644 --- a/runtime/class_linker-inl.h +++ b/runtime/class_linker-inl.h @@ -27,6 +27,8 @@ #include "mirror/object_array.h" #include "handle_scope-inl.h" +#include <atomic> + namespace art { inline mirror::Class* ClassLinker::FindSystemClass(Thread* self, const char* descriptor) { @@ -63,18 +65,21 @@ inline mirror::Class* ClassLinker::FindArrayClass(Thread* self, mirror::Class** inline mirror::String* ClassLinker::ResolveString(uint32_t string_idx, ArtMethod* referrer) { mirror::Class* declaring_class = referrer->GetDeclaringClass(); // MethodVerifier refuses methods with string_idx out of bounds. - DCHECK_LT(string_idx, declaring_class->GetDexCache()->NumStrings()); - mirror::String* resolved_string = declaring_class->GetDexCacheStrings()[string_idx].Read(); - if (UNLIKELY(resolved_string == nullptr)) { + DCHECK_LT(string_idx, declaring_class->GetDexFile().NumStringIds());; + mirror::String* string = + mirror::StringDexCachePair::LookupString(declaring_class->GetDexCacheStrings(), + string_idx, + mirror::DexCache::kDexCacheStringCacheSize).Read(); + if (UNLIKELY(string == nullptr)) { StackHandleScope<1> hs(Thread::Current()); Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache())); const DexFile& dex_file = *dex_cache->GetDexFile(); - resolved_string = ResolveString(dex_file, string_idx, dex_cache); - if (resolved_string != nullptr) { - DCHECK_EQ(dex_cache->GetResolvedString(string_idx), resolved_string); + string = ResolveString(dex_file, string_idx, dex_cache); + if (string != nullptr) { + DCHECK_EQ(dex_cache->GetResolvedString(string_idx), string); } } - return resolved_string; + return string; } inline mirror::Class* ClassLinker::ResolveType(uint16_t type_idx, ArtMethod* referrer) { diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc index 4d48da6a83..1a3bba5d23 100644 --- a/runtime/class_linker.cc +++ b/runtime/class_linker.cc @@ -66,6 +66,7 @@ #include "mirror/class.h" #include "mirror/class-inl.h" #include "mirror/class_loader.h" +#include "mirror/dex_cache.h" #include "mirror/dex_cache-inl.h" #include "mirror/field.h" #include "mirror/iftable-inl.h" @@ -1271,7 +1272,10 @@ bool ClassLinker::UpdateAppImageClassLoadersAndDexCaches( // If the oat file expects the dex cache arrays to be in the BSS, then allocate there and // copy over the arrays. 
DCHECK(dex_file != nullptr); - const size_t num_strings = dex_file->NumStringIds(); + size_t num_strings = mirror::DexCache::kDexCacheStringCacheSize; + if (dex_file->NumStringIds() < num_strings) { + num_strings = dex_file->NumStringIds(); + } const size_t num_types = dex_file->NumTypeIds(); const size_t num_methods = dex_file->NumMethodIds(); const size_t num_fields = dex_file->NumFieldIds(); @@ -1281,16 +1285,17 @@ bool ClassLinker::UpdateAppImageClassLoadersAndDexCaches( CHECK_EQ(num_fields, dex_cache->NumResolvedFields()); DexCacheArraysLayout layout(image_pointer_size_, dex_file); uint8_t* const raw_arrays = oat_dex_file->GetDexCacheArrays(); - // The space is not yet visible to the GC, we can avoid the read barriers and use - // std::copy_n. if (num_strings != 0u) { - GcRoot<mirror::String>* const image_resolved_strings = dex_cache->GetStrings(); - GcRoot<mirror::String>* const strings = - reinterpret_cast<GcRoot<mirror::String>*>(raw_arrays + layout.StringsOffset()); - for (size_t j = 0; kIsDebugBuild && j < num_strings; ++j) { - DCHECK(strings[j].IsNull()); + mirror::StringDexCacheType* const image_resolved_strings = dex_cache->GetStrings(); + mirror::StringDexCacheType* const strings = + reinterpret_cast<mirror::StringDexCacheType*>(raw_arrays + layout.StringsOffset()); + for (size_t j = 0; j < num_strings; ++j) { + DCHECK_EQ(strings[j].load(std::memory_order_relaxed).string_index, 0u); + DCHECK(strings[j].load(std::memory_order_relaxed).string_pointer.IsNull()); + strings[j].store(image_resolved_strings[j].load(std::memory_order_relaxed), + std::memory_order_relaxed); } - std::copy_n(image_resolved_strings, num_strings, strings); + mirror::StringDexCachePair::Initialize(strings); dex_cache->SetStrings(strings); } if (num_types != 0u) { @@ -1473,14 +1478,14 @@ class UpdateClassLoaderAndResolvedStringsVisitor { bool operator()(mirror::Class* klass) const SHARED_REQUIRES(Locks::mutator_lock_) { if (forward_strings_) { - GcRoot<mirror::String>* strings = klass->GetDexCacheStrings(); + mirror::StringDexCacheType* strings = klass->GetDexCacheStrings(); if (strings != nullptr) { DCHECK( space_->GetImageHeader().GetImageSection(ImageHeader::kSectionDexCacheArrays).Contains( reinterpret_cast<uint8_t*>(strings) - space_->Begin())) << "String dex cache array for " << PrettyClass(klass) << " is not in app image"; // Dex caches have already been updated, so take the strings pointer from there. - GcRoot<mirror::String>* new_strings = klass->GetDexCache()->GetStrings(); + mirror::StringDexCacheType* new_strings = klass->GetDexCache()->GetStrings(); DCHECK_NE(strings, new_strings); klass->SetDexCacheStrings(new_strings); } @@ -2079,18 +2084,31 @@ mirror::DexCache* ClassLinker::AllocDexCache(Thread* self, // Zero-initialized. raw_arrays = reinterpret_cast<uint8_t*>(linear_alloc->Alloc(self, layout.Size())); } - GcRoot<mirror::String>* strings = (dex_file.NumStringIds() == 0u) ? nullptr : - reinterpret_cast<GcRoot<mirror::String>*>(raw_arrays + layout.StringsOffset()); + mirror::StringDexCacheType* strings = (dex_file.NumStringIds() == 0u) ? nullptr : + reinterpret_cast<mirror::StringDexCacheType*>(raw_arrays + layout.StringsOffset()); GcRoot<mirror::Class>* types = (dex_file.NumTypeIds() == 0u) ? nullptr : reinterpret_cast<GcRoot<mirror::Class>*>(raw_arrays + layout.TypesOffset()); ArtMethod** methods = (dex_file.NumMethodIds() == 0u) ? nullptr : reinterpret_cast<ArtMethod**>(raw_arrays + layout.MethodsOffset()); ArtField** fields = (dex_file.NumFieldIds() == 0u) ? 
nullptr : reinterpret_cast<ArtField**>(raw_arrays + layout.FieldsOffset()); + size_t num_strings = mirror::DexCache::kDexCacheStringCacheSize; + if (dex_file.NumStringIds() < num_strings) { + num_strings = dex_file.NumStringIds(); + } + DCHECK_ALIGNED(raw_arrays, alignof(mirror::StringDexCacheType)) << + "Expected raw_arrays to align to StringDexCacheType."; + DCHECK_ALIGNED(layout.StringsOffset(), alignof(mirror::StringDexCacheType)) << + "Expected StringsOffset() to align to StringDexCacheType."; + DCHECK_ALIGNED(strings, alignof(mirror::StringDexCacheType)) << + "Expected strings to align to StringDexCacheType."; + static_assert(alignof(mirror::StringDexCacheType) == 8u, + "Expected StringDexCacheType to have align of 8."); if (kIsDebugBuild) { // Sanity check to make sure all the dex cache arrays are empty. b/28992179 - for (size_t i = 0; i < dex_file.NumStringIds(); ++i) { - CHECK(strings[i].Read<kWithoutReadBarrier>() == nullptr); + for (size_t i = 0; i < num_strings; ++i) { + CHECK_EQ(strings[i].load(std::memory_order_relaxed).string_index, 0u); + CHECK(strings[i].load(std::memory_order_relaxed).string_pointer.IsNull()); } for (size_t i = 0; i < dex_file.NumTypeIds(); ++i) { CHECK(types[i].Read<kWithoutReadBarrier>() == nullptr); @@ -2102,10 +2120,13 @@ mirror::DexCache* ClassLinker::AllocDexCache(Thread* self, CHECK(mirror::DexCache::GetElementPtrSize(fields, i, image_pointer_size_) == nullptr); } } + if (strings != nullptr) { + mirror::StringDexCachePair::Initialize(strings); + } dex_cache->Init(&dex_file, location.Get(), strings, - dex_file.NumStringIds(), + num_strings, types, dex_file.NumTypeIds(), methods, diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc index 42816a04f1..7afe6f9ab4 100644 --- a/runtime/gc/collector/concurrent_copying.cc +++ b/runtime/gc/collector/concurrent_copying.cc @@ -435,8 +435,10 @@ void ConcurrentCopying::FlipThreadRoots() { gc_barrier_->Init(self, 0); ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_); FlipCallback flip_callback(this); + heap_->ThreadFlipBegin(self); // Sync with JNI critical calls. size_t barrier_count = Runtime::Current()->FlipThreadRoots( &thread_flip_visitor, &flip_callback, this); + heap_->ThreadFlipEnd(self); { ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun); gc_barrier_->Increment(self, barrier_count); diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc index 638c1d841a..39f26e7fe2 100644 --- a/runtime/gc/heap.cc +++ b/runtime/gc/heap.cc @@ -878,13 +878,9 @@ void Heap::IncrementDisableThreadFlip(Thread* self) { MutexLock mu(self, *thread_flip_lock_); bool has_waited = false; uint64_t wait_start = NanoTime(); - if (thread_flip_running_) { - TimingLogger::ScopedTiming split("IncrementDisableThreadFlip", - GetCurrentGcIteration()->GetTimings()); - while (thread_flip_running_) { - has_waited = true; - thread_flip_cond_->Wait(self); - } + while (thread_flip_running_) { + has_waited = true; + thread_flip_cond_->Wait(self); } ++disable_thread_flip_count_; if (has_waited) { diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc index 4505c249fe..ae6c3214a7 100644 --- a/runtime/gc/space/image_space.cc +++ b/runtime/gc/space/image_space.cc @@ -1197,9 +1197,9 @@ class ImageSpaceLoader { for (int32_t i = 0, count = dex_caches->GetLength(); i < count; ++i) { mirror::DexCache* dex_cache = dex_caches->Get<kVerifyNone, kWithoutReadBarrier>(i); // Fix up dex cache pointers. 
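// (Illustrative note: the strings array is native memory in the image's dex-cache-arrays
// section, not a managed-heap object, so its pointer is forwarded below with
// fixup_adapter.ForwardObject() instead of through the usual object visitor.)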
- GcRoot<mirror::String>* strings = dex_cache->GetStrings(); + mirror::StringDexCacheType* strings = dex_cache->GetStrings(); if (strings != nullptr) { - GcRoot<mirror::String>* new_strings = fixup_adapter.ForwardObject(strings); + mirror::StringDexCacheType* new_strings = fixup_adapter.ForwardObject(strings); if (strings != new_strings) { dex_cache->SetStrings(new_strings); } diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h index 4fd1514e39..90c8227443 100644 --- a/runtime/interpreter/interpreter_common.h +++ b/runtime/interpreter/interpreter_common.h @@ -23,6 +23,7 @@ #include <iostream> #include <sstream> +#include <atomic> #include "art_field-inl.h" #include "art_method-inl.h" @@ -37,6 +38,8 @@ #include "handle_scope-inl.h" #include "jit/jit.h" #include "mirror/class-inl.h" +#include "mirror/dex_cache.h" +#include "mirror/method.h" #include "mirror/object-inl.h" #include "mirror/object_array-inl.h" #include "mirror/string-inl.h" @@ -264,15 +267,20 @@ static inline String* ResolveString(Thread* self, ShadowFrame& shadow_frame, uin ArtMethod* method = shadow_frame.GetMethod(); mirror::Class* declaring_class = method->GetDeclaringClass(); // MethodVerifier refuses methods with string_idx out of bounds. - DCHECK_LT(string_idx, declaring_class->GetDexCache()->NumStrings()); - mirror::String* s = declaring_class->GetDexCacheStrings()[string_idx].Read(); - if (UNLIKELY(s == nullptr)) { + DCHECK_LT(string_idx % mirror::DexCache::kDexCacheStringCacheSize, + declaring_class->GetDexFile().NumStringIds()); + mirror::String* string_ptr = + mirror::StringDexCachePair::LookupString(declaring_class->GetDexCacheStrings(), + string_idx, + mirror::DexCache::kDexCacheStringCacheSize).Read(); + if (UNLIKELY(string_ptr == nullptr)) { StackHandleScope<1> hs(self); Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache())); - s = Runtime::Current()->GetClassLinker()->ResolveString(*method->GetDexFile(), string_idx, - dex_cache); + string_ptr = Runtime::Current()->GetClassLinker()->ResolveString(*method->GetDexFile(), + string_idx, + dex_cache); } - return s; + return string_ptr; } // Handles div-int, div-int/2addr, div-int/li16 and div-int/lit8 instructions. diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h index 8ad47eb799..0f2aac2790 100644 --- a/runtime/mirror/class-inl.h +++ b/runtime/mirror/class-inl.h @@ -26,7 +26,6 @@ #include "base/length_prefixed_array.h" #include "class_loader.h" #include "common_throws.h" -#include "dex_cache.h" #include "dex_file.h" #include "gc/heap-inl.h" #include "iftable.h" @@ -899,12 +898,12 @@ inline uint32_t Class::NumDirectInterfaces() { } } -inline void Class::SetDexCacheStrings(GcRoot<String>* new_dex_cache_strings) { +inline void Class::SetDexCacheStrings(StringDexCacheType* new_dex_cache_strings) { SetFieldPtr<false>(DexCacheStringsOffset(), new_dex_cache_strings); } -inline GcRoot<String>* Class::GetDexCacheStrings() { - return GetFieldPtr<GcRoot<String>*>(DexCacheStringsOffset()); +inline StringDexCacheType* Class::GetDexCacheStrings() { + return GetFieldPtr64<StringDexCacheType*>(DexCacheStringsOffset()); } template<ReadBarrierOption kReadBarrierOption, class Visitor> @@ -1058,8 +1057,8 @@ inline void Class::FixupNativePointers(mirror::Class* dest, dest->SetMethodsPtrInternal(new_methods); } // Update dex cache strings. 
- GcRoot<mirror::String>* strings = GetDexCacheStrings(); - GcRoot<mirror::String>* new_strings = visitor(strings); + StringDexCacheType* strings = GetDexCacheStrings(); + StringDexCacheType* new_strings = visitor(strings); if (strings != new_strings) { dest->SetDexCacheStrings(new_strings); } diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h index 978fc4cbbf..e2cd649d99 100644 --- a/runtime/mirror/class.h +++ b/runtime/mirror/class.h @@ -54,6 +54,9 @@ class Constructor; class DexCache; class IfTable; class Method; +struct StringDexCachePair; + +using StringDexCacheType = std::atomic<mirror::StringDexCachePair>; // C++ mirror of java.lang.Class class MANAGED Class FINAL : public Object { @@ -1219,8 +1222,8 @@ class MANAGED Class FINAL : public Object { bool GetSlowPathEnabled() SHARED_REQUIRES(Locks::mutator_lock_); void SetSlowPath(bool enabled) SHARED_REQUIRES(Locks::mutator_lock_); - GcRoot<String>* GetDexCacheStrings() SHARED_REQUIRES(Locks::mutator_lock_); - void SetDexCacheStrings(GcRoot<String>* new_dex_cache_strings) + StringDexCacheType* GetDexCacheStrings() SHARED_REQUIRES(Locks::mutator_lock_); + void SetDexCacheStrings(StringDexCacheType* new_dex_cache_strings) SHARED_REQUIRES(Locks::mutator_lock_); static MemberOffset DexCacheStringsOffset() { return OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_strings_); diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h index 84469ea868..a3071b7f63 100644 --- a/runtime/mirror/dex_cache-inl.h +++ b/runtime/mirror/dex_cache-inl.h @@ -27,6 +27,8 @@ #include "mirror/class.h" #include "runtime.h" +#include <atomic> + namespace art { namespace mirror { @@ -35,15 +37,18 @@ inline uint32_t DexCache::ClassSize(PointerSize pointer_size) { return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0, pointer_size); } -inline String* DexCache::GetResolvedString(uint32_t string_idx) { - DCHECK_LT(string_idx, NumStrings()); - return GetStrings()[string_idx].Read(); +inline mirror::String* DexCache::GetResolvedString(uint32_t string_idx) { + DCHECK_LT(string_idx, GetDexFile()->NumStringIds()); + return StringDexCachePair::LookupString(GetStrings(), string_idx, NumStrings()).Read(); } -inline void DexCache::SetResolvedString(uint32_t string_idx, String* resolved) { - DCHECK_LT(string_idx, NumStrings()); +inline void DexCache::SetResolvedString(uint32_t string_idx, mirror::String* resolved) { + DCHECK_LT(string_idx, GetDexFile()->NumStringIds()); // TODO: default transaction support. - GetStrings()[string_idx] = GcRoot<String>(resolved); + StringDexCachePair idx_ptr; + idx_ptr.string_index = string_idx; + idx_ptr.string_pointer = GcRoot<String>(resolved); + GetStrings()[string_idx % NumStrings()].store(idx_ptr, std::memory_order_relaxed); // TODO: Fine-grained marking, so that we don't need to go through all arrays in full. Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(this); } @@ -131,9 +136,16 @@ inline void DexCache::VisitReferences(mirror::Class* klass, const Visitor& visit VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass, visitor); // Visit arrays after.
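// (Illustrative note: each entry is now an atomic pair. The loop below loads an entry once,
// visits the string pointer through a temporary GcRoot, and stores the pair back only if the
// visitor actually moved the string.)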
if (kVisitNativeRoots) { - GcRoot<mirror::String>* strings = GetStrings(); + mirror::StringDexCacheType* strings = GetStrings(); for (size_t i = 0, num_strings = NumStrings(); i != num_strings; ++i) { - visitor.VisitRootIfNonNull(strings[i].AddressWithoutBarrier()); + StringDexCachePair source = strings[i].load(std::memory_order_relaxed); + mirror::String* before = source.string_pointer.Read<kReadBarrierOption>(); + GcRoot<mirror::String> root(before); + visitor.VisitRootIfNonNull(root.AddressWithoutBarrier()); + if (root.Read() != before) { + source.string_pointer = GcRoot<String>(root.Read()); + strings[i].store(source, std::memory_order_relaxed); + } } GcRoot<mirror::Class>* resolved_types = GetResolvedTypes(); for (size_t i = 0, num_types = NumResolvedTypes(); i != num_types; ++i) { @@ -143,12 +155,14 @@ } template <ReadBarrierOption kReadBarrierOption, typename Visitor> -inline void DexCache::FixupStrings(GcRoot<mirror::String>* dest, const Visitor& visitor) { - GcRoot<mirror::String>* src = GetStrings(); +inline void DexCache::FixupStrings(mirror::StringDexCacheType* dest, const Visitor& visitor) { + mirror::StringDexCacheType* src = GetStrings(); for (size_t i = 0, count = NumStrings(); i < count; ++i) { - mirror::String* source = src[i].Read<kReadBarrierOption>(); - mirror::String* new_source = visitor(source); - dest[i] = GcRoot<mirror::String>(new_source); + StringDexCachePair source = src[i].load(std::memory_order_relaxed); + mirror::String* ptr = source.string_pointer.Read<kReadBarrierOption>(); + mirror::String* new_source = visitor(ptr); + source.string_pointer = GcRoot<String>(new_source); + dest[i].store(source, std::memory_order_relaxed); } } diff --git a/runtime/mirror/dex_cache.cc b/runtime/mirror/dex_cache.cc index 57066d8376..cfcec9cd3a 100644 --- a/runtime/mirror/dex_cache.cc +++ b/runtime/mirror/dex_cache.cc @@ -33,7 +33,7 @@ namespace mirror { void DexCache::Init(const DexFile* dex_file, String* location, - GcRoot<String>* strings, + StringDexCacheType* strings, uint32_t num_strings, GcRoot<Class>* resolved_types, uint32_t num_resolved_types, diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h index d02a0d8e2f..770c45d558 100644 --- a/runtime/mirror/dex_cache.h +++ b/runtime/mirror/dex_cache.h @@ -35,12 +35,62 @@ namespace mirror { class String; +struct PACKED(8) StringDexCachePair { + GcRoot<String> string_pointer; + uint32_t string_index; + // The array is initially [ {0,0}, {0,0}, {0,0} ... ] + // We maintain the invariant that once a dex cache entry is populated, + // the pointer is always non-0. + // Any given entry would thus be: + // {non-0, non-0} OR {0,0} + // + // It is then generally sufficient to check whether the + // lookup string index matches the stored string index (for a string index > 0), + // because if it does, the pointer is also non-null. + // + // For the 0th entry, which is a special case, the value is either + // {0,0} (initial state) or {non-0, 0}, which indicates + // that a valid string is stored at that index for a dex string id of 0. + // + // As an optimization, we want to avoid branching on the string pointer since + // it's always non-null if the string id branch succeeds (except for the 0th string id). + // Set the initial state for the 0th entry to be {0,1}, which is guaranteed to fail + // the lookup string id == stored id branch.
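+ //
+ // Worked example (illustrative, using the 1024-entry cache defined below):
+ // dex string ids 1 and 1025 both map to slot 1 (1 % 1024 == 1025 % 1024).
+ // After SetResolvedString(1, s), slot 1 holds {s, 1}:
+ //   LookupString(strings, 1, 1024)    -> stored index matches 1  -> returns s.
+ //   LookupString(strings, 1025, 1024) -> stored index 1 != 1025  -> returns null,
+ //                                        and the caller falls back to full resolution.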
+ static void Initialize(StringDexCacheType* strings) { + DCHECK(StringDexCacheType().is_lock_free()); + mirror::StringDexCachePair first_elem; + first_elem.string_pointer = GcRoot<String>(nullptr); + first_elem.string_index = 1; + strings[0].store(first_elem, std::memory_order_relaxed); + } + static GcRoot<String> LookupString(StringDexCacheType* dex_cache, + uint32_t string_idx, + uint32_t cache_size) { + StringDexCachePair index_string = dex_cache[string_idx % cache_size] + .load(std::memory_order_relaxed); + if (string_idx != index_string.string_index) return GcRoot<String>(nullptr); + DCHECK(!index_string.string_pointer.IsNull()); + return index_string.string_pointer; + } +}; +using StringDexCacheType = std::atomic<StringDexCachePair>; + + // C++ mirror of java.lang.DexCache. class MANAGED DexCache FINAL : public Object { public: // Size of java.lang.DexCache.class. static uint32_t ClassSize(PointerSize pointer_size); + // Size of string dex cache. Needs to be a power of 2 for entrypoint assumptions to hold. + static constexpr size_t kDexCacheStringCacheSize = 1024; + static_assert(IsPowerOfTwo(kDexCacheStringCacheSize), + "String dex cache size is not a power of 2."); + + static constexpr size_t StaticStringSize() { + return kDexCacheStringCacheSize; + } + // Size of an instance of java.lang.DexCache not including referenced values. static constexpr uint32_t InstanceSize() { return sizeof(DexCache); @@ -48,7 +98,7 @@ class MANAGED DexCache FINAL : public Object { void Init(const DexFile* dex_file, String* location, - GcRoot<String>* strings, + StringDexCacheType* strings, uint32_t num_strings, GcRoot<Class>* resolved_types, uint32_t num_resolved_types, @@ -62,7 +112,7 @@ class MANAGED DexCache FINAL : public Object { SHARED_REQUIRES(Locks::mutator_lock_); template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor> - void FixupStrings(GcRoot<mirror::String>* dest, const Visitor& visitor) + void FixupStrings(StringDexCacheType* dest, const Visitor& visitor) SHARED_REQUIRES(Locks::mutator_lock_); template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor> @@ -109,10 +159,10 @@ class MANAGED DexCache FINAL : public Object { return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_methods_); } - String* GetResolvedString(uint32_t string_idx) ALWAYS_INLINE + mirror::String* GetResolvedString(uint32_t string_idx) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_); - void SetResolvedString(uint32_t string_idx, String* resolved) ALWAYS_INLINE + void SetResolvedString(uint32_t string_idx, mirror::String* resolved) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_); Class* GetResolvedType(uint32_t type_idx) SHARED_REQUIRES(Locks::mutator_lock_); @@ -135,11 +185,11 @@ class MANAGED DexCache FINAL : public Object { ALWAYS_INLINE void SetResolvedField(uint32_t idx, ArtField* field, PointerSize ptr_size) SHARED_REQUIRES(Locks::mutator_lock_); - GcRoot<String>* GetStrings() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) { - return GetFieldPtr<GcRoot<String>*>(StringsOffset()); + StringDexCacheType* GetStrings() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) { + return GetFieldPtr64<StringDexCacheType*>(StringsOffset()); } - void SetStrings(GcRoot<String>* strings) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) { + void SetStrings(StringDexCacheType* strings) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) { SetFieldPtr<false>(StringsOffset(), strings); } @@ -224,7 +274,8 @@ class MANAGED DexCache FINAL : public Object { uint64_t 
resolved_fields_; // ArtField*, array with num_resolved_fields_ elements. uint64_t resolved_methods_; // ArtMethod*, array with num_resolved_methods_ elements. uint64_t resolved_types_; // GcRoot<Class>*, array with num_resolved_types_ elements. - uint64_t strings_; // GcRoot<String>*, array with num_strings_ elements. + uint64_t strings_; // std::atomic<StringDexCachePair>*, + // array with num_strings_ elements. uint32_t num_resolved_fields_; // Number of elements in the resolved_fields_ array. uint32_t num_resolved_methods_; // Number of elements in the resolved_methods_ array. uint32_t num_resolved_types_; // Number of elements in the resolved_types_ array. diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc index 48f2ca59e8..175997c2dc 100644 --- a/runtime/mirror/dex_cache_test.cc +++ b/runtime/mirror/dex_cache_test.cc @@ -22,6 +22,7 @@ #include "common_runtime_test.h" #include "linear_alloc.h" #include "mirror/class_loader-inl.h" +#include "mirror/dex_cache-inl.h" #include "handle_scope-inl.h" #include "scoped_thread_state_change.h" @@ -40,7 +41,8 @@ TEST_F(DexCacheTest, Open) { Runtime::Current()->GetLinearAlloc()))); ASSERT_TRUE(dex_cache.Get() != nullptr); - EXPECT_EQ(java_lang_dex_file_->NumStringIds(), dex_cache->NumStrings()); + EXPECT_TRUE(dex_cache->StaticStringSize() == dex_cache->NumStrings() + || java_lang_dex_file_->NumStringIds() == dex_cache->NumStrings()); EXPECT_EQ(java_lang_dex_file_->NumTypeIds(), dex_cache->NumResolvedTypes()); EXPECT_EQ(java_lang_dex_file_->NumMethodIds(), dex_cache->NumResolvedMethods()); EXPECT_EQ(java_lang_dex_file_->NumFieldIds(), dex_cache->NumResolvedFields()); diff --git a/runtime/native/java_lang_DexCache.cc b/runtime/native/java_lang_DexCache.cc index 994ccb1ad9..f0140a303b 100644 --- a/runtime/native/java_lang_DexCache.cc +++ b/runtime/native/java_lang_DexCache.cc @@ -59,7 +59,7 @@ static jobject DexCache_getResolvedType(JNIEnv* env, jobject javaDexCache, jint static jobject DexCache_getResolvedString(JNIEnv* env, jobject javaDexCache, jint string_index) { ScopedFastNativeObjectAccess soa(env); mirror::DexCache* dex_cache = soa.Decode<mirror::DexCache*>(javaDexCache); - CHECK_LT(static_cast<size_t>(string_index), dex_cache->NumStrings()); + CHECK_LT(static_cast<size_t>(string_index), dex_cache->GetDexFile()->NumStringIds()); return soa.AddLocalReference<jobject>(dex_cache->GetResolvedString(string_index)); } @@ -75,7 +75,7 @@ static void DexCache_setResolvedString(JNIEnv* env, jobject javaDexCache, jint s jobject string) { ScopedFastNativeObjectAccess soa(env); mirror::DexCache* dex_cache = soa.Decode<mirror::DexCache*>(javaDexCache); - CHECK_LT(static_cast<size_t>(string_index), dex_cache->NumStrings()); + CHECK_LT(static_cast<size_t>(string_index), dex_cache->GetDexFile()->NumStringIds()); dex_cache->SetResolvedString(string_index, soa.Decode<mirror::String*>(string)); } diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h index 216d8a7194..3aa1fc256d 100644 --- a/runtime/thread-inl.h +++ b/runtime/thread-inl.h @@ -224,7 +224,6 @@ inline ThreadState Thread::TransitionFromSuspendedToRunnable() { thread_to_pass = this; } MutexLock mu(thread_to_pass, *Locks::thread_suspend_count_lock_); - ScopedTransitioningToRunnable scoped_transitioning_to_runnable(this); old_state_and_flags.as_int = tls32_.state_and_flags.as_int; DCHECK_EQ(old_state_and_flags.as_struct.state, old_state); while ((old_state_and_flags.as_struct.flags & kSuspendRequest) != 0) { diff --git a/runtime/thread.cc b/runtime/thread.cc index 
0457ba0d26..b35a614e99 100644 --- a/runtime/thread.cc +++ b/runtime/thread.cc @@ -1217,8 +1217,10 @@ void Thread::FullSuspendCheck() { ScopedTrace trace(__FUNCTION__); VLOG(threads) << this << " self-suspending"; // Make thread appear suspended to other threads, release mutator_lock_. + tls32_.suspended_at_suspend_check = true; // Transition to suspended and back to runnable, re-acquire share on mutator_lock_. ScopedThreadSuspension(this, kSuspended); + tls32_.suspended_at_suspend_check = false; VLOG(threads) << this << " self-reviving"; } @@ -1633,7 +1635,7 @@ Thread::Thread(bool daemon) : tls32_(daemon), wait_monitor_(nullptr), interrupte } tlsPtr_.flip_function = nullptr; tlsPtr_.thread_local_mark_stack = nullptr; - tls32_.is_transitioning_to_runnable = false; + tls32_.suspended_at_suspend_check = false; } bool Thread::IsStillStarting() const { @@ -1771,7 +1773,7 @@ Thread::~Thread() { CHECK(tlsPtr_.checkpoint_function == nullptr); CHECK_EQ(checkpoint_overflow_.size(), 0u); CHECK(tlsPtr_.flip_function == nullptr); - CHECK_EQ(tls32_.is_transitioning_to_runnable, false); + CHECK_EQ(tls32_.suspended_at_suspend_check, false); // Make sure we processed all deoptimization requests. CHECK(tlsPtr_.deoptimization_context_stack == nullptr) << "Missed deoptimization"; diff --git a/runtime/thread.h b/runtime/thread.h index 1c2d4ab533..840b7817f8 100644 --- a/runtime/thread.h +++ b/runtime/thread.h @@ -1085,12 +1085,8 @@ class Thread { return tlsPtr_.nested_signal_state; } - bool IsTransitioningToRunnable() const { - return tls32_.is_transitioning_to_runnable; - } - - void SetIsTransitioningToRunnable(bool value) { - tls32_.is_transitioning_to_runnable = value; + bool IsSuspendedAtSuspendCheck() const { + return tls32_.suspended_at_suspend_check; } void PushVerifier(verifier::MethodVerifier* verifier); @@ -1268,7 +1264,7 @@ class Thread { suspend_count(0), debug_suspend_count(0), thin_lock_thread_id(0), tid(0), daemon(is_daemon), throwing_OutOfMemoryError(false), no_thread_suspension(0), thread_exit_check_count(0), handling_signal_(false), - is_transitioning_to_runnable(false), ready_for_debug_invoke(false), + suspended_at_suspend_check(false), ready_for_debug_invoke(false), debug_method_entry_(false), is_gc_marking(false), weak_ref_access_enabled(true), disable_thread_flip_count(0) { } @@ -1310,10 +1306,10 @@ class Thread { // True if signal is being handled by this thread. bool32_t handling_signal_; - // True if the thread is in TransitionFromSuspendedToRunnable(). This is used to distinguish the - // non-runnable threads (eg. kNative, kWaiting) that are about to transition to runnable from - // the rest of them. - bool32_t is_transitioning_to_runnable; + // True if the thread is suspended in FullSuspendCheck(). This is + // used to distinguish runnable threads that are suspended due to + // a normal suspend check from other threads. + bool32_t suspended_at_suspend_check; // True if the thread has been suspended by a debugger event. 
This is // used to invoke a method from the debugger which is only allowed when @@ -1592,26 +1588,6 @@ class ScopedDebugDisallowReadBarriers { Thread* const self_; }; -class ScopedTransitioningToRunnable : public ValueObject { - public: - explicit ScopedTransitioningToRunnable(Thread* self) - : self_(self) { - DCHECK_EQ(self, Thread::Current()); - if (kUseReadBarrier) { - self_->SetIsTransitioningToRunnable(true); - } - } - - ~ScopedTransitioningToRunnable() { - if (kUseReadBarrier) { - self_->SetIsTransitioningToRunnable(false); - } - } - - private: - Thread* const self_; -}; - std::ostream& operator<<(std::ostream& os, const Thread& thread); std::ostream& operator<<(std::ostream& os, const StackedShadowFrameType& thread); diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc index 688514cd76..419ecec696 100644 --- a/runtime/thread_list.cc +++ b/runtime/thread_list.cc @@ -405,8 +405,6 @@ size_t ThreadList::FlipThreadRoots(Closure* thread_flip_visitor, Locks::thread_suspend_count_lock_->AssertNotHeld(self); CHECK_NE(self->GetState(), kRunnable); - collector->GetHeap()->ThreadFlipBegin(self); // Sync with JNI critical calls. - SuspendAllInternal(self, self, nullptr); // Run the flip callback for the collector. @@ -416,31 +414,26 @@ size_t ThreadList::FlipThreadRoots(Closure* thread_flip_visitor, collector->RegisterPause(NanoTime() - start_time); // Resume runnable threads. - size_t runnable_thread_count = 0; + std::vector<Thread*> runnable_threads; std::vector<Thread*> other_threads; { - TimingLogger::ScopedTiming split2("ResumeRunnableThreads", collector->GetTimings()); MutexLock mu(self, *Locks::thread_list_lock_); MutexLock mu2(self, *Locks::thread_suspend_count_lock_); --suspend_all_count_; for (const auto& thread : list_) { - // Set the flip function for all threads because Thread::DumpState/DumpJavaStack() (invoked by - // a checkpoint) may cause the flip function to be run for a runnable/suspended thread before - // a runnable thread runs it for itself or we run it for a suspended thread below. - thread->SetFlipFunction(thread_flip_visitor); if (thread == self) { continue; } - // Resume early the threads that were runnable but are suspended just for this thread flip or - // about to transition from non-runnable (eg. kNative at the SOA entry in a JNI function) to - // runnable (both cases waiting inside Thread::TransitionFromSuspendedToRunnable), or waiting - // for the thread flip to end at the JNI critical section entry (kWaitingForGcThreadFlip), - ThreadState state = thread->GetState(); - if (state == kWaitingForGcThreadFlip || - thread->IsTransitioningToRunnable()) { + // Set the flip function for both runnable and suspended threads + // because Thread::DumpState/DumpJavaStack() (invoked by a + // checkpoint) may cause the flip function to be run for a + // runnable/suspended thread before a runnable thread runs it + // for itself or we run it for a suspended thread below. + thread->SetFlipFunction(thread_flip_visitor); + if (thread->IsSuspendedAtSuspendCheck()) { // The thread will resume right after the broadcast. thread->ModifySuspendCount(self, -1, nullptr, false); - ++runnable_thread_count; + runnable_threads.push_back(thread); } else { other_threads.push_back(thread); } @@ -448,11 +441,8 @@ size_t ThreadList::FlipThreadRoots(Closure* thread_flip_visitor, Thread::resume_cond_->Broadcast(self); } - collector->GetHeap()->ThreadFlipEnd(self); - // Run the closure on the other threads and let them resume.
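// (Illustrative note: threads parked at a suspend check were already resumed above and will
// run the flip function themselves on wakeup; the remaining suspended threads have it run
// here on their behalf before they are resumed.)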
{ - TimingLogger::ScopedTiming split3("FlipOtherThreads", collector->GetTimings()); ReaderMutexLock mu(self, *Locks::mutator_lock_); for (const auto& thread : other_threads) { Closure* flip_func = thread->GetFlipFunction(); @@ -461,15 +451,11 @@ size_t ThreadList::FlipThreadRoots(Closure* thread_flip_visitor, } } // Run it for self. - Closure* flip_func = self->GetFlipFunction(); - if (flip_func != nullptr) { - flip_func->Run(self); - } + thread_flip_visitor->Run(self); } // Resume other threads. { - TimingLogger::ScopedTiming split4("ResumeOtherThreads", collector->GetTimings()); MutexLock mu2(self, *Locks::thread_suspend_count_lock_); for (const auto& thread : other_threads) { thread->ModifySuspendCount(self, -1, nullptr, false); @@ -477,7 +463,7 @@ size_t ThreadList::FlipThreadRoots(Closure* thread_flip_visitor, Thread::resume_cond_->Broadcast(self); } - return runnable_thread_count + other_threads.size() + 1; // +1 for self. + return runnable_threads.size() + other_threads.size() + 1; // +1 for self. } void ThreadList::SuspendAll(const char* cause, bool long_suspend) { diff --git a/runtime/utils/dex_cache_arrays_layout-inl.h b/runtime/utils/dex_cache_arrays_layout-inl.h index 7733a51aa3..4c63156939 100644 --- a/runtime/utils/dex_cache_arrays_layout-inl.h +++ b/runtime/utils/dex_cache_arrays_layout-inl.h @@ -23,6 +23,7 @@ #include "base/logging.h" #include "gc_root.h" #include "globals.h" +#include "mirror/dex_cache.h" #include "primitive.h" namespace art { @@ -45,12 +46,11 @@ inline DexCacheArraysLayout::DexCacheArraysLayout(PointerSize pointer_size, cons : DexCacheArraysLayout(pointer_size, dex_file->GetHeader()) { } -inline size_t DexCacheArraysLayout::Alignment() const { +inline constexpr size_t DexCacheArraysLayout::Alignment() { // GcRoot<> alignment is 4, i.e. lower than or equal to the pointer alignment. static_assert(alignof(GcRoot<mirror::Class>) == 4, "Expecting alignof(GcRoot<>) == 4"); - static_assert(alignof(GcRoot<mirror::String>) == 4, "Expecting alignof(GcRoot<>) == 4"); - // Pointer alignment is the same as pointer size. 
- return static_cast<size_t>(pointer_size_); + static_assert(alignof(mirror::StringDexCacheType) == 8, "Expecting alignof(StringDexCacheType) == 8"); + return alignof(mirror::StringDexCacheType); } template <typename T> @@ -87,15 +87,22 @@ inline size_t DexCacheArraysLayout::MethodsAlignment() const { } inline size_t DexCacheArraysLayout::StringOffset(uint32_t string_idx) const { - return strings_offset_ + ElementOffset(GcRootAsPointerSize<mirror::String>(), string_idx); + return strings_offset_ + ElementOffset(PointerSize::k64, + string_idx % mirror::DexCache::kDexCacheStringCacheSize); } inline size_t DexCacheArraysLayout::StringsSize(size_t num_elements) const { - return ArraySize(GcRootAsPointerSize<mirror::String>(), num_elements); + size_t cache_size = mirror::DexCache::kDexCacheStringCacheSize; + if (num_elements < cache_size) { + cache_size = num_elements; + } + return ArraySize(PointerSize::k64, cache_size); } inline size_t DexCacheArraysLayout::StringsAlignment() const { - return alignof(GcRoot<mirror::String>); + static_assert(alignof(mirror::StringDexCacheType) == 8, + "Expecting alignof(StringDexCacheType) == 8"); + return alignof(mirror::StringDexCacheType); } inline size_t DexCacheArraysLayout::FieldOffset(uint32_t field_idx) const { diff --git a/runtime/utils/dex_cache_arrays_layout.h b/runtime/utils/dex_cache_arrays_layout.h index f2437fa551..20ffa90592 100644 --- a/runtime/utils/dex_cache_arrays_layout.h +++ b/runtime/utils/dex_cache_arrays_layout.h @@ -52,7 +52,7 @@ class DexCacheArraysLayout { return size_; } - size_t Alignment() const; + static constexpr size_t Alignment(); size_t TypesOffset() const { return types_offset_; diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk index f00a0dc55a..75c4f34073 100644 --- a/test/Android.run-test.mk +++ b/test/Android.run-test.mk @@ -225,9 +225,11 @@ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES), # Disable 149-suspend-all-stress, its output is flaky (b/28988206). # Disable 577-profile-foreign-dex (b/27454772). +# Disable 552-checker-sharpening, until compiler component of new string dex cache is added (@cwadsworth, @vmarko) TEST_ART_BROKEN_ALL_TARGET_TESTS := \ 149-suspend-all-stress \ 577-profile-foreign-dex \ + 552-checker-sharpening \ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \ $(COMPILER_TYPES), $(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \ @@ -564,6 +566,18 @@ TEST_ART_BROKEN_OPTIMIZING_READ_BARRIER_RUN_TESTS := \ # Tests that should fail in the read barrier configuration with JIT (Optimizing compiler). TEST_ART_BROKEN_JIT_READ_BARRIER_RUN_TESTS := +# Tests failing in non-Baker read barrier configurations with the Optimizing compiler (AOT). +# 537: Expects an array copy to be intrinsified, but calling-on-slowpath intrinsics are not yet +# handled in non-Baker read barrier configurations. +TEST_ART_BROKEN_OPTIMIZING_NON_BAKER_READ_BARRIER_RUN_TESTS := \ + 537-checker-arraycopy + +# Tests failing in non-Baker read barrier configurations with JIT (Optimizing compiler). +# 537: Expects an array copy to be intrinsified, but calling-on-slowpath intrinsics are not yet +# handled in non-Baker read barrier configurations. 
+TEST_ART_BROKEN_JIT_NON_BAKER_READ_BARRIER_RUN_TESTS := \ + 537-checker-arraycopy + ifeq ($(ART_USE_READ_BARRIER),true) ifneq (,$(filter interpreter,$(COMPILER_TYPES))) ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES), \ @@ -574,9 +588,15 @@ ifeq ($(ART_USE_READ_BARRIER),true) ifneq (,$(filter $(OPTIMIZING_COMPILER_TYPES),$(COMPILER_TYPES))) ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES), \ - $(PREBUILD_TYPES),$(OPTIMIZING_COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES), \ - $(JNI_TYPES),$(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), \ + $(PREBUILD_TYPES),$(OPTIMIZING_COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES), \ + $(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), \ $(TEST_ART_BROKEN_OPTIMIZING_READ_BARRIER_RUN_TESTS),$(ALL_ADDRESS_SIZES)) + ifneq ($(ART_READ_BARRIER_TYPE),BAKER) + ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES), \ + $(PREBUILD_TYPES),$(OPTIMIZING_COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES), \ + $(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), \ + $(TEST_ART_BROKEN_OPTIMIZING_NON_BAKER_READ_BARRIER_RUN_TESTS),$(ALL_ADDRESS_SIZES)) + endif endif ifneq (,$(filter jit,$(COMPILER_TYPES))) @@ -584,6 +604,12 @@ ifeq ($(ART_USE_READ_BARRIER),true) $(PREBUILD_TYPES),jit,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES), \ $(JNI_TYPES),$(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), \ $(TEST_ART_BROKEN_JIT_READ_BARRIER_RUN_TESTS),$(ALL_ADDRESS_SIZES)) + ifneq ($(ART_READ_BARRIER_TYPE),BAKER) + ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES), \ + $(PREBUILD_TYPES),jit,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES), \ + $(JNI_TYPES),$(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), \ + $(TEST_ART_BROKEN_JIT_NON_BAKER_READ_BARRIER_RUN_TESTS),$(ALL_ADDRESS_SIZES)) + endif endif endif diff --git a/tools/javafuzz/README.md b/tools/javafuzz/README.md index ca8532ae72..35c057c5bb 100644 --- a/tools/javafuzz/README.md +++ b/tools/javafuzz/README.md @@ -1,12 +1,12 @@ JavaFuzz ======== -JavaFuzz is tool for generating random Java programs with the objective of -fuzz testing the ART infrastructure. Each randomly generated Java program +JavaFuzz is a tool for generating random Java programs with the objective +of fuzz testing the ART infrastructure. Each randomly generated Java program can be run under various modes of execution, such as using the interpreter, using the optimizing compiler, using an external reference implementation, or using various target architectures. Any difference between the outputs -(a divergence) may indicate a bug in one of the execution modes. +(**divergence**) may indicate a bug in one of the execution modes. JavaFuzz can be combined with dexfuzz to get multilayered fuzz testing. @@ -36,6 +36,24 @@ a fixed testing class named Test. So a typical test run looks as follows. jack -cp ${JACK_CLASSPATH} --output-dex . 
Test.java art -classpath classes.dex Test +How to start the JavaFuzz tests +=============================== + + run_java_fuzz_test.py [--num_tests=n] + [--mode1=mode] [--mode2=mode] + +where + + --num_tests: number of tests to run (10000 by default) + --mode1: m1 + --mode2: m2 + with m1 != m2, and one of + ri : reference implementation on host (default for m1) + hint : Art interpreter on host + hopt : Art optimizing on host (default for m2) + tint : Art interpreter on target + topt : Art optimizing on target + Background ========== diff --git a/tools/javafuzz/run_java_fuzz_test.py b/tools/javafuzz/run_java_fuzz_test.py new file mode 100755 index 0000000000..4f192e7c44 --- /dev/null +++ b/tools/javafuzz/run_java_fuzz_test.py @@ -0,0 +1,406 @@ +#!/usr/bin/env python2 +# +# Copyright (C) 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import abc +import argparse +import subprocess +import sys +import os + +from tempfile import mkdtemp +from threading import Timer + +# Normalized return codes. +EXIT_SUCCESS = 0 +EXIT_TIMEOUT = 1 +EXIT_NOTCOMPILED = 2 +EXIT_NOTRUN = 3 + +# +# Utility methods. +# + +def RunCommand(cmd, args, out, err, timeout=5): + """Executes a command, and returns its return code. + + Args: + cmd: string, a command to execute + args: string, arguments to pass to command (or None) + out: string, file name to open for stdout (or None) + err: string, file name to open for stderr (or None) + timeout: int, timeout in seconds + Returns: + return code of running command (forced EXIT_TIMEOUT on timeout) + """ + cmd = 'exec ' + cmd # preserve pid + if args is not None: + cmd = cmd + ' ' + args + outf = None + if out is not None: + outf = open(out, mode='w') + errf = None + if err is not None: + errf = open(err, mode='w') + proc = subprocess.Popen(cmd, stdout=outf, stderr=errf, shell=True) + timer = Timer(timeout, proc.kill) # enforces timeout + timer.start() + proc.communicate() + if timer.is_alive(): + timer.cancel() + returncode = proc.returncode + else: + returncode = EXIT_TIMEOUT + if outf is not None: + outf.close() + if errf is not None: + errf.close() + return returncode + +def GetJackClassPath(): + """Returns Jack's classpath.""" + top = os.environ.get('ANDROID_BUILD_TOP') + if top is None: + raise FatalError('Cannot find AOSP build top') + libdir = top + '/out/host/common/obj/JAVA_LIBRARIES' + return libdir + '/core-libart-hostdex_intermediates/classes.jack:' \ + + libdir + '/core-oj-hostdex_intermediates/classes.jack' + +def GetExecutionModeRunner(mode): + """Returns a runner for the given execution mode.
+ + Args: + mode: string, execution mode + Returns: + TestRunner with given execution mode + Raises: + FatalError: error for unknown execution mode + """ + if mode == 'ri': + return TestRunnerRIOnHost() + if mode == 'hint': + return TestRunnerArtOnHost(True) + if mode == 'hopt': + return TestRunnerArtOnHost(False) + if mode == 'tint': + return TestRunnerArtOnTarget(True) + if mode == 'topt': + return TestRunnerArtOnTarget(False) + raise FatalError('Unknown execution mode') + +def GetReturnCode(retc): + """Returns a string representation of the given normalized return code. + + Args: + retc: int, normalized return code + Returns: + string representation of normalized return code + Raises: + FatalError: error for unknown normalized return code + """ + if retc == EXIT_SUCCESS: + return 'SUCCESS' + if retc == EXIT_TIMEOUT: + return 'TIMED-OUT' + if retc == EXIT_NOTCOMPILED: + return 'NOT-COMPILED' + if retc == EXIT_NOTRUN: + return 'NOT-RUN' + raise FatalError('Unknown normalized return code') + +# +# Execution mode classes. +# + +class TestRunner(object): + """Abstraction for running a test in a particular execution mode.""" + __metaclass__ = abc.ABCMeta + + def GetDescription(self): + """Returns a description string of the execution mode.""" + return self._description + + def GetId(self): + """Returns a short string that uniquely identifies the execution mode.""" + return self._id + + @abc.abstractmethod + def CompileAndRunTest(self): + """Compiles and runs the generated test. + + Ensures that the current Test.java in the temporary directory is compiled + and executed under the current execution mode. On success, transfers the + generated output to the file GetId()_run_out.txt in the temporary directory. + Cleans up after itself. + + Most nonzero return codes are assumed non-divergent, since systems may + exit in different ways. This is enforced by normalizing return codes. + + Returns: + normalized return code + """ + pass + +class TestRunnerRIOnHost(TestRunner): + """Concrete test runner of the reference implementation on host.""" + + def __init__(self): + """Constructor for the RI tester.""" + self._description = 'RI on host' + self._id = 'RI' + + def CompileAndRunTest(self): + if RunCommand('javac', 'Test.java', + out=None, err=None, timeout=30) == EXIT_SUCCESS: + retc = RunCommand('java', 'Test', 'RI_run_out.txt', err=None) + if retc != EXIT_SUCCESS and retc != EXIT_TIMEOUT: + retc = EXIT_NOTRUN + else: + retc = EXIT_NOTCOMPILED + # Cleanup and return. + RunCommand('rm', '-f Test.class', out=None, err=None) + return retc + +class TestRunnerArtOnHost(TestRunner): + """Concrete test runner of Art on host (interpreter or optimizing).""" + + def __init__(self, interpreter): + """Constructor for the Art on host tester. + + Args: + interpreter: boolean, selects between interpreter or optimizing + """ + self._art_args = '-cp classes.dex Test' + if interpreter: + self._description = 'Art interpreter on host' + self._id = 'HInt' + self._art_args = '-Xint ' + self._art_args + else: + self._description = 'Art optimizing on host' + self._id = 'HOpt' + self._jack_args = '-cp ' + GetJackClassPath() + ' --output-dex . Test.java' + + def CompileAndRunTest(self): + if RunCommand('jack', self._jack_args, + out=None, err='jackerr.txt', timeout=30) == EXIT_SUCCESS: + out = self.GetId() + '_run_out.txt' + retc = RunCommand('art', self._art_args, out, 'arterr.txt') + if retc != EXIT_SUCCESS and retc != EXIT_TIMEOUT: + retc = EXIT_NOTRUN + else: + retc = EXIT_NOTCOMPILED + # Cleanup and return.
+ RunCommand('rm', '-rf classes.dex jackerr.txt arterr.txt android-data*', + out=None, err=None) + return retc + +# TODO: very rough first version without proper cache, +# reuse staszkiewicz's module for properly setting up dalvikvm on target. +class TestRunnerArtOnTarget(TestRunner): + """Concrete test runner of Art on target (interpreter or optimizing).""" + + def __init__(self, interpreter): + """Constructor for the Art on target tester. + + Args: + interpreter: boolean, selects between interpreter or optimizing + """ + self._dalvik_args = '-cp /data/local/tmp/classes.dex Test' + if interpreter: + self._description = 'Art interpreter on target' + self._id = 'TInt' + self._dalvik_args = '-Xint ' + self._dalvik_args + else: + self._description = 'Art optimizing on target' + self._id = 'TOpt' + self._jack_args = '-cp ' + GetJackClassPath() + ' --output-dex . Test.java' + + def CompileAndRunTest(self): + if RunCommand('jack', self._jack_args, + out=None, err='jackerr.txt', timeout=30) == EXIT_SUCCESS: + if RunCommand('adb push', 'classes.dex /data/local/tmp/', + 'adb.txt', err=None) != EXIT_SUCCESS: + raise FatalError('Cannot push to target device') + out = self.GetId() + '_run_out.txt' + retc = RunCommand('adb shell dalvikvm', self._dalvik_args, out, err=None) + if retc != EXIT_SUCCESS and retc != EXIT_TIMEOUT: + retc = EXIT_NOTRUN + else: + retc = EXIT_NOTCOMPILED + # Cleanup and return. + RunCommand('rm', '-f classes.dex jackerr.txt adb.txt', + out=None, err=None) + RunCommand('adb shell', 'rm -f /data/local/tmp/classes.dex', + out=None, err=None) + return retc + +# +# Tester classes. +# + +class FatalError(Exception): + """Fatal error in the tester.""" + pass + +class JavaFuzzTester(object): + """Tester that runs JavaFuzz many times and reports divergences.""" + + def __init__(self, num_tests, mode1, mode2): + """Constructor for the tester. + + Args: + num_tests: int, number of tests to run + mode1: string, execution mode for first runner + mode2: string, execution mode for second runner + """ + self._num_tests = num_tests + self._runner1 = GetExecutionModeRunner(mode1) + self._runner2 = GetExecutionModeRunner(mode2) + self._save_dir = None + self._tmp_dir = None + # Statistics. + self._test = 0 + self._num_success = 0 + self._num_not_compiled = 0 + self._num_not_run = 0 + self._num_timed_out = 0 + self._num_divergences = 0 + + def __enter__(self): + """On entry, enters new temp directory after saving current directory.
+ + Raises: + FatalError: error when temp directory cannot be constructed + """ + self._save_dir = os.getcwd() + self._tmp_dir = mkdtemp(dir="/tmp/") + if self._tmp_dir is None: + raise FatalError('Cannot obtain temp directory') + os.chdir(self._tmp_dir) + return self + + def __exit__(self, etype, evalue, etraceback): + """On exit, re-enters previously saved current directory and cleans up.""" + os.chdir(self._save_dir) + if self._num_divergences == 0: + RunCommand('rm', '-rf ' + self._tmp_dir, out=None, err=None) + + def Run(self): + """Runs JavaFuzz many times and reports divergences.""" + print + print '**\n**** JavaFuzz Testing\n**' + print + print '#Tests :', self._num_tests + print 'Directory :', self._tmp_dir + print 'Exec-mode1:', self._runner1.GetDescription() + print 'Exec-mode2:', self._runner2.GetDescription() + print + self.ShowStats() + for self._test in range(1, self._num_tests + 1): + self.RunJavaFuzzTest() + self.ShowStats() + if self._num_divergences == 0: + print '\n\nsuccess (no divergences)\n' + else: + print '\n\nfailure (divergences)\n' + + def ShowStats(self): + """Shows current statistics (on the same line) while the tester is running.""" + print '\rTests:', self._test, \ 'Success:', self._num_success, \ 'Not-compiled:', self._num_not_compiled, \ 'Not-run:', self._num_not_run, \ 'Timed-out:', self._num_timed_out, \ 'Divergences:', self._num_divergences, + sys.stdout.flush() + + def RunJavaFuzzTest(self): + """Runs a single JavaFuzz test, comparing two execution modes.""" + self.ConstructTest() + retc1 = self._runner1.CompileAndRunTest() + retc2 = self._runner2.CompileAndRunTest() + self.CheckForDivergence(retc1, retc2) + self.CleanupTest() + + def ConstructTest(self): + """Uses JavaFuzz to generate the next Test.java test. + + Raises: + FatalError: error when javafuzz fails + """ + if RunCommand('javafuzz', args=None, + out='Test.java', err=None) != EXIT_SUCCESS: + raise FatalError('Unexpected error while running JavaFuzz') + + def CheckForDivergence(self, retc1, retc2): + """Checks for divergences and updates statistics. + + Args: + retc1: int, normalized return code of first runner + retc2: int, normalized return code of second runner + """ + if retc1 == retc2: + # Non-divergent in return code. + if retc1 == EXIT_SUCCESS: + # Both compilations and runs were successful, inspect generated output. + args = self._runner1.GetId() + '_run_out.txt ' \ + self._runner2.GetId() + '_run_out.txt' + if RunCommand('diff', args, out=None, err=None) != EXIT_SUCCESS: + self.ReportDivergence('divergence in output') + else: + self._num_success += 1 + elif retc1 == EXIT_TIMEOUT: + self._num_timed_out += 1 + elif retc1 == EXIT_NOTCOMPILED: + self._num_not_compiled += 1 + else: + self._num_not_run += 1 + else: + # Divergent in return code. + self.ReportDivergence('divergence in return code: ' + + GetReturnCode(retc1) + ' vs. ' + + GetReturnCode(retc2)) + + def ReportDivergence(self, reason): + """Reports and saves a divergence.""" + self._num_divergences += 1 + print '\n', self._test, reason + # Save. + ddir = 'divergence' + str(self._test) + RunCommand('mkdir', ddir, out=None, err=None) + RunCommand('mv', 'Test.java *.txt ' + ddir, out=None, err=None) + + def CleanupTest(self): + """Cleans up after a single test run.""" + RunCommand('rm', '-f Test.java *.txt', out=None, err=None) + + +def main(): + # Handle arguments.
+ parser = argparse.ArgumentParser() + parser.add_argument('--num_tests', default=10000, + type=int, help='number of tests to run') + parser.add_argument('--mode1', default='ri', + help='execution mode 1 (default: ri)') + parser.add_argument('--mode2', default='hopt', + help='execution mode 2 (default: hopt)') + args = parser.parse_args() + if args.mode1 == args.mode2: + raise FatalError('Identical execution modes given') + # Run the JavaFuzz tester. + with JavaFuzzTester(args.num_tests, args.mode1, args.mode2) as fuzzer: + fuzzer.Run() + +if __name__ == '__main__': + main()
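A typical invocation of the new driver might look as follows (illustrative; assumes a lunched AOSP environment where jack, art, and javafuzz are on the PATH, and a device is connected for the target modes):

    tools/javafuzz/run_java_fuzz_test.py --num_tests=100 --mode1=ri --mode2=hopt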