-rw-r--r--  compiler/common_compiler_test.cc                  | 21
-rw-r--r--  compiler/optimizing/code_generator_mips.cc        | 20
-rw-r--r--  compiler/optimizing/reference_type_propagation.cc | 16
-rw-r--r--  runtime/base/allocator.h                          |  9
-rw-r--r--  runtime/dex_file_verifier.cc                      | 10
-rw-r--r--  runtime/dex_file_verifier.h                       | 30
-rw-r--r--  runtime/exception_test.cc                         | 23
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc        | 29
-rw-r--r--  runtime/gc/collector/concurrent_copying.h         |  1
-rwxr-xr-x  tools/run-jdwp-tests.sh                           |  9
10 files changed, 136 insertions, 32 deletions
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 151437b4cb..c37cecaeac 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -77,11 +77,10 @@ void CommonCompilerTest::MakeExecutable(ArtMethod* method) {
     header_code_and_maps_chunks_.push_back(std::vector<uint8_t>());
     std::vector<uint8_t>* chunk = &header_code_and_maps_chunks_.back();
-    size_t size = sizeof(method_header) + code_size + vmap_table.size() + mapping_table_size +
-        gc_map_size;
-    size_t code_offset = compiled_method->AlignCode(size - code_size);
-    size_t padding = code_offset - (size - code_size);
-    chunk->reserve(padding + size);
+    const size_t max_padding = GetInstructionSetAlignment(compiled_method->GetInstructionSet());
+    const size_t size =
+        gc_map_size + mapping_table_size + vmap_table.size() + sizeof(method_header) + code_size;
+    chunk->reserve(size + max_padding);
     chunk->resize(sizeof(method_header));
     memcpy(&(*chunk)[0], &method_header, sizeof(method_header));
     chunk->insert(chunk->begin(), vmap_table.begin(), vmap_table.end());
@@ -91,10 +90,16 @@ void CommonCompilerTest::MakeExecutable(ArtMethod* method) {
     if (gc_map_used) {
       chunk->insert(chunk->begin(), gc_map.begin(), gc_map.end());
     }
-    chunk->insert(chunk->begin(), padding, 0);
     chunk->insert(chunk->end(), code.begin(), code.end());
-    CHECK_EQ(padding + size, chunk->size());
-    const void* code_ptr = &(*chunk)[code_offset];
+    CHECK_EQ(chunk->size(), size);
+    const void* unaligned_code_ptr = chunk->data() + (size - code_size);
+    size_t offset = dchecked_integral_cast<size_t>(reinterpret_cast<uintptr_t>(unaligned_code_ptr));
+    size_t padding = compiled_method->AlignCode(offset) - offset;
+    // Make sure no resizing takes place.
+    CHECK_GE(chunk->capacity(), chunk->size() + padding);
+    chunk->insert(chunk->begin(), padding, 0);
+    const void* code_ptr = reinterpret_cast<const uint8_t*>(unaligned_code_ptr) + padding;
+    CHECK_EQ(code_ptr, static_cast<const void*>(chunk->data() + (chunk->size() - code_size)));
     MakeExecutable(code_ptr, code.size());
     const void* method_code = CompiledMethod::CodePointer(code_ptr,
                                                           compiled_method->GetInstructionSet());
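For readers following the MakeExecutable() change above: the sketch below is a minimal standalone illustration of the reserve-then-pad idea, not ART code. It assumes a fixed 16-byte alignment in place of GetInstructionSetAlignment()/AlignCode(), and the names kAlignment, RoundUpTo, and BuildChunk are made up for illustration. Reserving the worst-case capacity first is what lets the later front-insertion of padding shift the code onto an aligned address without reallocating the buffer; the exception_test.cc hunk further down applies the same pattern to fake_header_code_and_maps_.

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative only: align the code that sits at the end of a byte buffer by
// prepending padding. kAlignment stands in for the instruction set alignment.
constexpr size_t kAlignment = 16;

size_t RoundUpTo(size_t value, size_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

std::vector<uint8_t> BuildChunk(const std::vector<uint8_t>& header,
                                const std::vector<uint8_t>& code) {
  std::vector<uint8_t> chunk;
  // Reserve the worst-case size up front so later inserts never reallocate;
  // a reallocation would invalidate the pointer arithmetic below.
  const size_t size = header.size() + code.size();
  chunk.reserve(size + kAlignment);
  chunk.insert(chunk.end(), header.begin(), header.end());
  chunk.insert(chunk.end(), code.begin(), code.end());
  // How many bytes of padding shift the code onto an aligned address?
  const uint8_t* unaligned_code_ptr = chunk.data() + (chunk.size() - code.size());
  const size_t offset = reinterpret_cast<uintptr_t>(unaligned_code_ptr);
  const size_t padding = RoundUpTo(offset, kAlignment) - offset;
  assert(chunk.capacity() >= chunk.size() + padding);  // No reallocation will occur.
  chunk.insert(chunk.begin(), padding, 0);             // Shifts the code forward by `padding`.
  const uint8_t* code_ptr = chunk.data() + (chunk.size() - code.size());
  assert(reinterpret_cast<uintptr_t>(code_ptr) % kAlignment == 0);
  return chunk;
}
```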
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 6aed4447f7..e6b9273d24 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -3118,15 +3118,25 @@ void InstructionCodeGeneratorMIPS::VisitInvokeVirtual(HInvokeVirtual* invoke) {
 }
 
 void LocationsBuilderMIPS::VisitLoadClass(HLoadClass* cls) {
-  LocationSummary::CallKind call_kind = cls->CanCallRuntime() ? LocationSummary::kCallOnSlowPath
-                                                              : LocationSummary::kNoCall;
-  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetOut(Location::RequiresRegister());
+  InvokeRuntimeCallingConvention calling_convention;
+  CodeGenerator::CreateLoadClassLocationSummary(
+      cls,
+      Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+      Location::RegisterLocation(V0));
 }
 
 void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) {
   LocationSummary* locations = cls->GetLocations();
+  if (cls->NeedsAccessCheck()) {
+    codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex());
+    codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInitializeTypeAndVerifyAccess),
+                            cls,
+                            cls->GetDexPc(),
+                            nullptr,
+                            IsDirectEntrypoint(kQuickInitializeTypeAndVerifyAccess));
+    return;
+  }
+
   Register out = locations->Out().AsRegister<Register>();
   Register current_method = locations->InAt(0).AsRegister<Register>();
   if (cls->IsReferrersClass()) {
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 26a05da4cb..659da068a9 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -373,12 +373,18 @@ void RTPVisitor::SetClassAsTypeInfo(HInstruction* instr,
   if (instr->IsInvokeStaticOrDirect() && instr->AsInvokeStaticOrDirect()->IsStringInit()) {
     // Calls to String.<init> are replaced with a StringFactory.
     if (kIsDebugBuild) {
-      ScopedObjectAccess soa(Thread::Current());
+      HInvoke* invoke = instr->AsInvoke();
       ClassLinker* cl = Runtime::Current()->GetClassLinker();
-      mirror::DexCache* dex_cache = cl->FindDexCache(
-          soa.Self(), instr->AsInvoke()->GetDexFile(), false);
-      ArtMethod* method = dex_cache->GetResolvedMethod(
-          instr->AsInvoke()->GetDexMethodIndex(), cl->GetImagePointerSize());
+      ScopedObjectAccess soa(Thread::Current());
+      StackHandleScope<2> hs(soa.Self());
+      Handle<mirror::DexCache> dex_cache(
+          hs.NewHandle(cl->FindDexCache(soa.Self(), invoke->GetDexFile(), false)));
+      // Use a null loader. We should probably use the compiling method's class loader,
+      // but then we would need to pass it to RTPVisitor just for this debug check. Since
+      // the method is from the String class, the null loader is good enough.
+      Handle<mirror::ClassLoader> loader;
+      ArtMethod* method = cl->ResolveMethod(
+          invoke->GetDexFile(), invoke->GetDexMethodIndex(), dex_cache, loader, nullptr, kDirect);
       DCHECK(method != nullptr);
       mirror::Class* declaring_class = method->GetDeclaringClass();
       DCHECK(declaring_class != nullptr);
diff --git a/runtime/base/allocator.h b/runtime/base/allocator.h
index ad255b8694..969f5b953f 100644
--- a/runtime/base/allocator.h
+++ b/runtime/base/allocator.h
@@ -22,6 +22,7 @@
 #include <unordered_map>
 
 #include "atomic.h"
+#include "base/hash_map.h"
 #include "base/macros.h"
 #include "base/mutex.h"
 #include "base/type_static_if.h"
@@ -170,6 +171,14 @@ template<class Key,
 using AllocationTrackingUnorderedMap = std::unordered_map<
     Key, T, Hash, Pred, TrackingAllocator<std::pair<const Key, T>, kTag>>;
 
+template<class Key,
+         class T,
+         class EmptyFn,
+         AllocatorTag kTag,
+         class Hash = std::hash<Key>,
+         class Pred = std::equal_to<Key>>
+using AllocationTrackingHashMap = HashMap<
+    Key, T, EmptyFn, Hash, Pred, TrackingAllocator<std::pair<Key, T>, kTag>>;
 }  // namespace art
 
 #endif  // ART_RUNTIME_BASE_ALLOCATOR_H_
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index a5f9d09900..440d696ea9 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -1416,7 +1416,12 @@ bool DexFileVerifier::CheckIntraSectionIterate(size_t offset, uint32_t section_c
     }
 
     if (IsDataSectionType(type)) {
-      offset_to_type_map_.Put(aligned_offset, type);
+      if (aligned_offset == 0u) {
+        ErrorStringPrintf("Item %d offset is 0", i);
+        return false;
+      }
+      DCHECK(offset_to_type_map_.Find(aligned_offset) == offset_to_type_map_.end());
+      offset_to_type_map_.Insert(std::pair<uint32_t, uint16_t>(aligned_offset, type));
     }
 
     aligned_offset = ptr_ - begin_;
@@ -1589,7 +1594,8 @@ bool DexFileVerifier::CheckIntraSection() {
 }
 
 bool DexFileVerifier::CheckOffsetToTypeMap(size_t offset, uint16_t type) {
-  auto it = offset_to_type_map_.find(offset);
+  DCHECK_NE(offset, 0u);
+  auto it = offset_to_type_map_.Find(offset);
   if (UNLIKELY(it == offset_to_type_map_.end())) {
     ErrorStringPrintf("No data map entry found @ %zx; expected %x", offset, type);
     return false;
diff --git a/runtime/dex_file_verifier.h b/runtime/dex_file_verifier.h
index 4f15357ea0..6c63749f04 100644
--- a/runtime/dex_file_verifier.h
+++ b/runtime/dex_file_verifier.h
@@ -175,7 +175,35 @@ class DexFileVerifier {
   const char* const location_;
   const DexFile::Header* const header_;
 
-  AllocationTrackingSafeMap<uint32_t, uint16_t, kAllocatorTagDexFileVerifier> offset_to_type_map_;
+  struct OffsetTypeMapEmptyFn {
+    // Make a hash map slot empty by making the offset 0. Offset 0 is a valid dex file offset that
+    // is in the offset of the dex file header. However, we only store data section items in the
+    // map, and these are after the header.
+    void MakeEmpty(std::pair<uint32_t, uint16_t>& pair) const {
+      pair.first = 0u;
+    }
+    // Check if a hash map slot is empty.
+    bool IsEmpty(const std::pair<uint32_t, uint16_t>& pair) const {
+      return pair.first == 0;
+    }
+  };
+  struct OffsetTypeMapHashCompareFn {
+    // Hash function for offset.
+    size_t operator()(const uint32_t key) const {
+      return key;
+    }
+    // std::equal function for offset.
+    bool operator()(const uint32_t a, const uint32_t b) const {
+      return a == b;
+    }
+  };
+  // Map from offset to dex file type, HashMap for performance reasons.
+  AllocationTrackingHashMap<uint32_t,
+                            uint16_t,
+                            OffsetTypeMapEmptyFn,
+                            kAllocatorTagDexFileVerifier,
+                            OffsetTypeMapHashCompareFn,
+                            OffsetTypeMapHashCompareFn> offset_to_type_map_;
   const uint8_t* ptr_;
   const void* previous_item_;
 
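A note on the new offset_to_type_map_: ART's HashMap (base/hash_map.h) stores entries in an open-addressed array and relies on an EmptyFn to mark free slots, which is why OffsetTypeMapEmptyFn reserves offset 0 as the sentinel and why CheckIntraSectionIterate() now rejects data-section items at offset 0 before inserting. The toy table below only illustrates that slot-sentinel convention; ToyOffsetMap, its linear probing, and the fixed capacity are made up for this sketch (it omits growth and deletion) and are not ART's implementation.

```cpp
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

// Illustrative only: free slots are distinguished by a sentinel key (offset 0),
// so real keys must never be 0 -- hence the verifier's "offset is 0" error.
struct EmptyFn {
  void MakeEmpty(std::pair<uint32_t, uint16_t>& slot) const { slot.first = 0u; }
  bool IsEmpty(const std::pair<uint32_t, uint16_t>& slot) const { return slot.first == 0u; }
};

class ToyOffsetMap {
 public:
  explicit ToyOffsetMap(size_t capacity) : slots_(capacity) {
    for (auto& slot : slots_) {
      empty_fn_.MakeEmpty(slot);  // Every slot starts out as the sentinel.
    }
  }

  // Linear probing; the toy omits growth, so the caller must not overfill it.
  void Insert(uint32_t offset, uint16_t type) {
    size_t index = Hash(offset);
    while (!empty_fn_.IsEmpty(slots_[index])) {
      index = (index + 1) % slots_.size();
    }
    slots_[index] = {offset, type};
  }

  const std::pair<uint32_t, uint16_t>* Find(uint32_t offset) const {
    size_t index = Hash(offset);
    for (size_t probes = 0; probes < slots_.size(); ++probes) {
      if (empty_fn_.IsEmpty(slots_[index])) {
        return nullptr;  // Hit a free slot: the key is not present.
      }
      if (slots_[index].first == offset) {
        return &slots_[index];
      }
      index = (index + 1) % slots_.size();
    }
    return nullptr;
  }

 private:
  size_t Hash(uint32_t offset) const { return offset % slots_.size(); }

  EmptyFn empty_fn_;
  std::vector<std::pair<uint32_t, uint16_t>> slots_;
};
```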
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index b1d4d35077..18ccd082ec 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -92,10 +92,25 @@ class ExceptionTest : public CommonRuntimeTest {
     fake_header_code_and_maps_.insert(fake_header_code_and_maps_.end(),
                                       fake_code_.begin(), fake_code_.end());
 
-    // NOTE: Don't align the code (it will not be executed) but check that the Thumb2
-    // adjustment will be a NOP, see EntryPointToCodePointer().
-    CHECK_ALIGNED(mapping_table_offset, 2);
-    const uint8_t* code_ptr = &fake_header_code_and_maps_[gc_map_offset];
+    // Align the code.
+    const size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
+    fake_header_code_and_maps_.reserve(fake_header_code_and_maps_.size() + alignment);
+    const void* unaligned_code_ptr =
+        fake_header_code_and_maps_.data() + (fake_header_code_and_maps_.size() - code_size);
+    size_t offset = dchecked_integral_cast<size_t>(reinterpret_cast<uintptr_t>(unaligned_code_ptr));
+    size_t padding = RoundUp(offset, alignment) - offset;
+    // Make sure no resizing takes place.
+    CHECK_GE(fake_header_code_and_maps_.capacity(), fake_header_code_and_maps_.size() + padding);
+    fake_header_code_and_maps_.insert(fake_header_code_and_maps_.begin(), padding, 0);
+    const void* code_ptr = reinterpret_cast<const uint8_t*>(unaligned_code_ptr) + padding;
+    CHECK_EQ(code_ptr,
+             static_cast<const void*>(fake_header_code_and_maps_.data() +
                                          (fake_header_code_and_maps_.size() - code_size)));
+
+    if (kRuntimeISA == kArm) {
+      // Check that the Thumb2 adjustment will be a NOP, see EntryPointToCodePointer().
+      CHECK_ALIGNED(mapping_table_offset, 2);
+    }
 
     method_f_ = my_klass_->FindVirtualMethod("f", "()I", sizeof(void*));
     ASSERT_TRUE(method_f_ != nullptr);
"" : " ") + "concurrent copying + mark sweep"), region_space_(nullptr), gc_barrier_(new Barrier(0)), gc_mark_stack_(accounting::ObjectStack::Create("concurrent copying gc mark stack", - 2 * MB, 2 * MB)), + kDefaultGcMarkStackSize, + kDefaultGcMarkStackSize)), mark_stack_lock_("concurrent copying mark stack lock", kMarkSweepMarkStackLock), thread_running_gc_(nullptr), is_marking_(false), is_active_(false), is_asserting_to_space_invariant_(false), @@ -577,6 +580,18 @@ void ConcurrentCopying::IssueEmptyCheckpoint() { Locks::mutator_lock_->SharedLock(self); } +void ConcurrentCopying::ExpandGcMarkStack() { + DCHECK(gc_mark_stack_->IsFull()); + const size_t new_size = gc_mark_stack_->Capacity() * 2; + std::vector<StackReference<mirror::Object>> temp(gc_mark_stack_->Begin(), + gc_mark_stack_->End()); + gc_mark_stack_->Resize(new_size); + for (auto& ref : temp) { + gc_mark_stack_->PushBack(ref.AsMirrorPtr()); + } + DCHECK(!gc_mark_stack_->IsFull()); +} + void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) { CHECK_EQ(is_mark_stack_push_disallowed_.LoadRelaxed(), 0) << " " << to_ref << " " << PrettyTypeOf(to_ref); @@ -587,7 +602,9 @@ void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) { if (self == thread_running_gc_) { // If GC-running thread, use the GC mark stack instead of a thread-local mark stack. CHECK(self->GetThreadLocalMarkStack() == nullptr); - CHECK(!gc_mark_stack_->IsFull()); + if (UNLIKELY(gc_mark_stack_->IsFull())) { + ExpandGcMarkStack(); + } gc_mark_stack_->PushBack(to_ref); } else { // Otherwise, use a thread-local mark stack. @@ -621,7 +638,9 @@ void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) { } else if (mark_stack_mode == kMarkStackModeShared) { // Access the shared GC mark stack with a lock. MutexLock mu(self, mark_stack_lock_); - CHECK(!gc_mark_stack_->IsFull()); + if (UNLIKELY(gc_mark_stack_->IsFull())) { + ExpandGcMarkStack(); + } gc_mark_stack_->PushBack(to_ref); } else { CHECK_EQ(static_cast<uint32_t>(mark_stack_mode), @@ -633,7 +652,9 @@ void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) { << "Only GC-running thread should access the mark stack " << "in the GC exclusive mark stack mode"; // Access the GC mark stack without a lock. - CHECK(!gc_mark_stack_->IsFull()); + if (UNLIKELY(gc_mark_stack_->IsFull())) { + ExpandGcMarkStack(); + } gc_mark_stack_->PushBack(to_ref); } } diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h index 8efad731b8..c32b19ea3a 100644 --- a/runtime/gc/collector/concurrent_copying.h +++ b/runtime/gc/collector/concurrent_copying.h @@ -182,6 +182,7 @@ class ConcurrentCopying : public GarbageCollector { void ReenableWeakRefAccess(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_); void DisableMarking() SHARED_REQUIRES(Locks::mutator_lock_); void IssueDisableMarkingCheckpoint() SHARED_REQUIRES(Locks::mutator_lock_); + void ExpandGcMarkStack() SHARED_REQUIRES(Locks::mutator_lock_); space::RegionSpace* region_space_; // The underlying region space. std::unique_ptr<Barrier> gc_barrier_; diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh index 9aed271c82..80f040219e 100755 --- a/tools/run-jdwp-tests.sh +++ b/tools/run-jdwp-tests.sh @@ -30,8 +30,6 @@ fi art="/data/local/tmp/system/bin/art" art_debugee="sh /data/local/tmp/system/bin/art" -# We use Quick's image on target because optimizing's image is not compiled debuggable. 
-image="-Ximage:/data/art-test/core.art" args=$@ debuggee_args="-Xcompiler-option --debuggable" device_dir="--device-dir=/data/local/tmp" @@ -41,6 +39,8 @@ vm_command="--vm-command=$art" image_compiler_option="" debug="no" verbose="no" +image="" +vm_args="" # By default, we run the whole JDWP test suite. test="org.apache.harmony.jpda.tests.share.AllTests" @@ -88,7 +88,10 @@ while true; do fi done -vm_args="--vm-arg $image --vm-arg -Xusejit:true" +if [[ "$image" != "" ]]; then + vm_args="--vm-arg $image" +fi +vm_args="$vm_args --vm-arg -Xusejit:true" debuggee_args="$debuggee_args -Xusejit:true" if [[ $debug == "yes" ]]; then art="$art -d" |