diff options
114 files changed, 2445 insertions, 2305 deletions
diff --git a/compiler/debug/elf_debug_info_writer.h b/compiler/debug/elf_debug_info_writer.h index 87e679fbea..f2002a0af6 100644 --- a/compiler/debug/elf_debug_info_writer.h +++ b/compiler/debug/elf_debug_info_writer.h @@ -204,12 +204,13 @@ class ElfCompilationUnitWriter { // Decode dex register locations for all stack maps. // It might be expensive, so do it just once and reuse the result. + std::unique_ptr<const CodeInfo> code_info; std::vector<DexRegisterMap> dex_reg_maps; if (accessor.HasCodeItem() && mi->code_info != nullptr) { - const CodeInfo code_info(mi->code_info); - for (size_t s = 0; s < code_info.GetNumberOfStackMaps(); ++s) { - const StackMap stack_map = code_info.GetStackMapAt(s); - dex_reg_maps.push_back(code_info.GetDexRegisterMapOf( + code_info.reset(new CodeInfo(mi->code_info)); + for (size_t s = 0; s < code_info->GetNumberOfStackMaps(); ++s) { + const StackMap stack_map = code_info->GetStackMapAt(s); + dex_reg_maps.push_back(code_info->GetDexRegisterMapOf( stack_map, accessor.RegistersSize())); } } diff --git a/compiler/debug/elf_debug_loc_writer.h b/compiler/debug/elf_debug_loc_writer.h index c1bf915212..8cb4e55bbc 100644 --- a/compiler/debug/elf_debug_loc_writer.h +++ b/compiler/debug/elf_debug_loc_writer.h @@ -149,11 +149,9 @@ static std::vector<VariableLocation> GetVariableLocations( DexRegisterMap dex_register_map = dex_register_maps[stack_map_index]; DCHECK(dex_register_map.IsValid()); CodeItemDataAccessor accessor(*method_info->dex_file, method_info->code_item); - reg_lo = dex_register_map.GetDexRegisterLocation( - vreg, accessor.RegistersSize(), code_info); + reg_lo = dex_register_map.GetDexRegisterLocation(vreg); if (is64bitValue) { - reg_hi = dex_register_map.GetDexRegisterLocation( - vreg + 1, accessor.RegistersSize(), code_info); + reg_hi = dex_register_map.GetDexRegisterLocation(vreg + 1); } // Add location entry for this address range. 
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc index 16f2d0f2cc..653e9edb45 100644 --- a/compiler/driver/compiler_driver.cc +++ b/compiler/driver/compiler_driver.cc @@ -391,7 +391,7 @@ static optimizer::DexToDexCompiler::CompilationLevel GetDexToDexCompilationLevel DCHECK(driver.GetCompilerOptions().IsQuickeningCompilationEnabled()); const char* descriptor = dex_file.GetClassDescriptor(class_def); ClassLinker* class_linker = runtime->GetClassLinker(); - mirror::Class* klass = class_linker->FindClass(self, descriptor, class_loader); + ObjPtr<mirror::Class> klass = class_linker->FindClass(self, descriptor, class_loader); if (klass == nullptr) { CHECK(self->IsExceptionPending()); self->ClearException(); diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc index 856cb36266..491e61f9b5 100644 --- a/compiler/driver/compiler_driver_test.cc +++ b/compiler/driver/compiler_driver_test.cc @@ -88,7 +88,7 @@ class CompilerDriverTest : public CommonCompilerTest { StackHandleScope<1> hs(soa.Self()); Handle<mirror::ClassLoader> loader( hs.NewHandle(soa.Decode<mirror::ClassLoader>(class_loader))); - mirror::Class* c = class_linker->FindClass(soa.Self(), descriptor, loader); + ObjPtr<mirror::Class> c = class_linker->FindClass(soa.Self(), descriptor, loader); CHECK(c != nullptr); const auto pointer_size = class_linker->GetImagePointerSize(); for (auto& m : c->GetMethods(pointer_size)) { @@ -115,14 +115,14 @@ TEST_F(CompilerDriverTest, DISABLED_LARGE_CompileDexLibCore) { ObjPtr<mirror::DexCache> dex_cache = class_linker_->FindDexCache(soa.Self(), dex); EXPECT_EQ(dex.NumStringIds(), dex_cache->NumStrings()); for (size_t i = 0; i < dex_cache->NumStrings(); i++) { - const mirror::String* string = dex_cache->GetResolvedString(dex::StringIndex(i)); + const ObjPtr<mirror::String> string = dex_cache->GetResolvedString(dex::StringIndex(i)); EXPECT_TRUE(string != nullptr) << "string_idx=" << i; } 
EXPECT_EQ(dex.NumTypeIds(), dex_cache->NumResolvedTypes()); for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) { - mirror::Class* type = dex_cache->GetResolvedType(dex::TypeIndex(i)); - EXPECT_TRUE(type != nullptr) << "type_idx=" << i - << " " << dex.GetTypeDescriptor(dex.GetTypeId(dex::TypeIndex(i))); + const ObjPtr<mirror::Class> type = dex_cache->GetResolvedType(dex::TypeIndex(i)); + EXPECT_TRUE(type != nullptr) + << "type_idx=" << i << " " << dex.GetTypeDescriptor(dex.GetTypeId(dex::TypeIndex(i))); } EXPECT_TRUE(dex_cache->StaticMethodSize() == dex_cache->NumResolvedMethods() || dex.NumMethodIds() == dex_cache->NumResolvedMethods()); @@ -228,7 +228,7 @@ class CompilerDriverProfileTest : public CompilerDriverTest { StackHandleScope<1> hs(self); Handle<mirror::ClassLoader> h_loader( hs.NewHandle(soa.Decode<mirror::ClassLoader>(class_loader))); - mirror::Class* klass = class_linker->FindClass(self, clazz.c_str(), h_loader); + ObjPtr<mirror::Class> klass = class_linker->FindClass(self, clazz.c_str(), h_loader); ASSERT_NE(klass, nullptr); const auto pointer_size = class_linker->GetImagePointerSize(); @@ -289,7 +289,7 @@ class CompilerDriverVerifyTest : public CompilerDriverTest { StackHandleScope<1> hs(self); Handle<mirror::ClassLoader> h_loader( hs.NewHandle(soa.Decode<mirror::ClassLoader>(class_loader))); - mirror::Class* klass = class_linker->FindClass(self, clazz.c_str(), h_loader); + ObjPtr<mirror::Class> klass = class_linker->FindClass(self, clazz.c_str(), h_loader); ASSERT_NE(klass, nullptr); EXPECT_TRUE(klass->IsVerified()); diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc index da1db4593b..15c07870a1 100644 --- a/compiler/exception_test.cc +++ b/compiler/exception_test.cc @@ -34,6 +34,7 @@ #include "mirror/object_array-inl.h" #include "mirror/stack_trace_element.h" #include "oat_quick_method_header.h" +#include "obj_ptr-inl.h" #include "optimizing/stack_map_stream.h" #include "runtime-inl.h" #include 
"scoped_thread_state_change-inl.h" @@ -122,7 +123,7 @@ class ExceptionTest : public CommonRuntimeTest { ArtMethod* method_g_; private: - mirror::Class* my_klass_; + ObjPtr<mirror::Class> my_klass_; }; TEST_F(ExceptionTest, FindCatchHandler) { diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc index de1be5b871..b358bfabe0 100644 --- a/compiler/optimizing/code_generator.cc +++ b/compiler/optimizing/code_generator.cc @@ -1161,8 +1161,8 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction, // last emitted is different than the native pc of the stack map just emitted. size_t number_of_stack_maps = stack_map_stream->GetNumberOfStackMaps(); if (number_of_stack_maps > 1) { - DCHECK_NE(stack_map_stream->GetStackMap(number_of_stack_maps - 1).native_pc_code_offset, - stack_map_stream->GetStackMap(number_of_stack_maps - 2).native_pc_code_offset); + DCHECK_NE(stack_map_stream->GetStackMapNativePcOffset(number_of_stack_maps - 1), + stack_map_stream->GetStackMapNativePcOffset(number_of_stack_maps - 2)); } } } @@ -1174,8 +1174,7 @@ bool CodeGenerator::HasStackMapAtCurrentPc() { if (count == 0) { return false; } - CodeOffset native_pc_offset = stack_map_stream->GetStackMap(count - 1).native_pc_code_offset; - return (native_pc_offset.Uint32Value(GetInstructionSet()) == pc); + return stack_map_stream->GetStackMapNativePcOffset(count - 1) == pc; } void CodeGenerator::MaybeRecordNativeDebugInfo(HInstruction* instruction, diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc index 7f3441fdf4..8be84a15bd 100644 --- a/compiler/optimizing/code_generator_mips.cc +++ b/compiler/optimizing/code_generator_mips.cc @@ -1042,8 +1042,7 @@ void CodeGeneratorMIPS::Finalize(CodeAllocator* allocator) { // Adjust native pc offsets in stack maps. 
StackMapStream* stack_map_stream = GetStackMapStream(); for (size_t i = 0, num = stack_map_stream->GetNumberOfStackMaps(); i != num; ++i) { - uint32_t old_position = - stack_map_stream->GetStackMap(i).native_pc_code_offset.Uint32Value(InstructionSet::kMips); + uint32_t old_position = stack_map_stream->GetStackMapNativePcOffset(i); uint32_t new_position = __ GetAdjustedPosition(old_position); DCHECK_GE(new_position, old_position); stack_map_stream->SetStackMapNativePcOffset(i, new_position); diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc index ee32b96daf..cd9e0e521e 100644 --- a/compiler/optimizing/code_generator_mips64.cc +++ b/compiler/optimizing/code_generator_mips64.cc @@ -988,8 +988,7 @@ void CodeGeneratorMIPS64::Finalize(CodeAllocator* allocator) { // Adjust native pc offsets in stack maps. StackMapStream* stack_map_stream = GetStackMapStream(); for (size_t i = 0, num = stack_map_stream->GetNumberOfStackMaps(); i != num; ++i) { - uint32_t old_position = - stack_map_stream->GetStackMap(i).native_pc_code_offset.Uint32Value(InstructionSet::kMips64); + uint32_t old_position = stack_map_stream->GetStackMapNativePcOffset(i); uint32_t new_position = __ GetAdjustedPosition(old_position); DCHECK_GE(new_position, old_position); stack_map_stream->SetStackMapNativePcOffset(i, new_position); diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc index dfe6d791c6..056f533398 100644 --- a/compiler/optimizing/intrinsics.cc +++ b/compiler/optimizing/intrinsics.cc @@ -272,7 +272,8 @@ IntrinsicVisitor::IntegerValueOfInfo IntrinsicVisitor::ComputeIntegerValueOfInfo ClassLinker* class_linker = runtime->GetClassLinker(); gc::Heap* heap = runtime->GetHeap(); IntegerValueOfInfo info; - info.integer_cache = class_linker->FindSystemClass(self, "Ljava/lang/Integer$IntegerCache;"); + info.integer_cache = + class_linker->FindSystemClass(self, "Ljava/lang/Integer$IntegerCache;").Ptr(); if 
(info.integer_cache == nullptr) { self->ClearException(); return info; @@ -281,7 +282,7 @@ IntrinsicVisitor::IntegerValueOfInfo IntrinsicVisitor::ComputeIntegerValueOfInfo // Optimization only works if the class is initialized and in the boot image. return info; } - info.integer = class_linker->FindSystemClass(self, "Ljava/lang/Integer;"); + info.integer = class_linker->FindSystemClass(self, "Ljava/lang/Integer;").Ptr(); if (info.integer == nullptr) { self->ClearException(); return info; diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc index 5287b4b2fa..fecf1ccbfa 100644 --- a/compiler/optimizing/intrinsics_arm_vixl.cc +++ b/compiler/optimizing/intrinsics_arm_vixl.cc @@ -25,7 +25,7 @@ #include "mirror/array-inl.h" #include "mirror/object_array-inl.h" #include "mirror/reference.h" -#include "mirror/string.h" +#include "mirror/string-inl.h" #include "scoped_thread_state_change-inl.h" #include "thread-current-inl.h" diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc index c6e375a1b2..b1dcb68415 100644 --- a/compiler/optimizing/stack_map_stream.cc +++ b/compiler/optimizing/stack_map_stream.cc @@ -25,6 +25,14 @@ namespace art { +uint32_t StackMapStream::GetStackMapNativePcOffset(size_t i) { + return StackMap::UnpackNativePc(stack_maps_[i].packed_native_pc, instruction_set_); +} + +void StackMapStream::SetStackMapNativePcOffset(size_t i, uint32_t native_pc_offset) { + stack_maps_[i].packed_native_pc = StackMap::PackNativePc(native_pc_offset, instruction_set_); +} + void StackMapStream::BeginStackMapEntry(uint32_t dex_pc, uint32_t native_pc_offset, uint32_t register_mask, @@ -33,7 +41,7 @@ void StackMapStream::BeginStackMapEntry(uint32_t dex_pc, uint8_t inlining_depth) { DCHECK_EQ(0u, current_entry_.dex_pc) << "EndStackMapEntry not called after BeginStackMapEntry"; current_entry_.dex_pc = dex_pc; - current_entry_.native_pc_code_offset = CodeOffset::FromOffset(native_pc_offset, 
instruction_set_); + current_entry_.packed_native_pc = StackMap::PackNativePc(native_pc_offset, instruction_set_); current_entry_.register_mask = register_mask; current_entry_.sp_mask = sp_mask; current_entry_.inlining_depth = inlining_depth; @@ -48,10 +56,6 @@ void StackMapStream::BeginStackMapEntry(uint32_t dex_pc, ArenaBitVector::Create(allocator_, num_dex_registers, true, kArenaAllocStackMapStream); current_entry_.dex_register_entry.live_dex_registers_mask->ClearAllBits(); } - if (sp_mask != nullptr) { - stack_mask_max_ = std::max(stack_mask_max_, sp_mask->GetHighestBitSet()); - } - current_dex_register_ = 0; } @@ -217,27 +221,36 @@ size_t StackMapStream::PrepareForFillIn() { PrepareMethodIndices(); // Dedup stack masks. Needs to be done first as it modifies the stack map entry. - size_t stack_mask_bits = stack_mask_max_ + 1; // Need room for max element too. - size_t num_stack_masks = PrepareStackMasks(stack_mask_bits); + BitmapTableBuilder stack_mask_builder(allocator_); + for (StackMapEntry& stack_map : stack_maps_) { + BitVector* mask = stack_map.sp_mask; + size_t num_bits = (mask != nullptr) ? mask->GetNumberOfBits() : 0; + if (num_bits != 0) { + stack_map.stack_mask_index = stack_mask_builder.Dedup(mask->GetRawStorage(), num_bits); + } else { + stack_map.stack_mask_index = StackMap::kNoValue; + } + } // Dedup register masks. Needs to be done first as it modifies the stack map entry. - size_t num_register_masks = PrepareRegisterMasks(); - - // Write dex register maps. 
- MemoryRegion dex_register_map_region = - EncodeMemoryRegion(&out_, &bit_offset, dex_register_map_bytes * kBitsPerByte); - for (DexRegisterMapEntry& entry : dex_register_entries_) { - size_t entry_size = entry.ComputeSize(location_catalog_entries_.size()); - if (entry_size != 0) { - DexRegisterMap dex_register_map( - dex_register_map_region.Subregion(entry.offset, entry_size)); - FillInDexRegisterMap(dex_register_map, - entry.num_dex_registers, - *entry.live_dex_registers_mask, - entry.locations_start_index); + BitTableBuilder<std::array<uint32_t, RegisterMask::kCount>> register_mask_builder(allocator_); + for (StackMapEntry& stack_map : stack_maps_) { + uint32_t register_mask = stack_map.register_mask; + if (register_mask != 0) { + uint32_t shift = LeastSignificantBit(register_mask); + std::array<uint32_t, RegisterMask::kCount> entry = { + register_mask >> shift, + shift, + }; + stack_map.register_mask_index = register_mask_builder.Dedup(&entry); + } else { + stack_map.register_mask_index = StackMap::kNoValue; } } + // Allocate space for dex register maps. + EncodeMemoryRegion(&out_, &bit_offset, dex_register_map_bytes * kBitsPerByte); + // Write dex register catalog. 
EncodeVarintBits(&out_, &bit_offset, location_catalog_entries_.size()); size_t location_catalog_bytes = ComputeDexRegisterLocationCatalogSize(); @@ -261,7 +274,7 @@ size_t StackMapStream::PrepareForFillIn() { for (const StackMapEntry& entry : stack_maps_) { if (entry.dex_method_index != dex::kDexNoIndex) { std::array<uint32_t, InvokeInfo::kCount> invoke_info_entry { - entry.native_pc_code_offset.CompressedValue(), + entry.packed_native_pc, entry.invoke_type, entry.dex_method_index_idx }; @@ -289,7 +302,7 @@ size_t StackMapStream::PrepareForFillIn() { inline_info_builder.Add(inline_info_entry); } std::array<uint32_t, StackMap::kCount> stack_map_entry { - entry.native_pc_code_offset.CompressedValue(), + entry.packed_native_pc, entry.dex_pc, dex_register_entries_[entry.dex_register_map_index].offset, entry.inlining_depth != 0 ? inline_info_index : InlineInfo::kNoValue, @@ -301,31 +314,8 @@ size_t StackMapStream::PrepareForFillIn() { stack_map_builder.Encode(&out_, &bit_offset); invoke_info_builder.Encode(&out_, &bit_offset); inline_info_builder.Encode(&out_, &bit_offset); - - // Write register masks table. - BitTableBuilder<uint32_t> register_mask_builder(allocator_); - for (size_t i = 0; i < num_register_masks; ++i) { - register_mask_builder.Add(register_masks_[i]); - } register_mask_builder.Encode(&out_, &bit_offset); - - // Write stack masks table. 
- EncodeVarintBits(&out_, &bit_offset, stack_mask_bits); - out_.resize(BitsToBytesRoundUp(bit_offset + stack_mask_bits * num_stack_masks)); - BitMemoryRegion stack_mask_region(MemoryRegion(out_.data(), out_.size()), - bit_offset, - stack_mask_bits * num_stack_masks); - if (stack_mask_bits > 0) { - for (size_t i = 0; i < num_stack_masks; ++i) { - size_t stack_mask_bytes = BitsToBytesRoundUp(stack_mask_bits); - BitMemoryRegion src(MemoryRegion(&stack_masks_[i * stack_mask_bytes], stack_mask_bytes)); - BitMemoryRegion dst = stack_mask_region.Subregion(i * stack_mask_bits, stack_mask_bits); - for (size_t bit_index = 0; bit_index < stack_mask_bits; bit_index += BitSizeOf<uint32_t>()) { - size_t num_bits = std::min<size_t>(stack_mask_bits - bit_index, BitSizeOf<uint32_t>()); - dst.StoreBits(bit_index, src.LoadBits(bit_index, num_bits), num_bits); - } - } - } + stack_mask_builder.Encode(&out_, &bit_offset); return UnsignedLeb128Size(out_.size()) + out_.size(); } @@ -338,6 +328,22 @@ void StackMapStream::FillInCodeInfo(MemoryRegion region) { uint8_t* ptr = EncodeUnsignedLeb128(region.begin(), out_.size()); region.CopyFromVector(ptr - region.begin(), out_); + // Write dex register maps. + CodeInfo code_info(region); + for (DexRegisterMapEntry& entry : dex_register_entries_) { + size_t entry_size = entry.ComputeSize(location_catalog_entries_.size()); + if (entry_size != 0) { + DexRegisterMap dex_register_map( + code_info.dex_register_maps_.Subregion(entry.offset, entry_size), + entry.num_dex_registers, + code_info); + FillInDexRegisterMap(dex_register_map, + entry.num_dex_registers, + *entry.live_dex_registers_mask, + entry.locations_start_index); + } + } + // Verify all written data in debug build. 
if (kIsDebugBuild) { CheckCodeInfo(region); @@ -362,7 +368,6 @@ void StackMapStream::FillInDexRegisterMap(DexRegisterMap dex_register_map, dex_register_map.SetLocationCatalogEntryIndex( index_in_dex_register_locations, location_catalog_entry_index, - num_dex_registers, location_catalog_entries_.size()); } } @@ -419,8 +424,7 @@ bool StackMapStream::DexRegisterMapEntryEquals(const DexRegisterMapEntry& a, } // Helper for CheckCodeInfo - check that register map has the expected content. -void StackMapStream::CheckDexRegisterMap(const CodeInfo& code_info, - const DexRegisterMap& dex_register_map, +void StackMapStream::CheckDexRegisterMap(const DexRegisterMap& dex_register_map, size_t num_dex_registers, BitVector* live_dex_registers_mask, size_t dex_register_locations_index) const { @@ -437,8 +441,7 @@ void StackMapStream::CheckDexRegisterMap(const CodeInfo& code_info, << dex_register_map.IsValid() << " " << dex_register_map.IsDexRegisterLive(reg); } else { DCHECK(dex_register_map.IsDexRegisterLive(reg)); - DexRegisterLocation seen = dex_register_map.GetDexRegisterLocation( - reg, num_dex_registers, code_info); + DexRegisterLocation seen = dex_register_map.GetDexRegisterLocation(reg); DCHECK_EQ(expected.GetKind(), seen.GetKind()); DCHECK_EQ(expected.GetValue(), seen.GetValue()); } @@ -448,17 +451,6 @@ void StackMapStream::CheckDexRegisterMap(const CodeInfo& code_info, } } -size_t StackMapStream::PrepareRegisterMasks() { - register_masks_.resize(stack_maps_.size(), 0u); - ScopedArenaUnorderedMap<uint32_t, size_t> dedupe(allocator_->Adapter(kArenaAllocStackMapStream)); - for (StackMapEntry& stack_map : stack_maps_) { - const size_t index = dedupe.size(); - stack_map.register_mask_index = dedupe.emplace(stack_map.register_mask, index).first->second; - register_masks_[index] = stack_map.register_mask; - } - return dedupe.size(); -} - void StackMapStream::PrepareMethodIndices() { CHECK(method_indices_.empty()); method_indices_.resize(stack_maps_.size() + 
inline_infos_.size()); @@ -481,35 +473,10 @@ void StackMapStream::PrepareMethodIndices() { method_indices_.resize(dedupe.size()); } - -size_t StackMapStream::PrepareStackMasks(size_t entry_size_in_bits) { - // Preallocate memory since we do not want it to move (the dedup map will point into it). - const size_t byte_entry_size = RoundUp(entry_size_in_bits, kBitsPerByte) / kBitsPerByte; - stack_masks_.resize(byte_entry_size * stack_maps_.size(), 0u); - // For deduplicating we store the stack masks as byte packed for simplicity. We can bit pack later - // when copying out from stack_masks_. - ScopedArenaUnorderedMap<MemoryRegion, - size_t, - FNVHash<MemoryRegion>, - MemoryRegion::ContentEquals> dedup( - stack_maps_.size(), allocator_->Adapter(kArenaAllocStackMapStream)); - for (StackMapEntry& stack_map : stack_maps_) { - size_t index = dedup.size(); - MemoryRegion stack_mask(stack_masks_.data() + index * byte_entry_size, byte_entry_size); - BitMemoryRegion stack_mask_bits(stack_mask); - for (size_t i = 0; i < entry_size_in_bits; i++) { - stack_mask_bits.StoreBit(i, stack_map.sp_mask != nullptr && stack_map.sp_mask->IsBitSet(i)); - } - stack_map.stack_mask_index = dedup.emplace(stack_mask, index).first->second; - } - return dedup.size(); -} - // Check that all StackMapStream inputs are correctly encoded by trying to read them back. void StackMapStream::CheckCodeInfo(MemoryRegion region) const { CodeInfo code_info(region); DCHECK_EQ(code_info.GetNumberOfStackMaps(), stack_maps_.size()); - DCHECK_EQ(code_info.GetNumberOfStackMaskBits(), static_cast<uint32_t>(stack_mask_max_ + 1)); DCHECK_EQ(code_info.GetNumberOfLocationCatalogEntries(), location_catalog_entries_.size()); size_t invoke_info_index = 0; for (size_t s = 0; s < stack_maps_.size(); ++s) { @@ -518,33 +485,29 @@ void StackMapStream::CheckCodeInfo(MemoryRegion region) const { // Check main stack map fields. 
DCHECK_EQ(stack_map.GetNativePcOffset(instruction_set_), - entry.native_pc_code_offset.Uint32Value(instruction_set_)); + StackMap::UnpackNativePc(entry.packed_native_pc, instruction_set_)); DCHECK_EQ(stack_map.GetDexPc(), entry.dex_pc); DCHECK_EQ(stack_map.GetRegisterMaskIndex(), entry.register_mask_index); DCHECK_EQ(code_info.GetRegisterMaskOf(stack_map), entry.register_mask); - const size_t num_stack_mask_bits = code_info.GetNumberOfStackMaskBits(); DCHECK_EQ(stack_map.GetStackMaskIndex(), entry.stack_mask_index); BitMemoryRegion stack_mask = code_info.GetStackMaskOf(stack_map); if (entry.sp_mask != nullptr) { DCHECK_GE(stack_mask.size_in_bits(), entry.sp_mask->GetNumberOfBits()); - for (size_t b = 0; b < num_stack_mask_bits; b++) { - DCHECK_EQ(stack_mask.LoadBit(b), entry.sp_mask->IsBitSet(b)); + for (size_t b = 0; b < stack_mask.size_in_bits(); b++) { + DCHECK_EQ(stack_mask.LoadBit(b), entry.sp_mask->IsBitSet(b)) << b; } } else { - for (size_t b = 0; b < num_stack_mask_bits; b++) { - DCHECK_EQ(stack_mask.LoadBit(b), 0u); - } + DCHECK_EQ(stack_mask.size_in_bits(), 0u); } if (entry.dex_method_index != dex::kDexNoIndex) { InvokeInfo invoke_info = code_info.GetInvokeInfo(invoke_info_index); DCHECK_EQ(invoke_info.GetNativePcOffset(instruction_set_), - entry.native_pc_code_offset.Uint32Value(instruction_set_)); + StackMap::UnpackNativePc(entry.packed_native_pc, instruction_set_)); DCHECK_EQ(invoke_info.GetInvokeType(), entry.invoke_type); DCHECK_EQ(invoke_info.GetMethodIndexIdx(), entry.dex_method_index_idx); invoke_info_index++; } - CheckDexRegisterMap(code_info, - code_info.GetDexRegisterMapOf( + CheckDexRegisterMap(code_info.GetDexRegisterMapOf( stack_map, entry.dex_register_entry.num_dex_registers), entry.dex_register_entry.num_dex_registers, entry.dex_register_entry.live_dex_registers_mask, @@ -570,8 +533,7 @@ void StackMapStream::CheckCodeInfo(MemoryRegion region) const { DCHECK_EQ(method_indices_[method_index_idx], inline_entry.method_index); } - 
CheckDexRegisterMap(code_info, - code_info.GetDexRegisterMapAtDepth( + CheckDexRegisterMap(code_info.GetDexRegisterMapAtDepth( d, inline_info, inline_entry.dex_register_entry.num_dex_registers), diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h index ea97cf6530..6d505b95db 100644 --- a/compiler/optimizing/stack_map_stream.h +++ b/compiler/optimizing/stack_map_stream.h @@ -68,11 +68,8 @@ class StackMapStream : public ValueObject { location_catalog_entries_indices_(allocator->Adapter(kArenaAllocStackMapStream)), dex_register_locations_(allocator->Adapter(kArenaAllocStackMapStream)), inline_infos_(allocator->Adapter(kArenaAllocStackMapStream)), - stack_masks_(allocator->Adapter(kArenaAllocStackMapStream)), - register_masks_(allocator->Adapter(kArenaAllocStackMapStream)), method_indices_(allocator->Adapter(kArenaAllocStackMapStream)), dex_register_entries_(allocator->Adapter(kArenaAllocStackMapStream)), - stack_mask_max_(-1), out_(allocator->Adapter(kArenaAllocStackMapStream)), dex_map_hash_to_stack_map_indices_(std::less<uint32_t>(), allocator->Adapter(kArenaAllocStackMapStream)), @@ -106,7 +103,7 @@ class StackMapStream : public ValueObject { // See runtime/stack_map.h to know what these fields contain. struct StackMapEntry { uint32_t dex_pc; - CodeOffset native_pc_code_offset; + uint32_t packed_native_pc; uint32_t register_mask; BitVector* sp_mask; uint32_t inlining_depth; @@ -151,14 +148,8 @@ class StackMapStream : public ValueObject { return stack_maps_.size(); } - const StackMapEntry& GetStackMap(size_t i) const { - return stack_maps_[i]; - } - - void SetStackMapNativePcOffset(size_t i, uint32_t native_pc_offset) { - stack_maps_[i].native_pc_code_offset = - CodeOffset::FromOffset(native_pc_offset, instruction_set_); - } + uint32_t GetStackMapNativePcOffset(size_t i); + void SetStackMapNativePcOffset(size_t i, uint32_t native_pc_offset); // Prepares the stream to fill in a memory region. Must be called before FillIn. 
// Returns the size (in bytes) needed to store this stream. @@ -171,12 +162,6 @@ class StackMapStream : public ValueObject { private: size_t ComputeDexRegisterLocationCatalogSize() const; - // Returns the number of unique stack masks. - size_t PrepareStackMasks(size_t entry_size_in_bits); - - // Returns the number of unique register masks. - size_t PrepareRegisterMasks(); - // Prepare and deduplicate method indices. void PrepareMethodIndices(); @@ -193,8 +178,7 @@ class StackMapStream : public ValueObject { const BitVector& live_dex_registers_mask, uint32_t start_index_in_dex_register_locations) const; - void CheckDexRegisterMap(const CodeInfo& code_info, - const DexRegisterMap& dex_register_map, + void CheckDexRegisterMap(const DexRegisterMap& dex_register_map, size_t num_dex_registers, BitVector* live_dex_registers_mask, size_t dex_register_locations_index) const; @@ -217,11 +201,8 @@ class StackMapStream : public ValueObject { // A set of concatenated maps of Dex register locations indices to `location_catalog_entries_`. 
ScopedArenaVector<size_t> dex_register_locations_; ScopedArenaVector<InlineInfoEntry> inline_infos_; - ScopedArenaVector<uint8_t> stack_masks_; - ScopedArenaVector<uint32_t> register_masks_; ScopedArenaVector<uint32_t> method_indices_; ScopedArenaVector<DexRegisterMapEntry> dex_register_entries_; - int stack_mask_max_; ScopedArenaVector<uint8_t> out_; diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc index 9db7588b3a..7178e6683f 100644 --- a/compiler/optimizing/stack_map_test.cc +++ b/compiler/optimizing/stack_map_test.cc @@ -32,10 +32,10 @@ static bool CheckStackMask( const StackMap& stack_map, const BitVector& bit_vector) { BitMemoryRegion stack_mask = code_info.GetStackMaskOf(stack_map); - if (bit_vector.GetNumberOfBits() > code_info.GetNumberOfStackMaskBits()) { + if (bit_vector.GetNumberOfBits() > stack_mask.size_in_bits()) { return false; } - for (size_t i = 0; i < code_info.GetNumberOfStackMaskBits(); ++i) { + for (size_t i = 0; i < stack_mask.size_in_bits(); ++i) { if (stack_mask.LoadBit(i) != bit_vector.IsBitSet(i)) { return false; } @@ -45,6 +45,8 @@ static bool CheckStackMask( using Kind = DexRegisterLocation::Kind; +constexpr static uint32_t kPcAlign = GetInstructionSetInstructionAlignment(kRuntimeISA); + TEST(StackMapTest, Test1) { MallocArenaPool pool; ArenaStack arena_stack(&pool); @@ -53,7 +55,7 @@ TEST(StackMapTest, Test1) { ArenaBitVector sp_mask(&allocator, 0, false); size_t number_of_dex_registers = 2; - stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0); + stream.BeginStackMapEntry(0, 64 * kPcAlign, 0x3, &sp_mask, number_of_dex_registers, 0); stream.AddDexRegisterEntry(Kind::kInStack, 0); // Short location. stream.AddDexRegisterEntry(Kind::kConstant, -2); // Short location. 
stream.EndStackMapEntry(); @@ -77,9 +79,9 @@ TEST(StackMapTest, Test1) { StackMap stack_map = code_info.GetStackMapAt(0); ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0))); - ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64))); + ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64 * kPcAlign))); ASSERT_EQ(0u, stack_map.GetDexPc()); - ASSERT_EQ(64u, stack_map.GetNativePcOffset(kRuntimeISA)); + ASSERT_EQ(64u * kPcAlign, stack_map.GetNativePcOffset(kRuntimeISA)); ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(stack_map)); ASSERT_TRUE(CheckStackMask(code_info, stack_map, sp_mask)); @@ -96,22 +98,15 @@ TEST(StackMapTest, Test1) { size_t expected_dex_register_map_size = 1u + 1u; ASSERT_EQ(expected_dex_register_map_size, dex_register_map.Size()); - ASSERT_EQ(Kind::kInStack, dex_register_map.GetLocationKind( - 0, number_of_dex_registers, code_info)); - ASSERT_EQ(Kind::kConstant, dex_register_map.GetLocationKind( - 1, number_of_dex_registers, code_info)); - ASSERT_EQ(Kind::kInStack, dex_register_map.GetLocationInternalKind( - 0, number_of_dex_registers, code_info)); - ASSERT_EQ(Kind::kConstantLargeValue, dex_register_map.GetLocationInternalKind( - 1, number_of_dex_registers, code_info)); - ASSERT_EQ(0, dex_register_map.GetStackOffsetInBytes( - 0, number_of_dex_registers, code_info)); - ASSERT_EQ(-2, dex_register_map.GetConstant(1, number_of_dex_registers, code_info)); - - size_t index0 = dex_register_map.GetLocationCatalogEntryIndex( - 0, number_of_dex_registers, number_of_catalog_entries); - size_t index1 = dex_register_map.GetLocationCatalogEntryIndex( - 1, number_of_dex_registers, number_of_catalog_entries); + ASSERT_EQ(Kind::kInStack, dex_register_map.GetLocationKind(0)); + ASSERT_EQ(Kind::kConstant, dex_register_map.GetLocationKind(1)); + ASSERT_EQ(Kind::kInStack, dex_register_map.GetLocationInternalKind(0)); + ASSERT_EQ(Kind::kConstantLargeValue, dex_register_map.GetLocationInternalKind(1)); + ASSERT_EQ(0, 
dex_register_map.GetStackOffsetInBytes(0)); + ASSERT_EQ(-2, dex_register_map.GetConstant(1)); + + size_t index0 = dex_register_map.GetLocationCatalogEntryIndex(0, number_of_catalog_entries); + size_t index1 = dex_register_map.GetLocationCatalogEntryIndex(1, number_of_catalog_entries); ASSERT_EQ(0u, index0); ASSERT_EQ(1u, index1); DexRegisterLocation location0 = location_catalog.GetDexRegisterLocation(index0); @@ -138,7 +133,7 @@ TEST(StackMapTest, Test2) { sp_mask1.SetBit(4); size_t number_of_dex_registers = 2; size_t number_of_dex_registers_in_inline_info = 0; - stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask1, number_of_dex_registers, 2); + stream.BeginStackMapEntry(0, 64 * kPcAlign, 0x3, &sp_mask1, number_of_dex_registers, 2); stream.AddDexRegisterEntry(Kind::kInStack, 0); // Short location. stream.AddDexRegisterEntry(Kind::kConstant, -2); // Large location. stream.BeginInlineInfoEntry(&art_method, 3, number_of_dex_registers_in_inline_info); @@ -150,7 +145,7 @@ TEST(StackMapTest, Test2) { ArenaBitVector sp_mask2(&allocator, 0, true); sp_mask2.SetBit(3); sp_mask2.SetBit(8); - stream.BeginStackMapEntry(1, 128, 0xFF, &sp_mask2, number_of_dex_registers, 0); + stream.BeginStackMapEntry(1, 128 * kPcAlign, 0xFF, &sp_mask2, number_of_dex_registers, 0); stream.AddDexRegisterEntry(Kind::kInRegister, 18); // Short location. stream.AddDexRegisterEntry(Kind::kInFpuRegister, 3); // Short location. stream.EndStackMapEntry(); @@ -158,7 +153,7 @@ TEST(StackMapTest, Test2) { ArenaBitVector sp_mask3(&allocator, 0, true); sp_mask3.SetBit(1); sp_mask3.SetBit(5); - stream.BeginStackMapEntry(2, 192, 0xAB, &sp_mask3, number_of_dex_registers, 0); + stream.BeginStackMapEntry(2, 192 * kPcAlign, 0xAB, &sp_mask3, number_of_dex_registers, 0); stream.AddDexRegisterEntry(Kind::kInRegister, 6); // Short location. stream.AddDexRegisterEntry(Kind::kInRegisterHigh, 8); // Short location. 
stream.EndStackMapEntry(); @@ -166,7 +161,7 @@ TEST(StackMapTest, Test2) { ArenaBitVector sp_mask4(&allocator, 0, true); sp_mask4.SetBit(6); sp_mask4.SetBit(7); - stream.BeginStackMapEntry(3, 256, 0xCD, &sp_mask4, number_of_dex_registers, 0); + stream.BeginStackMapEntry(3, 256 * kPcAlign, 0xCD, &sp_mask4, number_of_dex_registers, 0); stream.AddDexRegisterEntry(Kind::kInFpuRegister, 3); // Short location, same in stack map 2. stream.AddDexRegisterEntry(Kind::kInFpuRegisterHigh, 1); // Short location. stream.EndStackMapEntry(); @@ -192,9 +187,9 @@ TEST(StackMapTest, Test2) { { StackMap stack_map = code_info.GetStackMapAt(0); ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0))); - ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64))); + ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64 * kPcAlign))); ASSERT_EQ(0u, stack_map.GetDexPc()); - ASSERT_EQ(64u, stack_map.GetNativePcOffset(kRuntimeISA)); + ASSERT_EQ(64u * kPcAlign, stack_map.GetNativePcOffset(kRuntimeISA)); ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(stack_map)); ASSERT_TRUE(CheckStackMask(code_info, stack_map, sp_mask1)); @@ -211,22 +206,15 @@ TEST(StackMapTest, Test2) { size_t expected_dex_register_map_size = 1u + 1u; ASSERT_EQ(expected_dex_register_map_size, dex_register_map.Size()); - ASSERT_EQ(Kind::kInStack, dex_register_map.GetLocationKind( - 0, number_of_dex_registers, code_info)); - ASSERT_EQ(Kind::kConstant, dex_register_map.GetLocationKind( - 1, number_of_dex_registers, code_info)); - ASSERT_EQ(Kind::kInStack, dex_register_map.GetLocationInternalKind( - 0, number_of_dex_registers, code_info)); - ASSERT_EQ(Kind::kConstantLargeValue, dex_register_map.GetLocationInternalKind( - 1, number_of_dex_registers, code_info)); - ASSERT_EQ(0, dex_register_map.GetStackOffsetInBytes( - 0, number_of_dex_registers, code_info)); - ASSERT_EQ(-2, dex_register_map.GetConstant(1, number_of_dex_registers, code_info)); - - size_t index0 = 
dex_register_map.GetLocationCatalogEntryIndex( - 0, number_of_dex_registers, number_of_catalog_entries); - size_t index1 = dex_register_map.GetLocationCatalogEntryIndex( - 1, number_of_dex_registers, number_of_catalog_entries); + ASSERT_EQ(Kind::kInStack, dex_register_map.GetLocationKind(0)); + ASSERT_EQ(Kind::kConstant, dex_register_map.GetLocationKind(1)); + ASSERT_EQ(Kind::kInStack, dex_register_map.GetLocationInternalKind(0)); + ASSERT_EQ(Kind::kConstantLargeValue, dex_register_map.GetLocationInternalKind(1)); + ASSERT_EQ(0, dex_register_map.GetStackOffsetInBytes(0)); + ASSERT_EQ(-2, dex_register_map.GetConstant(1)); + + size_t index0 = dex_register_map.GetLocationCatalogEntryIndex(0, number_of_catalog_entries); + size_t index1 = dex_register_map.GetLocationCatalogEntryIndex(1, number_of_catalog_entries); ASSERT_EQ(0u, index0); ASSERT_EQ(1u, index1); DexRegisterLocation location0 = location_catalog.GetDexRegisterLocation(index0); @@ -251,9 +239,9 @@ TEST(StackMapTest, Test2) { { StackMap stack_map = code_info.GetStackMapAt(1); ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(1u))); - ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(128u))); + ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(128u * kPcAlign))); ASSERT_EQ(1u, stack_map.GetDexPc()); - ASSERT_EQ(128u, stack_map.GetNativePcOffset(kRuntimeISA)); + ASSERT_EQ(128u * kPcAlign, stack_map.GetNativePcOffset(kRuntimeISA)); ASSERT_EQ(0xFFu, code_info.GetRegisterMaskOf(stack_map)); ASSERT_TRUE(CheckStackMask(code_info, stack_map, sp_mask2)); @@ -270,23 +258,15 @@ TEST(StackMapTest, Test2) { size_t expected_dex_register_map_size = 1u + 1u; ASSERT_EQ(expected_dex_register_map_size, dex_register_map.Size()); - ASSERT_EQ(Kind::kInRegister, dex_register_map.GetLocationKind( - 0, number_of_dex_registers, code_info)); - ASSERT_EQ(Kind::kInFpuRegister, dex_register_map.GetLocationKind( - 1, number_of_dex_registers, code_info)); - ASSERT_EQ(Kind::kInRegister, 
dex_register_map.GetLocationInternalKind( - 0, number_of_dex_registers, code_info)); - ASSERT_EQ(Kind::kInFpuRegister, dex_register_map.GetLocationInternalKind( - 1, number_of_dex_registers, code_info)); - ASSERT_EQ(18, dex_register_map.GetMachineRegister( - 0, number_of_dex_registers, code_info)); - ASSERT_EQ(3, dex_register_map.GetMachineRegister( - 1, number_of_dex_registers, code_info)); - - size_t index0 = dex_register_map.GetLocationCatalogEntryIndex( - 0, number_of_dex_registers, number_of_catalog_entries); - size_t index1 = dex_register_map.GetLocationCatalogEntryIndex( - 1, number_of_dex_registers, number_of_catalog_entries); + ASSERT_EQ(Kind::kInRegister, dex_register_map.GetLocationKind(0)); + ASSERT_EQ(Kind::kInFpuRegister, dex_register_map.GetLocationKind(1)); + ASSERT_EQ(Kind::kInRegister, dex_register_map.GetLocationInternalKind(0)); + ASSERT_EQ(Kind::kInFpuRegister, dex_register_map.GetLocationInternalKind(1)); + ASSERT_EQ(18, dex_register_map.GetMachineRegister(0)); + ASSERT_EQ(3, dex_register_map.GetMachineRegister(1)); + + size_t index0 = dex_register_map.GetLocationCatalogEntryIndex(0, number_of_catalog_entries); + size_t index1 = dex_register_map.GetLocationCatalogEntryIndex(1, number_of_catalog_entries); ASSERT_EQ(2u, index0); ASSERT_EQ(3u, index1); DexRegisterLocation location0 = location_catalog.GetDexRegisterLocation(index0); @@ -305,9 +285,9 @@ TEST(StackMapTest, Test2) { { StackMap stack_map = code_info.GetStackMapAt(2); ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(2u))); - ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(192u))); + ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(192u * kPcAlign))); ASSERT_EQ(2u, stack_map.GetDexPc()); - ASSERT_EQ(192u, stack_map.GetNativePcOffset(kRuntimeISA)); + ASSERT_EQ(192u * kPcAlign, stack_map.GetNativePcOffset(kRuntimeISA)); ASSERT_EQ(0xABu, code_info.GetRegisterMaskOf(stack_map)); ASSERT_TRUE(CheckStackMask(code_info, stack_map, sp_mask3)); @@ 
-324,23 +304,15 @@ TEST(StackMapTest, Test2) { size_t expected_dex_register_map_size = 1u + 1u; ASSERT_EQ(expected_dex_register_map_size, dex_register_map.Size()); - ASSERT_EQ(Kind::kInRegister, dex_register_map.GetLocationKind( - 0, number_of_dex_registers, code_info)); - ASSERT_EQ(Kind::kInRegisterHigh, dex_register_map.GetLocationKind( - 1, number_of_dex_registers, code_info)); - ASSERT_EQ(Kind::kInRegister, dex_register_map.GetLocationInternalKind( - 0, number_of_dex_registers, code_info)); - ASSERT_EQ(Kind::kInRegisterHigh, dex_register_map.GetLocationInternalKind( - 1, number_of_dex_registers, code_info)); - ASSERT_EQ(6, dex_register_map.GetMachineRegister( - 0, number_of_dex_registers, code_info)); - ASSERT_EQ(8, dex_register_map.GetMachineRegister( - 1, number_of_dex_registers, code_info)); - - size_t index0 = dex_register_map.GetLocationCatalogEntryIndex( - 0, number_of_dex_registers, number_of_catalog_entries); - size_t index1 = dex_register_map.GetLocationCatalogEntryIndex( - 1, number_of_dex_registers, number_of_catalog_entries); + ASSERT_EQ(Kind::kInRegister, dex_register_map.GetLocationKind(0)); + ASSERT_EQ(Kind::kInRegisterHigh, dex_register_map.GetLocationKind(1)); + ASSERT_EQ(Kind::kInRegister, dex_register_map.GetLocationInternalKind(0)); + ASSERT_EQ(Kind::kInRegisterHigh, dex_register_map.GetLocationInternalKind(1)); + ASSERT_EQ(6, dex_register_map.GetMachineRegister(0)); + ASSERT_EQ(8, dex_register_map.GetMachineRegister(1)); + + size_t index0 = dex_register_map.GetLocationCatalogEntryIndex(0, number_of_catalog_entries); + size_t index1 = dex_register_map.GetLocationCatalogEntryIndex(1, number_of_catalog_entries); ASSERT_EQ(4u, index0); ASSERT_EQ(5u, index1); DexRegisterLocation location0 = location_catalog.GetDexRegisterLocation(index0); @@ -359,9 +331,9 @@ TEST(StackMapTest, Test2) { { StackMap stack_map = code_info.GetStackMapAt(3); ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(3u))); - 
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(256u))); + ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(256u * kPcAlign))); ASSERT_EQ(3u, stack_map.GetDexPc()); - ASSERT_EQ(256u, stack_map.GetNativePcOffset(kRuntimeISA)); + ASSERT_EQ(256u * kPcAlign, stack_map.GetNativePcOffset(kRuntimeISA)); ASSERT_EQ(0xCDu, code_info.GetRegisterMaskOf(stack_map)); ASSERT_TRUE(CheckStackMask(code_info, stack_map, sp_mask4)); @@ -378,23 +350,15 @@ TEST(StackMapTest, Test2) { size_t expected_dex_register_map_size = 1u + 1u; ASSERT_EQ(expected_dex_register_map_size, dex_register_map.Size()); - ASSERT_EQ(Kind::kInFpuRegister, dex_register_map.GetLocationKind( - 0, number_of_dex_registers, code_info)); - ASSERT_EQ(Kind::kInFpuRegisterHigh, dex_register_map.GetLocationKind( - 1, number_of_dex_registers, code_info)); - ASSERT_EQ(Kind::kInFpuRegister, dex_register_map.GetLocationInternalKind( - 0, number_of_dex_registers, code_info)); - ASSERT_EQ(Kind::kInFpuRegisterHigh, dex_register_map.GetLocationInternalKind( - 1, number_of_dex_registers, code_info)); - ASSERT_EQ(3, dex_register_map.GetMachineRegister( - 0, number_of_dex_registers, code_info)); - ASSERT_EQ(1, dex_register_map.GetMachineRegister( - 1, number_of_dex_registers, code_info)); - - size_t index0 = dex_register_map.GetLocationCatalogEntryIndex( - 0, number_of_dex_registers, number_of_catalog_entries); - size_t index1 = dex_register_map.GetLocationCatalogEntryIndex( - 1, number_of_dex_registers, number_of_catalog_entries); + ASSERT_EQ(Kind::kInFpuRegister, dex_register_map.GetLocationKind(0)); + ASSERT_EQ(Kind::kInFpuRegisterHigh, dex_register_map.GetLocationKind(1)); + ASSERT_EQ(Kind::kInFpuRegister, dex_register_map.GetLocationInternalKind(0)); + ASSERT_EQ(Kind::kInFpuRegisterHigh, dex_register_map.GetLocationInternalKind(1)); + ASSERT_EQ(3, dex_register_map.GetMachineRegister(0)); + ASSERT_EQ(1, dex_register_map.GetMachineRegister(1)); + + size_t index0 = 
dex_register_map.GetLocationCatalogEntryIndex(0, number_of_catalog_entries); + size_t index1 = dex_register_map.GetLocationCatalogEntryIndex(1, number_of_catalog_entries); ASSERT_EQ(3u, index0); // Shared with second stack map. ASSERT_EQ(6u, index1); DexRegisterLocation location0 = location_catalog.GetDexRegisterLocation(index0); @@ -422,7 +386,7 @@ TEST(StackMapTest, TestDeduplicateInlineInfoDexRegisterMap) { sp_mask1.SetBit(4); const size_t number_of_dex_registers = 2; const size_t number_of_dex_registers_in_inline_info = 2; - stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask1, number_of_dex_registers, 1); + stream.BeginStackMapEntry(0, 64 * kPcAlign, 0x3, &sp_mask1, number_of_dex_registers, 1); stream.AddDexRegisterEntry(Kind::kInStack, 0); // Short location. stream.AddDexRegisterEntry(Kind::kConstant, -2); // Large location. stream.BeginInlineInfoEntry(&art_method, 3, number_of_dex_registers_in_inline_info); @@ -452,9 +416,9 @@ TEST(StackMapTest, TestDeduplicateInlineInfoDexRegisterMap) { { StackMap stack_map = code_info.GetStackMapAt(0); ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0))); - ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64))); + ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64 * kPcAlign))); ASSERT_EQ(0u, stack_map.GetDexPc()); - ASSERT_EQ(64u, stack_map.GetNativePcOffset(kRuntimeISA)); + ASSERT_EQ(64u * kPcAlign, stack_map.GetNativePcOffset(kRuntimeISA)); ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(stack_map)); ASSERT_TRUE(CheckStackMask(code_info, stack_map, sp_mask1)); @@ -470,20 +434,15 @@ TEST(StackMapTest, TestDeduplicateInlineInfoDexRegisterMap) { size_t expected_map_size = 1u + 1u; ASSERT_EQ(expected_map_size, map.Size()); - ASSERT_EQ(Kind::kInStack, map.GetLocationKind(0, number_of_dex_registers, code_info)); - ASSERT_EQ(Kind::kConstant, - map.GetLocationKind(1, number_of_dex_registers, code_info)); - ASSERT_EQ(Kind::kInStack, - map.GetLocationInternalKind(0, 
number_of_dex_registers, code_info)); - ASSERT_EQ(Kind::kConstantLargeValue, - map.GetLocationInternalKind(1, number_of_dex_registers, code_info)); - ASSERT_EQ(0, map.GetStackOffsetInBytes(0, number_of_dex_registers, code_info)); - ASSERT_EQ(-2, map.GetConstant(1, number_of_dex_registers, code_info)); - - const size_t index0 = - map.GetLocationCatalogEntryIndex(0, number_of_dex_registers, number_of_catalog_entries); - const size_t index1 = - map.GetLocationCatalogEntryIndex(1, number_of_dex_registers, number_of_catalog_entries); + ASSERT_EQ(Kind::kInStack, map.GetLocationKind(0)); + ASSERT_EQ(Kind::kConstant, map.GetLocationKind(1)); + ASSERT_EQ(Kind::kInStack, map.GetLocationInternalKind(0)); + ASSERT_EQ(Kind::kConstantLargeValue, map.GetLocationInternalKind(1)); + ASSERT_EQ(0, map.GetStackOffsetInBytes(0)); + ASSERT_EQ(-2, map.GetConstant(1)); + + const size_t index0 = map.GetLocationCatalogEntryIndex(0, number_of_catalog_entries); + const size_t index1 = map.GetLocationCatalogEntryIndex(1, number_of_catalog_entries); ASSERT_EQ(0u, index0); ASSERT_EQ(1u, index1); DexRegisterLocation location0 = location_catalog.GetDexRegisterLocation(index0); @@ -512,7 +471,7 @@ TEST(StackMapTest, TestNonLiveDexRegisters) { ArenaBitVector sp_mask(&allocator, 0, false); uint32_t number_of_dex_registers = 2; - stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0); + stream.BeginStackMapEntry(0, 64 * kPcAlign, 0x3, &sp_mask, number_of_dex_registers, 0); stream.AddDexRegisterEntry(Kind::kNone, 0); // No location. stream.AddDexRegisterEntry(Kind::kConstant, -2); // Large location. 
stream.EndStackMapEntry(); @@ -535,9 +494,9 @@ TEST(StackMapTest, TestNonLiveDexRegisters) { StackMap stack_map = code_info.GetStackMapAt(0); ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0))); - ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64))); + ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64 * kPcAlign))); ASSERT_EQ(0u, stack_map.GetDexPc()); - ASSERT_EQ(64u, stack_map.GetNativePcOffset(kRuntimeISA)); + ASSERT_EQ(64u * kPcAlign, stack_map.GetNativePcOffset(kRuntimeISA)); ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(stack_map)); ASSERT_TRUE(stack_map.HasDexRegisterMap()); @@ -552,20 +511,14 @@ TEST(StackMapTest, TestNonLiveDexRegisters) { size_t expected_dex_register_map_size = 1u + 0u; ASSERT_EQ(expected_dex_register_map_size, dex_register_map.Size()); - ASSERT_EQ(Kind::kNone, dex_register_map.GetLocationKind( - 0, number_of_dex_registers, code_info)); - ASSERT_EQ(Kind::kConstant, dex_register_map.GetLocationKind( - 1, number_of_dex_registers, code_info)); - ASSERT_EQ(Kind::kNone, dex_register_map.GetLocationInternalKind( - 0, number_of_dex_registers, code_info)); - ASSERT_EQ(Kind::kConstantLargeValue, dex_register_map.GetLocationInternalKind( - 1, number_of_dex_registers, code_info)); - ASSERT_EQ(-2, dex_register_map.GetConstant(1, number_of_dex_registers, code_info)); - - size_t index0 = dex_register_map.GetLocationCatalogEntryIndex( - 0, number_of_dex_registers, number_of_catalog_entries); - size_t index1 = dex_register_map.GetLocationCatalogEntryIndex( - 1, number_of_dex_registers, number_of_catalog_entries); + ASSERT_EQ(Kind::kNone, dex_register_map.GetLocationKind(0)); + ASSERT_EQ(Kind::kConstant, dex_register_map.GetLocationKind(1)); + ASSERT_EQ(Kind::kNone, dex_register_map.GetLocationInternalKind(0)); + ASSERT_EQ(Kind::kConstantLargeValue, dex_register_map.GetLocationInternalKind(1)); + ASSERT_EQ(-2, dex_register_map.GetConstant(1)); + + size_t index0 = 
dex_register_map.GetLocationCatalogEntryIndex(0, number_of_catalog_entries); + size_t index1 = dex_register_map.GetLocationCatalogEntryIndex(1, number_of_catalog_entries); ASSERT_EQ(DexRegisterLocationCatalog::kNoLocationEntryIndex, index0); ASSERT_EQ(0u, index1); DexRegisterLocation location0 = location_catalog.GetDexRegisterLocation(index0); @@ -592,7 +545,7 @@ TEST(StackMapTest, DexRegisterMapOffsetOverflow) { ArenaBitVector sp_mask(&allocator, 0, false); uint32_t number_of_dex_registers = 1024; // Create the first stack map (and its Dex register map). - stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0); + stream.BeginStackMapEntry(0, 64 * kPcAlign, 0x3, &sp_mask, number_of_dex_registers, 0); uint32_t number_of_dex_live_registers_in_dex_register_map_0 = number_of_dex_registers - 8; for (uint32_t i = 0; i < number_of_dex_live_registers_in_dex_register_map_0; ++i) { // Use two different Dex register locations to populate this map, @@ -603,7 +556,7 @@ TEST(StackMapTest, DexRegisterMapOffsetOverflow) { } stream.EndStackMapEntry(); // Create the second stack map (and its Dex register map). - stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0); + stream.BeginStackMapEntry(0, 64 * kPcAlign, 0x3, &sp_mask, number_of_dex_registers, 0); for (uint32_t i = 0; i < number_of_dex_registers; ++i) { stream.AddDexRegisterEntry(Kind::kConstant, 0); // Short location. 
} @@ -632,8 +585,7 @@ TEST(StackMapTest, DexRegisterMapOffsetOverflow) { StackMap stack_map0 = code_info.GetStackMapAt(0); DexRegisterMap dex_register_map0 = code_info.GetDexRegisterMapOf(stack_map0, number_of_dex_registers); - ASSERT_EQ(127u, dex_register_map0.GetLocationMappingDataSize(number_of_dex_registers, - number_of_catalog_entries)); + ASSERT_EQ(127u, dex_register_map0.GetLocationMappingDataSize(number_of_catalog_entries)); ASSERT_EQ(255u, dex_register_map0.Size()); StackMap stack_map1 = code_info.GetStackMapAt(1); @@ -655,17 +607,17 @@ TEST(StackMapTest, TestShareDexRegisterMap) { ArenaBitVector sp_mask(&allocator, 0, false); uint32_t number_of_dex_registers = 2; // First stack map. - stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0); + stream.BeginStackMapEntry(0, 64 * kPcAlign, 0x3, &sp_mask, number_of_dex_registers, 0); stream.AddDexRegisterEntry(Kind::kInRegister, 0); // Short location. stream.AddDexRegisterEntry(Kind::kConstant, -2); // Large location. stream.EndStackMapEntry(); // Second stack map, which should share the same dex register map. - stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0); + stream.BeginStackMapEntry(0, 64 * kPcAlign, 0x3, &sp_mask, number_of_dex_registers, 0); stream.AddDexRegisterEntry(Kind::kInRegister, 0); // Short location. stream.AddDexRegisterEntry(Kind::kConstant, -2); // Large location. stream.EndStackMapEntry(); // Third stack map (doesn't share the dex register map). - stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0); + stream.BeginStackMapEntry(0, 64 * kPcAlign, 0x3, &sp_mask, number_of_dex_registers, 0); stream.AddDexRegisterEntry(Kind::kInRegister, 2); // Short location. stream.AddDexRegisterEntry(Kind::kConstant, -2); // Large location. stream.EndStackMapEntry(); @@ -680,20 +632,20 @@ TEST(StackMapTest, TestShareDexRegisterMap) { // Verify first stack map. 
StackMap sm0 = ci.GetStackMapAt(0); DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm0, number_of_dex_registers); - ASSERT_EQ(0, dex_registers0.GetMachineRegister(0, number_of_dex_registers, ci)); - ASSERT_EQ(-2, dex_registers0.GetConstant(1, number_of_dex_registers, ci)); + ASSERT_EQ(0, dex_registers0.GetMachineRegister(0)); + ASSERT_EQ(-2, dex_registers0.GetConstant(1)); // Verify second stack map. StackMap sm1 = ci.GetStackMapAt(1); DexRegisterMap dex_registers1 = ci.GetDexRegisterMapOf(sm1, number_of_dex_registers); - ASSERT_EQ(0, dex_registers1.GetMachineRegister(0, number_of_dex_registers, ci)); - ASSERT_EQ(-2, dex_registers1.GetConstant(1, number_of_dex_registers, ci)); + ASSERT_EQ(0, dex_registers1.GetMachineRegister(0)); + ASSERT_EQ(-2, dex_registers1.GetConstant(1)); // Verify third stack map. StackMap sm2 = ci.GetStackMapAt(2); DexRegisterMap dex_registers2 = ci.GetDexRegisterMapOf(sm2, number_of_dex_registers); - ASSERT_EQ(2, dex_registers2.GetMachineRegister(0, number_of_dex_registers, ci)); - ASSERT_EQ(-2, dex_registers2.GetConstant(1, number_of_dex_registers, ci)); + ASSERT_EQ(2, dex_registers2.GetMachineRegister(0)); + ASSERT_EQ(-2, dex_registers2.GetConstant(1)); // Verify dex register map offsets. 
ASSERT_EQ(sm0.GetDexRegisterMapOffset(), @@ -712,11 +664,11 @@ TEST(StackMapTest, TestNoDexRegisterMap) { ArenaBitVector sp_mask(&allocator, 0, false); uint32_t number_of_dex_registers = 0; - stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0); + stream.BeginStackMapEntry(0, 64 * kPcAlign, 0x3, &sp_mask, number_of_dex_registers, 0); stream.EndStackMapEntry(); number_of_dex_registers = 1; - stream.BeginStackMapEntry(1, 68, 0x4, &sp_mask, number_of_dex_registers, 0); + stream.BeginStackMapEntry(1, 68 * kPcAlign, 0x4, &sp_mask, number_of_dex_registers, 0); stream.EndStackMapEntry(); size_t size = stream.PrepareForFillIn(); @@ -734,9 +686,9 @@ TEST(StackMapTest, TestNoDexRegisterMap) { StackMap stack_map = code_info.GetStackMapAt(0); ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0))); - ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64))); + ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64 * kPcAlign))); ASSERT_EQ(0u, stack_map.GetDexPc()); - ASSERT_EQ(64u, stack_map.GetNativePcOffset(kRuntimeISA)); + ASSERT_EQ(64u * kPcAlign, stack_map.GetNativePcOffset(kRuntimeISA)); ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(stack_map)); ASSERT_FALSE(stack_map.HasDexRegisterMap()); @@ -744,9 +696,9 @@ TEST(StackMapTest, TestNoDexRegisterMap) { stack_map = code_info.GetStackMapAt(1); ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(1))); - ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(68))); + ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(68 * kPcAlign))); ASSERT_EQ(1u, stack_map.GetDexPc()); - ASSERT_EQ(68u, stack_map.GetNativePcOffset(kRuntimeISA)); + ASSERT_EQ(68u * kPcAlign, stack_map.GetNativePcOffset(kRuntimeISA)); ASSERT_EQ(0x4u, code_info.GetRegisterMaskOf(stack_map)); ASSERT_FALSE(stack_map.HasDexRegisterMap()); @@ -765,7 +717,7 @@ TEST(StackMapTest, InlineTest) { sp_mask1.SetBit(4); // First stack map. 
- stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask1, 2, 2); + stream.BeginStackMapEntry(0, 64 * kPcAlign, 0x3, &sp_mask1, 2, 2); stream.AddDexRegisterEntry(Kind::kInStack, 0); stream.AddDexRegisterEntry(Kind::kConstant, 4); @@ -781,7 +733,7 @@ TEST(StackMapTest, InlineTest) { stream.EndStackMapEntry(); // Second stack map. - stream.BeginStackMapEntry(2, 22, 0x3, &sp_mask1, 2, 3); + stream.BeginStackMapEntry(2, 22 * kPcAlign, 0x3, &sp_mask1, 2, 3); stream.AddDexRegisterEntry(Kind::kInStack, 56); stream.AddDexRegisterEntry(Kind::kConstant, 0); @@ -799,13 +751,13 @@ TEST(StackMapTest, InlineTest) { stream.EndStackMapEntry(); // Third stack map. - stream.BeginStackMapEntry(4, 56, 0x3, &sp_mask1, 2, 0); + stream.BeginStackMapEntry(4, 56 * kPcAlign, 0x3, &sp_mask1, 2, 0); stream.AddDexRegisterEntry(Kind::kNone, 0); stream.AddDexRegisterEntry(Kind::kConstant, 4); stream.EndStackMapEntry(); // Fourth stack map. - stream.BeginStackMapEntry(6, 78, 0x3, &sp_mask1, 2, 3); + stream.BeginStackMapEntry(6, 78 * kPcAlign, 0x3, &sp_mask1, 2, 3); stream.AddDexRegisterEntry(Kind::kInStack, 56); stream.AddDexRegisterEntry(Kind::kConstant, 0); @@ -833,8 +785,8 @@ TEST(StackMapTest, InlineTest) { StackMap sm0 = ci.GetStackMapAt(0); DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm0, 2); - ASSERT_EQ(0, dex_registers0.GetStackOffsetInBytes(0, 2, ci)); - ASSERT_EQ(4, dex_registers0.GetConstant(1, 2, ci)); + ASSERT_EQ(0, dex_registers0.GetStackOffsetInBytes(0)); + ASSERT_EQ(4, dex_registers0.GetConstant(1)); InlineInfo if0 = ci.GetInlineInfoOf(sm0); ASSERT_EQ(2u, if0.GetDepth()); @@ -844,12 +796,12 @@ TEST(StackMapTest, InlineTest) { ASSERT_TRUE(if0.EncodesArtMethodAtDepth(1)); DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(0, if0, 1); - ASSERT_EQ(8, dex_registers1.GetStackOffsetInBytes(0, 1, ci)); + ASSERT_EQ(8, dex_registers1.GetStackOffsetInBytes(0)); DexRegisterMap dex_registers2 = ci.GetDexRegisterMapAtDepth(1, if0, 3); - ASSERT_EQ(16, 
dex_registers2.GetStackOffsetInBytes(0, 3, ci)); - ASSERT_EQ(20, dex_registers2.GetConstant(1, 3, ci)); - ASSERT_EQ(15, dex_registers2.GetMachineRegister(2, 3, ci)); + ASSERT_EQ(16, dex_registers2.GetStackOffsetInBytes(0)); + ASSERT_EQ(20, dex_registers2.GetConstant(1)); + ASSERT_EQ(15, dex_registers2.GetMachineRegister(2)); } { @@ -857,8 +809,8 @@ TEST(StackMapTest, InlineTest) { StackMap sm1 = ci.GetStackMapAt(1); DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm1, 2); - ASSERT_EQ(56, dex_registers0.GetStackOffsetInBytes(0, 2, ci)); - ASSERT_EQ(0, dex_registers0.GetConstant(1, 2, ci)); + ASSERT_EQ(56, dex_registers0.GetStackOffsetInBytes(0)); + ASSERT_EQ(0, dex_registers0.GetConstant(1)); InlineInfo if1 = ci.GetInlineInfoOf(sm1); ASSERT_EQ(3u, if1.GetDepth()); @@ -870,12 +822,12 @@ TEST(StackMapTest, InlineTest) { ASSERT_TRUE(if1.EncodesArtMethodAtDepth(2)); DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(0, if1, 1); - ASSERT_EQ(12, dex_registers1.GetStackOffsetInBytes(0, 1, ci)); + ASSERT_EQ(12, dex_registers1.GetStackOffsetInBytes(0)); DexRegisterMap dex_registers2 = ci.GetDexRegisterMapAtDepth(1, if1, 3); - ASSERT_EQ(80, dex_registers2.GetStackOffsetInBytes(0, 3, ci)); - ASSERT_EQ(10, dex_registers2.GetConstant(1, 3, ci)); - ASSERT_EQ(5, dex_registers2.GetMachineRegister(2, 3, ci)); + ASSERT_EQ(80, dex_registers2.GetStackOffsetInBytes(0)); + ASSERT_EQ(10, dex_registers2.GetConstant(1)); + ASSERT_EQ(5, dex_registers2.GetMachineRegister(2)); ASSERT_FALSE(if1.HasDexRegisterMapAtDepth(2)); } @@ -886,7 +838,7 @@ TEST(StackMapTest, InlineTest) { DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm2, 2); ASSERT_FALSE(dex_registers0.IsDexRegisterLive(0)); - ASSERT_EQ(4, dex_registers0.GetConstant(1, 2, ci)); + ASSERT_EQ(4, dex_registers0.GetConstant(1)); ASSERT_FALSE(sm2.HasInlineInfo()); } @@ -895,8 +847,8 @@ TEST(StackMapTest, InlineTest) { StackMap sm3 = ci.GetStackMapAt(3); DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm3, 2); - 
ASSERT_EQ(56, dex_registers0.GetStackOffsetInBytes(0, 2, ci)); - ASSERT_EQ(0, dex_registers0.GetConstant(1, 2, ci)); + ASSERT_EQ(56, dex_registers0.GetStackOffsetInBytes(0)); + ASSERT_EQ(0, dex_registers0.GetConstant(1)); InlineInfo if2 = ci.GetInlineInfoOf(sm3); ASSERT_EQ(3u, if2.GetDepth()); @@ -910,34 +862,39 @@ TEST(StackMapTest, InlineTest) { ASSERT_FALSE(if2.HasDexRegisterMapAtDepth(0)); DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(1, if2, 1); - ASSERT_EQ(2, dex_registers1.GetMachineRegister(0, 1, ci)); + ASSERT_EQ(2, dex_registers1.GetMachineRegister(0)); DexRegisterMap dex_registers2 = ci.GetDexRegisterMapAtDepth(2, if2, 2); ASSERT_FALSE(dex_registers2.IsDexRegisterLive(0)); - ASSERT_EQ(3, dex_registers2.GetMachineRegister(1, 2, ci)); + ASSERT_EQ(3, dex_registers2.GetMachineRegister(1)); } } -TEST(StackMapTest, CodeOffsetTest) { - // Test minimum alignments, and decoding. - CodeOffset offset_thumb2 = - CodeOffset::FromOffset(kThumb2InstructionAlignment, InstructionSet::kThumb2); - CodeOffset offset_arm64 = - CodeOffset::FromOffset(kArm64InstructionAlignment, InstructionSet::kArm64); - CodeOffset offset_x86 = - CodeOffset::FromOffset(kX86InstructionAlignment, InstructionSet::kX86); - CodeOffset offset_x86_64 = - CodeOffset::FromOffset(kX86_64InstructionAlignment, InstructionSet::kX86_64); - CodeOffset offset_mips = - CodeOffset::FromOffset(kMipsInstructionAlignment, InstructionSet::kMips); - CodeOffset offset_mips64 = - CodeOffset::FromOffset(kMips64InstructionAlignment, InstructionSet::kMips64); - EXPECT_EQ(offset_thumb2.Uint32Value(InstructionSet::kThumb2), kThumb2InstructionAlignment); - EXPECT_EQ(offset_arm64.Uint32Value(InstructionSet::kArm64), kArm64InstructionAlignment); - EXPECT_EQ(offset_x86.Uint32Value(InstructionSet::kX86), kX86InstructionAlignment); - EXPECT_EQ(offset_x86_64.Uint32Value(InstructionSet::kX86_64), kX86_64InstructionAlignment); - EXPECT_EQ(offset_mips.Uint32Value(InstructionSet::kMips), kMipsInstructionAlignment); - 
EXPECT_EQ(offset_mips64.Uint32Value(InstructionSet::kMips64), kMips64InstructionAlignment); +TEST(StackMapTest, PackedNativePcTest) { + uint32_t packed_thumb2 = + StackMap::PackNativePc(kThumb2InstructionAlignment, InstructionSet::kThumb2); + uint32_t packed_arm64 = + StackMap::PackNativePc(kArm64InstructionAlignment, InstructionSet::kArm64); + uint32_t packed_x86 = + StackMap::PackNativePc(kX86InstructionAlignment, InstructionSet::kX86); + uint32_t packed_x86_64 = + StackMap::PackNativePc(kX86_64InstructionAlignment, InstructionSet::kX86_64); + uint32_t packed_mips = + StackMap::PackNativePc(kMipsInstructionAlignment, InstructionSet::kMips); + uint32_t packed_mips64 = + StackMap::PackNativePc(kMips64InstructionAlignment, InstructionSet::kMips64); + EXPECT_EQ(StackMap::UnpackNativePc(packed_thumb2, InstructionSet::kThumb2), + kThumb2InstructionAlignment); + EXPECT_EQ(StackMap::UnpackNativePc(packed_arm64, InstructionSet::kArm64), + kArm64InstructionAlignment); + EXPECT_EQ(StackMap::UnpackNativePc(packed_x86, InstructionSet::kX86), + kX86InstructionAlignment); + EXPECT_EQ(StackMap::UnpackNativePc(packed_x86_64, InstructionSet::kX86_64), + kX86_64InstructionAlignment); + EXPECT_EQ(StackMap::UnpackNativePc(packed_mips, InstructionSet::kMips), + kMipsInstructionAlignment); + EXPECT_EQ(StackMap::UnpackNativePc(packed_mips64, InstructionSet::kMips64), + kMips64InstructionAlignment); } TEST(StackMapTest, TestDeduplicateStackMask) { @@ -949,9 +906,9 @@ TEST(StackMapTest, TestDeduplicateStackMask) { ArenaBitVector sp_mask(&allocator, 0, true); sp_mask.SetBit(1); sp_mask.SetBit(4); - stream.BeginStackMapEntry(0, 4, 0x3, &sp_mask, 0, 0); + stream.BeginStackMapEntry(0, 4 * kPcAlign, 0x3, &sp_mask, 0, 0); stream.EndStackMapEntry(); - stream.BeginStackMapEntry(0, 8, 0x3, &sp_mask, 0, 0); + stream.BeginStackMapEntry(0, 8 * kPcAlign, 0x3, &sp_mask, 0, 0); stream.EndStackMapEntry(); size_t size = stream.PrepareForFillIn(); @@ -962,8 +919,8 @@ TEST(StackMapTest, 
TestDeduplicateStackMask) { CodeInfo code_info(region); ASSERT_EQ(2u, code_info.GetNumberOfStackMaps()); - StackMap stack_map1 = code_info.GetStackMapForNativePcOffset(4); - StackMap stack_map2 = code_info.GetStackMapForNativePcOffset(8); + StackMap stack_map1 = code_info.GetStackMapForNativePcOffset(4 * kPcAlign); + StackMap stack_map2 = code_info.GetStackMapForNativePcOffset(8 * kPcAlign); EXPECT_EQ(stack_map1.GetStackMaskIndex(), stack_map2.GetStackMaskIndex()); } @@ -976,13 +933,13 @@ TEST(StackMapTest, TestInvokeInfo) { ArenaBitVector sp_mask(&allocator, 0, true); sp_mask.SetBit(1); - stream.BeginStackMapEntry(0, 4, 0x3, &sp_mask, 0, 0); + stream.BeginStackMapEntry(0, 4 * kPcAlign, 0x3, &sp_mask, 0, 0); stream.AddInvoke(kSuper, 1); stream.EndStackMapEntry(); - stream.BeginStackMapEntry(0, 8, 0x3, &sp_mask, 0, 0); + stream.BeginStackMapEntry(0, 8 * kPcAlign, 0x3, &sp_mask, 0, 0); stream.AddInvoke(kStatic, 3); stream.EndStackMapEntry(); - stream.BeginStackMapEntry(0, 16, 0x3, &sp_mask, 0, 0); + stream.BeginStackMapEntry(0, 16 * kPcAlign, 0x3, &sp_mask, 0, 0); stream.AddInvoke(kDirect, 65535); stream.EndStackMapEntry(); @@ -999,9 +956,9 @@ TEST(StackMapTest, TestInvokeInfo) { MethodInfo method_info(method_info_region.begin()); ASSERT_EQ(3u, code_info.GetNumberOfStackMaps()); - InvokeInfo invoke1(code_info.GetInvokeInfoForNativePcOffset(4)); - InvokeInfo invoke2(code_info.GetInvokeInfoForNativePcOffset(8)); - InvokeInfo invoke3(code_info.GetInvokeInfoForNativePcOffset(16)); + InvokeInfo invoke1(code_info.GetInvokeInfoForNativePcOffset(4 * kPcAlign)); + InvokeInfo invoke2(code_info.GetInvokeInfoForNativePcOffset(8 * kPcAlign)); + InvokeInfo invoke3(code_info.GetInvokeInfoForNativePcOffset(16 * kPcAlign)); InvokeInfo invoke_invalid(code_info.GetInvokeInfoForNativePcOffset(12)); EXPECT_FALSE(invoke_invalid.IsValid()); // No entry for that index. 
EXPECT_TRUE(invoke1.IsValid()); @@ -1009,13 +966,13 @@ TEST(StackMapTest, TestInvokeInfo) { EXPECT_TRUE(invoke3.IsValid()); EXPECT_EQ(invoke1.GetInvokeType(), kSuper); EXPECT_EQ(invoke1.GetMethodIndex(method_info), 1u); - EXPECT_EQ(invoke1.GetNativePcOffset(kRuntimeISA), 4u); + EXPECT_EQ(invoke1.GetNativePcOffset(kRuntimeISA), 4u * kPcAlign); EXPECT_EQ(invoke2.GetInvokeType(), kStatic); EXPECT_EQ(invoke2.GetMethodIndex(method_info), 3u); - EXPECT_EQ(invoke2.GetNativePcOffset(kRuntimeISA), 8u); + EXPECT_EQ(invoke2.GetNativePcOffset(kRuntimeISA), 8u * kPcAlign); EXPECT_EQ(invoke3.GetInvokeType(), kDirect); EXPECT_EQ(invoke3.GetMethodIndex(method_info), 65535u); - EXPECT_EQ(invoke3.GetNativePcOffset(kRuntimeISA), 16u); + EXPECT_EQ(invoke3.GetNativePcOffset(kRuntimeISA), 16u * kPcAlign); } } // namespace art diff --git a/compiler/verifier_deps_test.cc b/compiler/verifier_deps_test.cc index c0892ff466..3fe2ec0ac0 100644 --- a/compiler/verifier_deps_test.cc +++ b/compiler/verifier_deps_test.cc @@ -65,17 +65,16 @@ class VerifierDepsTest : public CommonCompilerTest { callbacks_.reset(new VerifierDepsCompilerCallbacks()); } - mirror::Class* FindClassByName(const std::string& name, ScopedObjectAccess* soa) + ObjPtr<mirror::Class> FindClassByName(ScopedObjectAccess& soa, const std::string& name) REQUIRES_SHARED(Locks::mutator_lock_) { - StackHandleScope<1> hs(Thread::Current()); + StackHandleScope<1> hs(soa.Self()); Handle<mirror::ClassLoader> class_loader_handle( - hs.NewHandle(soa->Decode<mirror::ClassLoader>(class_loader_))); - mirror::Class* klass = class_linker_->FindClass(Thread::Current(), - name.c_str(), - class_loader_handle); + hs.NewHandle(soa.Decode<mirror::ClassLoader>(class_loader_))); + ObjPtr<mirror::Class> klass = + class_linker_->FindClass(soa.Self(), name.c_str(), class_loader_handle); if (klass == nullptr) { - DCHECK(Thread::Current()->IsExceptionPending()); - Thread::Current()->ClearException(); + DCHECK(soa.Self()->IsExceptionPending()); + 
soa.Self()->ClearException(); } return klass; } @@ -114,16 +113,16 @@ class VerifierDepsTest : public CommonCompilerTest { callbacks->SetVerifierDeps(verifier_deps_.get()); } - void LoadDexFile(ScopedObjectAccess* soa, const char* name1, const char* name2 = nullptr) + void LoadDexFile(ScopedObjectAccess& soa, const char* name1, const char* name2 = nullptr) REQUIRES_SHARED(Locks::mutator_lock_) { class_loader_ = (name2 == nullptr) ? LoadDex(name1) : LoadMultiDex(name1, name2); dex_files_ = GetDexFiles(class_loader_); primary_dex_file_ = dex_files_.front(); SetVerifierDeps(dex_files_); - StackHandleScope<1> hs(soa->Self()); + StackHandleScope<1> hs(soa.Self()); Handle<mirror::ClassLoader> loader = - hs.NewHandle(soa->Decode<mirror::ClassLoader>(class_loader_)); + hs.NewHandle(soa.Decode<mirror::ClassLoader>(class_loader_)); for (const DexFile* dex_file : dex_files_) { class_linker_->RegisterDexFile(*dex_file, loader.Get()); } @@ -133,16 +132,16 @@ class VerifierDepsTest : public CommonCompilerTest { compiler_driver_->SetDexFilesForOatFile(dex_files_); } - void LoadDexFile(ScopedObjectAccess* soa) REQUIRES_SHARED(Locks::mutator_lock_) { + void LoadDexFile(ScopedObjectAccess& soa) REQUIRES_SHARED(Locks::mutator_lock_) { LoadDexFile(soa, "VerifierDeps"); CHECK_EQ(dex_files_.size(), 1u); - klass_Main_ = FindClassByName("LMain;", soa); + klass_Main_ = FindClassByName(soa, "LMain;"); CHECK(klass_Main_ != nullptr); } bool VerifyMethod(const std::string& method_name) { ScopedObjectAccess soa(Thread::Current()); - LoadDexFile(&soa); + LoadDexFile(soa); StackHandleScope<2> hs(soa.Self()); Handle<mirror::ClassLoader> class_loader_handle( @@ -193,7 +192,7 @@ class VerifierDepsTest : public CommonCompilerTest { void VerifyDexFile(const char* multidex = nullptr) { { ScopedObjectAccess soa(Thread::Current()); - LoadDexFile(&soa, "VerifierDeps", multidex); + LoadDexFile(soa, "VerifierDeps", multidex); } SetupCompilerDriver(); VerifyWithCompilerDriver(/* verifier_deps */ nullptr); @@ 
-204,13 +203,14 @@ class VerifierDepsTest : public CommonCompilerTest { bool is_strict, bool is_assignable) { ScopedObjectAccess soa(Thread::Current()); - LoadDexFile(&soa); - mirror::Class* klass_dst = FindClassByName(dst, &soa); + LoadDexFile(soa); + StackHandleScope<1> hs(soa.Self()); + Handle<mirror::Class> klass_dst = hs.NewHandle(FindClassByName(soa, dst)); DCHECK(klass_dst != nullptr) << dst; - mirror::Class* klass_src = FindClassByName(src, &soa); + ObjPtr<mirror::Class> klass_src = FindClassByName(soa, src); DCHECK(klass_src != nullptr) << src; verifier_deps_->AddAssignability(*primary_dex_file_, - klass_dst, + klass_dst.Get(), klass_src, is_strict, is_assignable); @@ -453,12 +453,12 @@ class VerifierDepsTest : public CommonCompilerTest { std::vector<const DexFile*> dex_files_; const DexFile* primary_dex_file_; jobject class_loader_; - mirror::Class* klass_Main_; + ObjPtr<mirror::Class> klass_Main_; }; TEST_F(VerifierDepsTest, StringToId) { ScopedObjectAccess soa(Thread::Current()); - LoadDexFile(&soa); + LoadDexFile(soa); dex::StringIndex id_Main1 = verifier_deps_->GetIdFromString(*primary_dex_file_, "LMain;"); ASSERT_LT(id_Main1.index_, primary_dex_file_->NumStringIds()); @@ -1441,7 +1441,7 @@ TEST_F(VerifierDepsTest, CompilerDriver) { for (bool verify_failure : { false, true }) { { ScopedObjectAccess soa(Thread::Current()); - LoadDexFile(&soa, "VerifierDeps", multi); + LoadDexFile(soa, "VerifierDeps", multi); } VerifyWithCompilerDriver(/* verifier_deps */ nullptr); @@ -1450,7 +1450,7 @@ TEST_F(VerifierDepsTest, CompilerDriver) { { ScopedObjectAccess soa(Thread::Current()); - LoadDexFile(&soa, "VerifierDeps", multi); + LoadDexFile(soa, "VerifierDeps", multi); } verifier::VerifierDeps decoded_deps(dex_files_, ArrayRef<const uint8_t>(buffer)); if (verify_failure) { diff --git a/dex2oat/linker/image_test.cc b/dex2oat/linker/image_test.cc index ab6e7a875a..96c48b8798 100644 --- a/dex2oat/linker/image_test.cc +++ b/dex2oat/linker/image_test.cc @@ -111,18 
+111,18 @@ TEST_F(ImageTest, TestDefaultMethods) { // Test the pointer to quick code is the same in origin method // and in the copied method form the same oat file. - mirror::Class* iface_klass = class_linker_->LookupClass( - self, "LIface;", ObjPtr<mirror::ClassLoader>()); + ObjPtr<mirror::Class> iface_klass = + class_linker_->LookupClass(self, "LIface;", /* class_loader */ nullptr); ASSERT_NE(nullptr, iface_klass); ArtMethod* origin = iface_klass->FindInterfaceMethod("defaultMethod", "()V", pointer_size); ASSERT_NE(nullptr, origin); - ASSERT_TRUE(origin->GetDeclaringClass() == iface_klass); + ASSERT_OBJ_PTR_EQ(origin->GetDeclaringClass(), iface_klass); const void* code = origin->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size); // The origin method should have a pointer to quick code ASSERT_NE(nullptr, code); ASSERT_FALSE(class_linker_->IsQuickToInterpreterBridge(code)); - mirror::Class* impl_klass = class_linker_->LookupClass( - self, "LImpl;", ObjPtr<mirror::ClassLoader>()); + ObjPtr<mirror::Class> impl_klass = + class_linker_->LookupClass(self, "LImpl;", /* class_loader */ nullptr); ASSERT_NE(nullptr, impl_klass); ArtMethod* copied = FindCopiedMethod(origin, impl_klass); ASSERT_NE(nullptr, copied); @@ -132,20 +132,20 @@ TEST_F(ImageTest, TestDefaultMethods) { // Test the origin method has pointer to quick code // but the copied method has pointer to interpreter // because these methods are in different oat files. 
- mirror::Class* iterable_klass = class_linker_->LookupClass( - self, "Ljava/lang/Iterable;", ObjPtr<mirror::ClassLoader>()); + ObjPtr<mirror::Class> iterable_klass = + class_linker_->LookupClass(self, "Ljava/lang/Iterable;", /* class_loader */ nullptr); ASSERT_NE(nullptr, iterable_klass); origin = iterable_klass->FindClassMethod( "forEach", "(Ljava/util/function/Consumer;)V", pointer_size); ASSERT_NE(nullptr, origin); ASSERT_FALSE(origin->IsDirect()); - ASSERT_TRUE(origin->GetDeclaringClass() == iterable_klass); + ASSERT_OBJ_PTR_EQ(origin->GetDeclaringClass(), iterable_klass); code = origin->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size); // the origin method should have a pointer to quick code ASSERT_NE(nullptr, code); ASSERT_FALSE(class_linker_->IsQuickToInterpreterBridge(code)); - mirror::Class* iterablebase_klass = class_linker_->LookupClass( - self, "LIterableBase;", ObjPtr<mirror::ClassLoader>()); + ObjPtr<mirror::Class> iterablebase_klass = + class_linker_->LookupClass(self, "LIterableBase;", /* class_loader */ nullptr); ASSERT_NE(nullptr, iterablebase_klass); copied = FindCopiedMethod(origin, iterablebase_klass); ASSERT_NE(nullptr, copied); diff --git a/dex2oat/linker/image_test.h b/dex2oat/linker/image_test.h index 4b231ed35c..f0daf69850 100644 --- a/dex2oat/linker/image_test.h +++ b/dex2oat/linker/image_test.h @@ -97,7 +97,7 @@ class ImageTest : public CommonCompilerTest { return new std::unordered_set<std::string>(image_classes_); } - ArtMethod* FindCopiedMethod(ArtMethod* origin, mirror::Class* klass) + ArtMethod* FindCopiedMethod(ArtMethod* origin, ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_) { PointerSize pointer_size = class_linker_->GetImagePointerSize(); for (ArtMethod& m : klass->GetCopiedMethods(pointer_size)) { diff --git a/libdexfile/dex/class_accessor-inl.h b/libdexfile/dex/class_accessor-inl.h index 49ca98d47f..3bb9e93e5a 100644 --- a/libdexfile/dex/class_accessor-inl.h +++ b/libdexfile/dex/class_accessor-inl.h 
@@ -37,30 +37,26 @@ inline ClassAccessor::ClassAccessor(const DexFile& dex_file, const DexFile::Clas num_direct_methods_(ptr_pos_ != nullptr ? DecodeUnsignedLeb128(&ptr_pos_) : 0u), num_virtual_methods_(ptr_pos_ != nullptr ? DecodeUnsignedLeb128(&ptr_pos_) : 0u) {} -inline const uint8_t* ClassAccessor::Method::Read(const uint8_t* ptr) { - index_ += DecodeUnsignedLeb128(&ptr); - access_flags_ = DecodeUnsignedLeb128(&ptr); - code_off_ = DecodeUnsignedLeb128(&ptr); - return ptr; +inline void ClassAccessor::Method::Read() { + index_ += DecodeUnsignedLeb128(&ptr_pos_); + access_flags_ = DecodeUnsignedLeb128(&ptr_pos_); + code_off_ = DecodeUnsignedLeb128(&ptr_pos_); } -inline const uint8_t* ClassAccessor::Field::Read(const uint8_t* ptr) { - index_ += DecodeUnsignedLeb128(&ptr); - access_flags_ = DecodeUnsignedLeb128(&ptr); - return ptr; +inline void ClassAccessor::Field::Read() { + index_ += DecodeUnsignedLeb128(&ptr_pos_); + access_flags_ = DecodeUnsignedLeb128(&ptr_pos_); } template <typename DataType, typename Visitor> -inline const uint8_t* ClassAccessor::VisitMembers(size_t count, - const Visitor& visitor, - const uint8_t* ptr, - DataType* data) const { +inline void ClassAccessor::VisitMembers(size_t count, + const Visitor& visitor, + DataType* data) const { DCHECK(data != nullptr); for ( ; count != 0; --count) { - ptr = data->Read(ptr); + data->Read(); visitor(*data); } - return ptr; } template <typename StaticFieldVisitor, @@ -72,15 +68,15 @@ inline void ClassAccessor::VisitFieldsAndMethods( const InstanceFieldVisitor& instance_field_visitor, const DirectMethodVisitor& direct_method_visitor, const VirtualMethodVisitor& virtual_method_visitor) const { - Field field(dex_file_); - const uint8_t* ptr = VisitMembers(num_static_fields_, static_field_visitor, ptr_pos_, &field); + Field field(dex_file_, ptr_pos_); + VisitMembers(num_static_fields_, static_field_visitor, &field); field.NextSection(); - ptr = VisitMembers(num_instance_fields_, instance_field_visitor, ptr, 
&field); + VisitMembers(num_instance_fields_, instance_field_visitor, &field); - Method method(dex_file_, /*is_static_or_direct*/ true); - ptr = VisitMembers(num_direct_methods_, direct_method_visitor, ptr, &method); + Method method(dex_file_, field.ptr_pos_, /*is_static_or_direct*/ true); + VisitMembers(num_direct_methods_, direct_method_visitor, &method); method.NextSection(); - ptr = VisitMembers(num_virtual_methods_, virtual_method_visitor, ptr, &method); + VisitMembers(num_virtual_methods_, virtual_method_visitor, &method); } template <typename DirectMethodVisitor, @@ -119,23 +115,64 @@ inline const DexFile::CodeItem* ClassAccessor::Method::GetCodeItem() const { return dex_file_.GetCodeItem(code_off_); } +inline IterationRange<ClassAccessor::DataIterator<ClassAccessor::Field>> + ClassAccessor::GetFieldsInternal(size_t count) const { + return { DataIterator<Field>(dex_file_, 0u, num_static_fields_, count, ptr_pos_), + DataIterator<Field>(dex_file_, count, num_static_fields_, count, ptr_pos_) }; +} + +// Return an iteration range for the first <count> methods. +inline IterationRange<ClassAccessor::DataIterator<ClassAccessor::Method>> + ClassAccessor::GetMethodsInternal(size_t count) const { + // Skip over the fields. + Field field(dex_file_, ptr_pos_); + VisitMembers(NumFields(), VoidFunctor(), &field); + // Return the iterator pair. 
+ return { DataIterator<Method>(dex_file_, 0u, num_direct_methods_, count, field.ptr_pos_), + DataIterator<Method>(dex_file_, count, num_direct_methods_, count, field.ptr_pos_) }; +} + inline IterationRange<ClassAccessor::DataIterator<ClassAccessor::Field>> ClassAccessor::GetFields() const { - const uint32_t limit = num_static_fields_ + num_instance_fields_; - return { DataIterator<Field>(dex_file_, 0u, num_static_fields_, limit, ptr_pos_), - DataIterator<Field>(dex_file_, limit, num_static_fields_, limit, ptr_pos_) }; + return GetFieldsInternal(num_static_fields_ + num_instance_fields_); +} + +inline IterationRange<ClassAccessor::DataIterator<ClassAccessor::Field>> + ClassAccessor::GetStaticFields() const { + return GetFieldsInternal(num_static_fields_); +} + + +inline IterationRange<ClassAccessor::DataIterator<ClassAccessor::Field>> + ClassAccessor::GetInstanceFields() const { + IterationRange<ClassAccessor::DataIterator<ClassAccessor::Field>> fields = GetFields(); + // Skip the static fields. + return { std::next(fields.begin(), NumStaticFields()), fields.end() }; } inline IterationRange<ClassAccessor::DataIterator<ClassAccessor::Method>> ClassAccessor::GetMethods() const { - // Skip over the fields. - Field field(dex_file_); - const size_t skip_count = num_static_fields_ + num_instance_fields_; - const uint8_t* ptr_pos = VisitMembers(skip_count, VoidFunctor(), ptr_pos_, &field); - // Return the iterator pair for all the methods. 
- const uint32_t limit = num_direct_methods_ + num_virtual_methods_; - return { DataIterator<Method>(dex_file_, 0u, num_direct_methods_, limit, ptr_pos), - DataIterator<Method>(dex_file_, limit, num_direct_methods_, limit, ptr_pos) }; + return GetMethodsInternal(NumMethods()); +} + +inline IterationRange<ClassAccessor::DataIterator<ClassAccessor::Method>> + ClassAccessor::GetDirectMethods() const { + return GetMethodsInternal(NumDirectMethods()); +} + +inline IterationRange<ClassAccessor::DataIterator<ClassAccessor::Method>> + ClassAccessor::GetVirtualMethods() const { + IterationRange<DataIterator<Method>> methods = GetMethods(); + // Skip the direct fields. + return { std::next(methods.begin(), NumDirectMethods()), methods.end() }; +} + +inline void ClassAccessor::Field::UnHideAccessFlags() const { + DexFile::UnHideAccessFlags(const_cast<uint8_t*>(ptr_pos_), GetAccessFlags(), /*is_method*/ false); +} + +inline void ClassAccessor::Method::UnHideAccessFlags() const { + DexFile::UnHideAccessFlags(const_cast<uint8_t*>(ptr_pos_), GetAccessFlags(), /*is_method*/ true); } } // namespace art diff --git a/libdexfile/dex/class_accessor.h b/libdexfile/dex/class_accessor.h index dda6e1c1a6..4f0fd32e31 100644 --- a/libdexfile/dex/class_accessor.h +++ b/libdexfile/dex/class_accessor.h @@ -20,6 +20,7 @@ #include "base/utils.h" #include "code_item_accessors.h" #include "dex_file.h" +#include "hidden_api_access_flags.h" #include "invoke_type.h" #include "method_reference.h" #include "modifiers.h" @@ -33,12 +34,18 @@ class ClassAccessor { private: class BaseItem { public: + explicit BaseItem(const uint8_t* ptr_pos) : ptr_pos_(ptr_pos) {} + uint32_t GetIndex() const { return index_; } uint32_t GetAccessFlags() const { - return access_flags_; + return HiddenApiAccessFlags::RemoveFromDex(access_flags_); + } + + HiddenApiAccessFlags::ApiList DecodeHiddenAccessFlags() const { + return HiddenApiAccessFlags::DecodeFromDex(access_flags_); } bool IsFinal() const { @@ -46,6 +53,8 @@ class 
ClassAccessor { } protected: + // Internal data pointer for reading. + const uint8_t* ptr_pos_ = nullptr; uint32_t index_ = 0u; uint32_t access_flags_ = 0u; }; @@ -76,13 +85,18 @@ class ClassAccessor { return is_static_or_direct_; } + // Unhide the hidden API access flags at the iterator position. TODO: Deprecate. + void UnHideAccessFlags() const; + private: explicit Method(const DexFile& dex_file, + const uint8_t* ptr_pos, bool is_static_or_direct = true) - : dex_file_(dex_file), + : BaseItem(ptr_pos), + dex_file_(dex_file), is_static_or_direct_(is_static_or_direct) {} - const uint8_t* Read(const uint8_t* ptr); + void Read(); InvokeType GetDirectMethodInvokeType() const { return (GetAccessFlags() & kAccStatic) != 0 ? kStatic : kDirect; @@ -99,6 +113,7 @@ class ClassAccessor { } } + // Move to virtual method section. void NextSection() { DCHECK(is_static_or_direct_) << "Already in the virtual methods section"; is_static_or_direct_ = false; @@ -115,20 +130,31 @@ class ClassAccessor { // A decoded version of the field of a class_data_item. class Field : public BaseItem { public: - explicit Field(const DexFile& dex_file) : dex_file_(dex_file) {} + explicit Field(const DexFile& dex_file, + const uint8_t* ptr_pos) : BaseItem(ptr_pos), dex_file_(dex_file) {} const DexFile& GetDexFile() const { return dex_file_; } + bool IsStatic() const { + return is_static_; + } + + // Unhide the hidden API access flags at the iterator position. TODO: Deprecate. + void UnHideAccessFlags() const; + private: - const uint8_t* Read(const uint8_t* ptr); + void Read(); + // Move to instance fields section. 
void NextSection() { index_ = 0u; + is_static_ = false; } const DexFile& dex_file_; + bool is_static_ = true; friend class ClassAccessor; }; @@ -144,11 +170,10 @@ class ClassAccessor { uint32_t partition_pos, uint32_t iterator_end, const uint8_t* ptr_pos) - : data_(dex_file), + : data_(dex_file, ptr_pos), position_(position), partition_pos_(partition_pos), - iterator_end_(iterator_end), - ptr_pos_(ptr_pos) { + iterator_end_(iterator_end) { ReadData(); } @@ -205,8 +230,7 @@ class ClassAccessor { if (position_ == partition_pos_) { data_.NextSection(); } - DCHECK(ptr_pos_ != nullptr); - ptr_pos_ = data_.Read(ptr_pos_); + data_.Read(); } } @@ -217,8 +241,6 @@ class ClassAccessor { const uint32_t partition_pos_; // At iterator_end_, the iterator is no longer valid. const uint32_t iterator_end_; - // Internal data pointer. - const uint8_t* ptr_pos_; }; // Not explicit specifically for range-based loops. @@ -252,9 +274,21 @@ class ClassAccessor { // Return the iteration range for all the fields. IterationRange<DataIterator<Field>> GetFields() const; + // Return the iteration range for all the static fields. + IterationRange<DataIterator<Field>> GetStaticFields() const; + + // Return the iteration range for all the instance fields. + IterationRange<DataIterator<Field>> GetInstanceFields() const; + // Return the iteration range for all the methods. IterationRange<DataIterator<Method>> GetMethods() const; + // Return the iteration range for the direct methods. + IterationRange<DataIterator<Method>> GetDirectMethods() const; + + // Return the iteration range for the virtual methods. 
+ IterationRange<DataIterator<Method>> GetVirtualMethods() const; + uint32_t NumStaticFields() const { return num_static_fields_; } @@ -263,6 +297,10 @@ class ClassAccessor { return num_instance_fields_; } + uint32_t NumFields() const { + return NumStaticFields() + NumInstanceFields(); + } + uint32_t NumDirectMethods() const { return num_direct_methods_; } @@ -285,14 +323,22 @@ class ClassAccessor { return dex_file_; } + bool HasClassData() const { + return ptr_pos_ != nullptr; + } + protected: // Template visitor to reduce copy paste for visiting elements. // No thread safety analysis since the visitor may require capabilities. template <typename DataType, typename Visitor> - const uint8_t* VisitMembers(size_t count, - const Visitor& visitor, - const uint8_t* ptr, - DataType* data) const NO_THREAD_SAFETY_ANALYSIS; + void VisitMembers(size_t count, const Visitor& visitor, DataType* data) const + NO_THREAD_SAFETY_ANALYSIS; + + // Return an iteration range for the first <count> fields. + IterationRange<DataIterator<Field>> GetFieldsInternal(size_t count) const; + + // Return an iteration range for the first <count> methods. + IterationRange<DataIterator<Method>> GetMethodsInternal(size_t count) const; const DexFile& dex_file_; const dex::TypeIndex descriptor_index_ = {}; diff --git a/libdexfile/dex/class_accessor_test.cc b/libdexfile/dex/class_accessor_test.cc index 95380d8140..d0533c1811 100644 --- a/libdexfile/dex/class_accessor_test.cc +++ b/libdexfile/dex/class_accessor_test.cc @@ -38,18 +38,27 @@ TEST_F(ClassAccessorTest, TestVisiting) { auto fields = accessor.GetFields(); auto method_it = methods.begin(); auto field_it = fields.begin(); + auto instance_fields = accessor.GetInstanceFields(); + auto instance_field_it = instance_fields.begin(); accessor.VisitFieldsAndMethods( // Static fields. 
[&](const ClassAccessor::Field& field) { + EXPECT_TRUE(field.IsStatic()); + EXPECT_TRUE(field_it->IsStatic()); EXPECT_EQ(field.GetIndex(), field_it->GetIndex()); EXPECT_EQ(field.GetAccessFlags(), field_it->GetAccessFlags()); ++field_it; }, // Instance fields. [&](const ClassAccessor::Field& field) { + EXPECT_FALSE(field.IsStatic()); + EXPECT_FALSE(field_it->IsStatic()); EXPECT_EQ(field.GetIndex(), field_it->GetIndex()); EXPECT_EQ(field.GetAccessFlags(), field_it->GetAccessFlags()); + EXPECT_EQ(field.GetIndex(), instance_field_it->GetIndex()); + EXPECT_EQ(field.GetAccessFlags(), instance_field_it->GetAccessFlags()); ++field_it; + ++instance_field_it; }, // Direct methods. [&](const ClassAccessor::Method& method) { @@ -71,6 +80,7 @@ TEST_F(ClassAccessorTest, TestVisiting) { }); ASSERT_TRUE(field_it == fields.end()); ASSERT_TRUE(method_it == methods.end()); + ASSERT_TRUE(instance_field_it == instance_fields.end()); } EXPECT_EQ(class_def_idx, dex_file->NumClassDefs()); } diff --git a/libdexfile/dex/dex_file.cc b/libdexfile/dex/dex_file.cc index 9de260c862..f570158dfb 100644 --- a/libdexfile/dex/dex_file.cc +++ b/libdexfile/dex/dex_file.cc @@ -45,19 +45,18 @@ static_assert(std::is_trivially_copyable<dex::StringIndex>::value, "StringIndex static_assert(sizeof(dex::TypeIndex) == sizeof(uint16_t), "TypeIndex size is wrong"); static_assert(std::is_trivially_copyable<dex::TypeIndex>::value, "TypeIndex not trivial"); -void DexFile::UnHideAccessFlags(ClassDataItemIterator& class_it) { - uint8_t* data = const_cast<uint8_t*>(class_it.DataPointer()); - uint32_t new_flag = class_it.GetMemberAccessFlags(); - bool is_method = class_it.IsAtMethod(); +void DexFile::UnHideAccessFlags(uint8_t* data_ptr, + uint32_t new_access_flags, + bool is_method) { // Go back 1 uleb to start. 
- data = ReverseSearchUnsignedLeb128(data); + data_ptr = ReverseSearchUnsignedLeb128(data_ptr); if (is_method) { // Methods have another uleb field before the access flags - data = ReverseSearchUnsignedLeb128(data); + data_ptr = ReverseSearchUnsignedLeb128(data_ptr); } - DCHECK_EQ(HiddenApiAccessFlags::RemoveFromDex(DecodeUnsignedLeb128WithoutMovingCursor(data)), - new_flag); - UpdateUnsignedLeb128(data, new_flag); + DCHECK_EQ(HiddenApiAccessFlags::RemoveFromDex(DecodeUnsignedLeb128WithoutMovingCursor(data_ptr)), + new_access_flags); + UpdateUnsignedLeb128(data_ptr, new_access_flags); } uint32_t DexFile::CalculateChecksum() const { diff --git a/libdexfile/dex/dex_file.h b/libdexfile/dex/dex_file.h index f1f8b505bd..ed219808d2 100644 --- a/libdexfile/dex/dex_file.h +++ b/libdexfile/dex/dex_file.h @@ -1010,8 +1010,8 @@ class DexFile { return container_.get(); } - // Changes the dex file pointed to by class_it to not have any hiddenapi flags. - static void UnHideAccessFlags(ClassDataItemIterator& class_it); + // Changes the dex class data pointed to by data_ptr it to not have any hiddenapi flags. 
+ static void UnHideAccessFlags(uint8_t* data_ptr, uint32_t new_access_flags, bool is_method); inline IterationRange<ClassIterator> GetClasses() const; diff --git a/libprofile/profile/profile_compilation_info_test.cc b/libprofile/profile/profile_compilation_info_test.cc index ead7cba60b..42c3320ea5 100644 --- a/libprofile/profile/profile_compilation_info_test.cc +++ b/libprofile/profile/profile_compilation_info_test.cc @@ -17,20 +17,14 @@ #include <gtest/gtest.h> #include <stdio.h> -#include "art_method-inl.h" +#include "base/arena_allocator.h" +#include "base/common_art_test.h" #include "base/unix_file/fd_file.h" -#include "class_linker-inl.h" -#include "common_runtime_test.h" #include "dex/dex_file.h" #include "dex/dex_file_loader.h" #include "dex/method_reference.h" #include "dex/type_reference.h" -#include "handle_scope-inl.h" -#include "linear_alloc.h" -#include "mirror/class-inl.h" -#include "mirror/class_loader.h" #include "profile/profile_compilation_info.h" -#include "scoped_thread_state_change-inl.h" #include "ziparchive/zip_writer.h" namespace art { @@ -39,31 +33,14 @@ using Hotness = ProfileCompilationInfo::MethodHotness; static constexpr size_t kMaxMethodIds = 65535; -class ProfileCompilationInfoTest : public CommonRuntimeTest { +class ProfileCompilationInfoTest : public CommonArtTest { public: - void PostRuntimeCreate() OVERRIDE { - allocator_.reset(new ArenaAllocator(Runtime::Current()->GetArenaPool())); + void SetUp() OVERRIDE { + CommonArtTest::SetUp(); + allocator_.reset(new ArenaAllocator(&pool_)); } protected: - std::vector<ArtMethod*> GetVirtualMethods(jobject class_loader, - const std::string& clazz) { - ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - Thread* self = Thread::Current(); - ScopedObjectAccess soa(self); - StackHandleScope<1> hs(self); - Handle<mirror::ClassLoader> h_loader( - hs.NewHandle(self->DecodeJObject(class_loader)->AsClassLoader())); - ObjPtr<mirror::Class> klass = class_linker->FindClass(self, 
clazz.c_str(), h_loader); - - const auto pointer_size = class_linker->GetImagePointerSize(); - std::vector<ArtMethod*> methods; - for (auto& m : klass->GetVirtualMethods(pointer_size)) { - methods.push_back(&m); - } - return methods; - } - bool AddMethod(const std::string& dex_location, uint32_t checksum, uint16_t method_index, @@ -97,89 +74,6 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest { return static_cast<uint32_t>(file.GetFd()); } - bool SaveProfilingInfo( - const std::string& filename, - const std::vector<ArtMethod*>& methods, - const std::set<DexCacheResolvedClasses>& resolved_classes, - Hotness::Flag flags) { - ProfileCompilationInfo info; - std::vector<ProfileMethodInfo> profile_methods; - ScopedObjectAccess soa(Thread::Current()); - for (ArtMethod* method : methods) { - profile_methods.emplace_back( - MethodReference(method->GetDexFile(), method->GetDexMethodIndex())); - } - if (!info.AddMethods(profile_methods, flags) || !info.AddClasses(resolved_classes)) { - return false; - } - if (info.GetNumberOfMethods() != profile_methods.size()) { - return false; - } - ProfileCompilationInfo file_profile; - if (!file_profile.Load(filename, false)) { - return false; - } - if (!info.MergeWith(file_profile)) { - return false; - } - - return info.Save(filename, nullptr); - } - - // Saves the given art methods to a profile backed by 'filename' and adds - // some fake inline caches to it. The added inline caches are returned in - // the out map `profile_methods_map`. 
- bool SaveProfilingInfoWithFakeInlineCaches( - const std::string& filename, - const std::vector<ArtMethod*>& methods, - Hotness::Flag flags, - /*out*/ SafeMap<ArtMethod*, ProfileMethodInfo>* profile_methods_map) { - ProfileCompilationInfo info; - std::vector<ProfileMethodInfo> profile_methods; - ScopedObjectAccess soa(Thread::Current()); - for (ArtMethod* method : methods) { - std::vector<ProfileMethodInfo::ProfileInlineCache> caches; - // Monomorphic - for (uint16_t dex_pc = 0; dex_pc < 11; dex_pc++) { - std::vector<TypeReference> classes; - classes.emplace_back(method->GetDexFile(), dex::TypeIndex(0)); - caches.emplace_back(dex_pc, /*is_missing_types*/false, classes); - } - // Polymorphic - for (uint16_t dex_pc = 11; dex_pc < 22; dex_pc++) { - std::vector<TypeReference> classes; - for (uint16_t k = 0; k < InlineCache::kIndividualCacheSize / 2; k++) { - classes.emplace_back(method->GetDexFile(), dex::TypeIndex(k)); - } - caches.emplace_back(dex_pc, /*is_missing_types*/false, classes); - } - // Megamorphic - for (uint16_t dex_pc = 22; dex_pc < 33; dex_pc++) { - std::vector<TypeReference> classes; - for (uint16_t k = 0; k < 2 * InlineCache::kIndividualCacheSize; k++) { - classes.emplace_back(method->GetDexFile(), dex::TypeIndex(k)); - } - caches.emplace_back(dex_pc, /*is_missing_types*/false, classes); - } - // Missing types - for (uint16_t dex_pc = 33; dex_pc < 44; dex_pc++) { - std::vector<TypeReference> classes; - caches.emplace_back(dex_pc, /*is_missing_types*/true, classes); - } - ProfileMethodInfo pmi(MethodReference(method->GetDexFile(), - method->GetDexMethodIndex()), - caches); - profile_methods.push_back(pmi); - profile_methods_map->Put(method, pmi); - } - - if (!info.AddMethods(profile_methods, flags) - || info.GetNumberOfMethods() != profile_methods.size()) { - return false; - } - return info.Save(filename, nullptr); - } - // Creates an inline cache which will be destructed at the end of the test. 
ProfileCompilationInfo::InlineCacheMap* CreateInlineCacheMap() { used_inline_caches.emplace_back(new ProfileCompilationInfo::InlineCacheMap( @@ -187,35 +81,6 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest { return used_inline_caches.back().get(); } - ProfileCompilationInfo::OfflineProfileMethodInfo ConvertProfileMethodInfo( - const ProfileMethodInfo& pmi) { - ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap(); - ProfileCompilationInfo::OfflineProfileMethodInfo offline_pmi(ic_map); - SafeMap<DexFile*, uint8_t> dex_map; // dex files to profile index - for (const auto& inline_cache : pmi.inline_caches) { - ProfileCompilationInfo::DexPcData& dex_pc_data = - ic_map->FindOrAdd( - inline_cache.dex_pc, ProfileCompilationInfo::DexPcData(allocator_.get()))->second; - if (inline_cache.is_missing_types) { - dex_pc_data.SetIsMissingTypes(); - } - for (const auto& class_ref : inline_cache.classes) { - uint8_t dex_profile_index = dex_map.FindOrAdd(const_cast<DexFile*>(class_ref.dex_file), - static_cast<uint8_t>(dex_map.size()))->second; - dex_pc_data.AddClass(dex_profile_index, class_ref.TypeIndex()); - if (dex_profile_index >= offline_pmi.dex_references.size()) { - // This is a new dex. - const std::string& dex_key = ProfileCompilationInfo::GetProfileDexFileKey( - class_ref.dex_file->GetLocation()); - offline_pmi.dex_references.emplace_back(dex_key, - class_ref.dex_file->GetLocationChecksum(), - class_ref.dex_file->NumMethodIds()); - } - } - } - return offline_pmi; - } - // Creates an offline profile used for testing inline caches. 
ProfileCompilationInfo::OfflineProfileMethodInfo GetOfflineProfileMethodInfo() { ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap(); @@ -261,7 +126,7 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest { ProfileCompilationInfo::InlineCacheMap* ic_map = const_cast<ProfileCompilationInfo::InlineCacheMap*>(pmi->inline_caches); for (auto it : *ic_map) { - for (uint16_t k = 0; k <= 2 * InlineCache::kIndividualCacheSize; k++) { + for (uint16_t k = 0; k <= 2 * ProfileCompilationInfo::kIndividualInlineCacheSize; k++) { it.second.AddClass(0, dex::TypeIndex(k)); } } @@ -327,6 +192,7 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest { static constexpr int kProfileMagicSize = 4; static constexpr int kProfileVersionSize = 4; + MallocArenaPool pool_; std::unique_ptr<ArenaAllocator> allocator_; // Cache of inline caches generated during tests. @@ -335,61 +201,6 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest { std::vector<std::unique_ptr<ProfileCompilationInfo::InlineCacheMap>> used_inline_caches; }; -TEST_F(ProfileCompilationInfoTest, SaveArtMethods) { - ScratchFile profile; - - Thread* self = Thread::Current(); - jobject class_loader; - { - ScopedObjectAccess soa(self); - class_loader = LoadDex("ProfileTestMultiDex"); - } - ASSERT_NE(class_loader, nullptr); - - // Save virtual methods from Main. - std::set<DexCacheResolvedClasses> resolved_classes; - std::vector<ArtMethod*> main_methods = GetVirtualMethods(class_loader, "LMain;"); - ASSERT_TRUE(SaveProfilingInfo( - profile.GetFilename(), main_methods, resolved_classes, Hotness::kFlagPostStartup)); - - // Check that what we saved is in the profile. 
- ProfileCompilationInfo info1; - ASSERT_TRUE(info1.Load(GetFd(profile))); - ASSERT_EQ(info1.GetNumberOfMethods(), main_methods.size()); - { - ScopedObjectAccess soa(self); - for (ArtMethod* m : main_methods) { - Hotness h = info1.GetMethodHotness(MethodReference(m->GetDexFile(), m->GetDexMethodIndex())); - ASSERT_TRUE(h.IsHot()); - ASSERT_TRUE(h.IsPostStartup()); - } - } - - // Save virtual methods from Second. - std::vector<ArtMethod*> second_methods = GetVirtualMethods(class_loader, "LSecond;"); - ASSERT_TRUE(SaveProfilingInfo( - profile.GetFilename(), second_methods, resolved_classes, Hotness::kFlagStartup)); - - // Check that what we saved is in the profile (methods form Main and Second). - ProfileCompilationInfo info2; - ASSERT_TRUE(profile.GetFile()->ResetOffset()); - ASSERT_TRUE(info2.Load(GetFd(profile))); - ASSERT_EQ(info2.GetNumberOfMethods(), main_methods.size() + second_methods.size()); - { - ScopedObjectAccess soa(self); - for (ArtMethod* m : main_methods) { - Hotness h = info2.GetMethodHotness(MethodReference(m->GetDexFile(), m->GetDexMethodIndex())); - ASSERT_TRUE(h.IsHot()); - ASSERT_TRUE(h.IsPostStartup()); - } - for (ArtMethod* m : second_methods) { - Hotness h = info2.GetMethodHotness(MethodReference(m->GetDexFile(), m->GetDexMethodIndex())); - ASSERT_TRUE(h.IsHot()); - ASSERT_TRUE(h.IsStartup()); - } - } -} - TEST_F(ProfileCompilationInfoTest, SaveFd) { ScratchFile profile; @@ -722,48 +533,6 @@ TEST_F(ProfileCompilationInfoTest, MissingTypesInlineCaches) { ASSERT_TRUE(*loaded_pmi1 == pmi_extra); } -TEST_F(ProfileCompilationInfoTest, SaveArtMethodsWithInlineCaches) { - ScratchFile profile; - - Thread* self = Thread::Current(); - jobject class_loader; - { - ScopedObjectAccess soa(self); - class_loader = LoadDex("ProfileTestMultiDex"); - } - ASSERT_NE(class_loader, nullptr); - - // Save virtual methods from Main. 
- std::set<DexCacheResolvedClasses> resolved_classes; - std::vector<ArtMethod*> main_methods = GetVirtualMethods(class_loader, "LMain;"); - - SafeMap<ArtMethod*, ProfileMethodInfo> profile_methods_map; - ASSERT_TRUE(SaveProfilingInfoWithFakeInlineCaches( - profile.GetFilename(), main_methods, Hotness::kFlagStartup, &profile_methods_map)); - - // Check that what we saved is in the profile. - ProfileCompilationInfo info; - ASSERT_TRUE(info.Load(GetFd(profile))); - ASSERT_EQ(info.GetNumberOfMethods(), main_methods.size()); - { - ScopedObjectAccess soa(self); - for (ArtMethod* m : main_methods) { - Hotness h = info.GetMethodHotness(MethodReference(m->GetDexFile(), m->GetDexMethodIndex())); - ASSERT_TRUE(h.IsHot()); - ASSERT_TRUE(h.IsStartup()); - const ProfileMethodInfo& pmi = profile_methods_map.find(m)->second; - std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> offline_pmi = - info.GetMethod(m->GetDexFile()->GetLocation(), - m->GetDexFile()->GetLocationChecksum(), - m->GetDexMethodIndex()); - ASSERT_TRUE(offline_pmi != nullptr); - ProfileCompilationInfo::OfflineProfileMethodInfo converted_pmi = - ConvertProfileMethodInfo(pmi); - ASSERT_EQ(converted_pmi, *offline_pmi); - } - } -} - TEST_F(ProfileCompilationInfoTest, InvalidChecksumInInlineCache) { ScratchFile profile; diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc index 9eeb530644..6b626c21cf 100644 --- a/oatdump/oatdump.cc +++ b/oatdump/oatdump.cc @@ -41,6 +41,7 @@ #include "base/unix_file/fd_file.h" #include "class_linker-inl.h" #include "class_linker.h" +#include "class_root.h" #include "compiled_method.h" #include "debug/debug_info.h" #include "debug/elf_debug_writer.h" @@ -1699,7 +1700,7 @@ class OatDumper { // Stack maps stats_.AddBits( Stats::kByteKindStackMapNativePc, - stack_maps.NumColumnBits(StackMap::kNativePcOffset) * num_stack_maps); + stack_maps.NumColumnBits(StackMap::kPackedNativePc) * num_stack_maps); stats_.AddBits( Stats::kByteKindStackMapDexPc, 
stack_maps.NumColumnBits(StackMap::kDexPc) * num_stack_maps); @@ -1719,7 +1720,7 @@ class OatDumper { // Stack masks stats_.AddBits( Stats::kByteKindCodeInfoStackMasks, - code_info.stack_masks_.size_in_bits()); + code_info.stack_masks_.DataBitSize()); // Register masks stats_.AddBits( @@ -3253,7 +3254,7 @@ class IMTDumper { PrepareClass(runtime, klass, prepared); } - mirror::Class* object_class = mirror::Class::GetJavaLangClass()->GetSuperClass(); + ObjPtr<mirror::Class> object_class = GetClassRoot<mirror::Object>(); DCHECK(object_class->IsObjectClass()); bool result = klass->GetImt(pointer_size) == object_class->GetImt(pointer_size); @@ -3287,8 +3288,8 @@ class IMTDumper { Handle<mirror::ClassLoader> h_loader, const std::string& class_name, const PointerSize pointer_size, - mirror::Class** klass_out, - std::unordered_set<std::string>* prepared) + /*out*/ ObjPtr<mirror::Class>* klass_out, + /*inout*/ std::unordered_set<std::string>* prepared) REQUIRES_SHARED(Locks::mutator_lock_) { if (class_name.empty()) { return nullptr; @@ -3301,7 +3302,8 @@ class IMTDumper { descriptor = DotToDescriptor(class_name.c_str()); } - mirror::Class* klass = runtime->GetClassLinker()->FindClass(self, descriptor.c_str(), h_loader); + ObjPtr<mirror::Class> klass = + runtime->GetClassLinker()->FindClass(self, descriptor.c_str(), h_loader); if (klass == nullptr) { self->ClearException(); @@ -3321,7 +3323,7 @@ class IMTDumper { static ImTable* PrepareAndGetImTable(Runtime* runtime, Handle<mirror::Class> h_klass, const PointerSize pointer_size, - std::unordered_set<std::string>* prepared) + /*inout*/ std::unordered_set<std::string>* prepared) REQUIRES_SHARED(Locks::mutator_lock_) { PrepareClass(runtime, h_klass, prepared); return h_klass->GetImt(pointer_size); @@ -3333,7 +3335,7 @@ class IMTDumper { std::unordered_set<std::string>* prepared) REQUIRES_SHARED(Locks::mutator_lock_) { const PointerSize pointer_size = runtime->GetClassLinker()->GetImagePointerSize(); - mirror::Class* klass; + 
ObjPtr<mirror::Class> klass; ImTable* imt = PrepareAndGetImTable(runtime, Thread::Current(), h_loader, @@ -3389,10 +3391,10 @@ class IMTDumper { const std::string& class_name, const std::string& method, Handle<mirror::ClassLoader> h_loader, - std::unordered_set<std::string>* prepared) + /*inout*/ std::unordered_set<std::string>* prepared) REQUIRES_SHARED(Locks::mutator_lock_) { const PointerSize pointer_size = runtime->GetClassLinker()->GetImagePointerSize(); - mirror::Class* klass; + ObjPtr<mirror::Class> klass; ImTable* imt = PrepareAndGetImTable(runtime, Thread::Current(), h_loader, @@ -3495,7 +3497,7 @@ class IMTDumper { // and note in the given set that the work was done. static void PrepareClass(Runtime* runtime, Handle<mirror::Class> h_klass, - std::unordered_set<std::string>* done) + /*inout*/ std::unordered_set<std::string>* done) REQUIRES_SHARED(Locks::mutator_lock_) { if (!h_klass->ShouldHaveImt()) { return; diff --git a/openjdkjvmti/fixed_up_dex_file.cc b/openjdkjvmti/fixed_up_dex_file.cc index fcbafe7e71..a660fb56c4 100644 --- a/openjdkjvmti/fixed_up_dex_file.cc +++ b/openjdkjvmti/fixed_up_dex_file.cc @@ -31,6 +31,7 @@ #include "base/leb128.h" #include "fixed_up_dex_file.h" +#include "dex/class_accessor-inl.h" #include "dex/dex_file-inl.h" #include "dex/dex_file_loader.h" #include "dex/dex_file_verifier.h" @@ -51,14 +52,12 @@ static void RecomputeDexChecksum(art::DexFile* dex_file) { } static void UnhideApis(const art::DexFile& target_dex_file) { - for (uint32_t i = 0; i < target_dex_file.NumClassDefs(); ++i) { - const uint8_t* class_data = target_dex_file.GetClassData(target_dex_file.GetClassDef(i)); - if (class_data != nullptr) { - for (art::ClassDataItemIterator class_it(target_dex_file, class_data); - class_it.HasNext(); - class_it.Next()) { - art::DexFile::UnHideAccessFlags(class_it); - } + for (art::ClassAccessor accessor : target_dex_file.GetClasses()) { + for (const art::ClassAccessor::Field& field : accessor.GetFields()) { + 
field.UnHideAccessFlags(); + } + for (const art::ClassAccessor::Method& method : accessor.GetMethods()) { + method.UnHideAccessFlags(); } } } diff --git a/openjdkjvmti/ti_class.cc b/openjdkjvmti/ti_class.cc index 726e47ed5f..9bea18a763 100644 --- a/openjdkjvmti/ti_class.cc +++ b/openjdkjvmti/ti_class.cc @@ -71,9 +71,11 @@ #include "scoped_thread_state_change-inl.h" #include "thread-current-inl.h" #include "thread_list.h" +#include "ti_class_definition.h" #include "ti_class_loader-inl.h" #include "ti_phase.h" #include "ti_redefine.h" +#include "transform.h" #include "well_known_classes.h" namespace openjdkjvmti { @@ -713,7 +715,7 @@ jvmtiError ClassUtil::GetClassSignature(jvmtiEnv* env, if (!klass->IsProxyClass() && klass->GetDexCache() != nullptr) { art::StackHandleScope<1> hs(soa.Self()); art::Handle<art::mirror::Class> h_klass = hs.NewHandle(klass); - art::mirror::ObjectArray<art::mirror::String>* str_array = + art::ObjPtr<art::mirror::ObjectArray<art::mirror::String>> str_array = art::annotations::GetSignatureAnnotationForClass(h_klass); if (str_array != nullptr) { std::ostringstream oss; diff --git a/openjdkjvmti/ti_class_loader.h b/openjdkjvmti/ti_class_loader.h index a3857e595a..577c28585e 100644 --- a/openjdkjvmti/ti_class_loader.h +++ b/openjdkjvmti/ti_class_loader.h @@ -36,32 +36,19 @@ #include <jni.h> -#include "art_jvmti.h" -#include "art_method.h" -#include "base/array_slice.h" #include "base/globals.h" -#include "base/mem_map.h" -#include "class_linker.h" -#include "dex/dex_file.h" -#include "dex/utf.h" -#include "gc_root-inl.h" -#include "jni/jni_env_ext-inl.h" +#include "base/mutex.h" #include "jvmti.h" -#include "linear_alloc.h" -#include "mirror/array-inl.h" #include "mirror/array.h" -#include "mirror/class-inl.h" -#include "mirror/class.h" -#include "mirror/class_loader-inl.h" -#include "mirror/string-inl.h" -#include "oat_file.h" -#include "obj_ptr.h" -#include "scoped_thread_state_change-inl.h" -#include "stack.h" -#include "thread_list.h" 
-#include "ti_class_definition.h" -#include "transform.h" -#include "utils/dex_cache_arrays_layout-inl.h" + +namespace art { + +class DexFile; +template <class MirrorType> class Handle; +template <class MirrorType> class ObjPtr; +class Thread; + +} // namespace art namespace openjdkjvmti { diff --git a/openjdkjvmti/ti_field.cc b/openjdkjvmti/ti_field.cc index 328e2a1e40..2a860d9f43 100644 --- a/openjdkjvmti/ti_field.cc +++ b/openjdkjvmti/ti_field.cc @@ -91,7 +91,7 @@ jvmtiError FieldUtil::GetFieldName(jvmtiEnv* env, if (generic_ptr != nullptr) { *generic_ptr = nullptr; if (!art_field->GetDeclaringClass()->IsProxyClass()) { - art::mirror::ObjectArray<art::mirror::String>* str_array = + art::ObjPtr<art::mirror::ObjectArray<art::mirror::String>> str_array = art::annotations::GetSignatureAnnotationForField(art_field); if (str_array != nullptr) { std::ostringstream oss; diff --git a/openjdkjvmti/ti_method.cc b/openjdkjvmti/ti_method.cc index c0c312c490..d0b7224f93 100644 --- a/openjdkjvmti/ti_method.cc +++ b/openjdkjvmti/ti_method.cc @@ -345,7 +345,7 @@ jvmtiError MethodUtil::GetMethodName(jvmtiEnv* env, if (generic_ptr != nullptr) { *generic_ptr = nullptr; if (!art_method->GetDeclaringClass()->IsProxyClass()) { - art::mirror::ObjectArray<art::mirror::String>* str_array = + art::ObjPtr<art::mirror::ObjectArray<art::mirror::String>> str_array = art::annotations::GetSignatureAnnotationForMethod(art_method); if (str_array != nullptr) { std::ostringstream oss; diff --git a/openjdkjvmti/ti_redefine.cc b/openjdkjvmti/ti_redefine.cc index 48e2958773..73e37199ed 100644 --- a/openjdkjvmti/ti_redefine.cc +++ b/openjdkjvmti/ti_redefine.cc @@ -61,6 +61,7 @@ #include "jit/jit_code_cache.h" #include "jni/jni_env_ext-inl.h" #include "jvmti_allocator.h" +#include "linear_alloc.h" #include "mirror/class-inl.h" #include "mirror/class_ext.h" #include "mirror/object.h" @@ -68,6 +69,8 @@ #include "non_debuggable_classes.h" #include "object_lock.h" #include "runtime.h" +#include "stack.h" 
+#include "thread_list.h" #include "ti_breakpoint.h" #include "ti_class_loader.h" #include "transform.h" diff --git a/openjdkjvmti/ti_redefine.h b/openjdkjvmti/ti_redefine.h index 227eacd180..e337491ae3 100644 --- a/openjdkjvmti/ti_redefine.h +++ b/openjdkjvmti/ti_redefine.h @@ -37,34 +37,18 @@ #include <jni.h> #include "art_jvmti.h" -#include "art_method.h" #include "base/array_ref.h" #include "base/globals.h" -#include "base/mem_map.h" -#include "class_linker.h" #include "dex/dex_file.h" -#include "dex/utf.h" -#include "gc_root-inl.h" #include "jni/jni_env_ext-inl.h" #include "jvmti.h" -#include "linear_alloc.h" -#include "mirror/array-inl.h" #include "mirror/array.h" -#include "mirror/class-inl.h" #include "mirror/class.h" -#include "mirror/class_loader-inl.h" -#include "mirror/string-inl.h" -#include "oat_file.h" #include "obj_ptr.h" -#include "scoped_thread_state_change-inl.h" -#include "stack.h" -#include "thread_list.h" -#include "ti_class_definition.h" -#include "transform.h" -#include "utils/dex_cache_arrays_layout-inl.h" namespace openjdkjvmti { +class ArtClassDefinition; class RedefinitionDataHolder; class RedefinitionDataIter; diff --git a/profman/profile_assistant_test.cc b/profman/profile_assistant_test.cc index bd44e491b0..370f59dc8a 100644 --- a/profman/profile_assistant_test.cc +++ b/profman/profile_assistant_test.cc @@ -22,6 +22,7 @@ #include "base/utils.h" #include "common_runtime_test.h" #include "dex/descriptors_names.h" +#include "dex/type_reference.h" #include "exec_utils.h" #include "linear_alloc.h" #include "mirror/class-inl.h" @@ -33,6 +34,7 @@ namespace art { using Hotness = ProfileCompilationInfo::MethodHotness; +using TypeReferenceSet = std::set<TypeReference, TypeReferenceValueComparator>; static constexpr size_t kMaxMethodIds = 65535; @@ -308,25 +310,24 @@ class ProfileAssistantTest : public CommonRuntimeTest { return true; } - mirror::Class* GetClass(jobject class_loader, const std::string& clazz) { + ObjPtr<mirror::Class> 
GetClass(ScopedObjectAccess& soa, + jobject class_loader, + const std::string& clazz) REQUIRES_SHARED(Locks::mutator_lock_) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - Thread* self = Thread::Current(); - ScopedObjectAccess soa(self); - StackHandleScope<1> hs(self); - Handle<mirror::ClassLoader> h_loader( - hs.NewHandle(ObjPtr<mirror::ClassLoader>::DownCast(self->DecodeJObject(class_loader)))); - return class_linker->FindClass(self, clazz.c_str(), h_loader); + StackHandleScope<1> hs(soa.Self()); + Handle<mirror::ClassLoader> h_loader(hs.NewHandle( + ObjPtr<mirror::ClassLoader>::DownCast(soa.Self()->DecodeJObject(class_loader)))); + return class_linker->FindClass(soa.Self(), clazz.c_str(), h_loader); } ArtMethod* GetVirtualMethod(jobject class_loader, const std::string& clazz, const std::string& name) { - mirror::Class* klass = GetClass(class_loader, clazz); + ScopedObjectAccess soa(Thread::Current()); + ObjPtr<mirror::Class> klass = GetClass(soa, class_loader, clazz); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); const auto pointer_size = class_linker->GetImagePointerSize(); ArtMethod* method = nullptr; - Thread* self = Thread::Current(); - ScopedObjectAccess soa(self); for (auto& m : klass->GetVirtualMethods(pointer_size)) { if (name == m.GetName()) { EXPECT_TRUE(method == nullptr); @@ -336,9 +337,14 @@ class ProfileAssistantTest : public CommonRuntimeTest { return method; } + static TypeReference MakeTypeReference(ObjPtr<mirror::Class> klass) + REQUIRES_SHARED(Locks::mutator_lock_) { + return TypeReference(&klass->GetDexFile(), klass->GetDexTypeIndex()); + } + // Verify that given method has the expected inline caches and nothing else. 
void AssertInlineCaches(ArtMethod* method, - const std::set<mirror::Class*>& expected_clases, + const TypeReferenceSet& expected_clases, const ProfileCompilationInfo& info, bool is_megamorphic, bool is_missing_types) @@ -355,12 +361,11 @@ class ProfileAssistantTest : public CommonRuntimeTest { ASSERT_EQ(dex_pc_data.is_missing_types, is_missing_types); ASSERT_EQ(expected_clases.size(), dex_pc_data.classes.size()); size_t found = 0; - for (mirror::Class* it : expected_clases) { + for (const TypeReference& type_ref : expected_clases) { for (const auto& class_ref : dex_pc_data.classes) { ProfileCompilationInfo::DexReference dex_ref = pmi->dex_references[class_ref.dex_profile_index]; - if (dex_ref.MatchesDex(&(it->GetDexFile())) && - class_ref.type_index == it->GetDexTypeIndex()) { + if (dex_ref.MatchesDex(type_ref.dex_file) && class_ref.type_index == type_ref.TypeIndex()) { found++; } } @@ -715,7 +720,7 @@ TEST_F(ProfileAssistantTest, TestProfileCreationGenerateMethods) { ASSERT_TRUE(info.Load(GetFd(profile_file))); // Verify that the profile has matching methods. 
ScopedObjectAccess soa(Thread::Current()); - ObjPtr<mirror::Class> klass = GetClass(nullptr, "Ljava/lang/Math;"); + ObjPtr<mirror::Class> klass = GetClass(soa, /* class_loader */ nullptr, "Ljava/lang/Math;"); ASSERT_TRUE(klass != nullptr); size_t method_count = 0; for (ArtMethod& method : klass->GetMethods(kRuntimePointerSize)) { @@ -907,9 +912,10 @@ TEST_F(ProfileAssistantTest, TestProfileCreateInlineCache) { jobject class_loader = LoadDex("ProfileTestMultiDex"); ASSERT_NE(class_loader, nullptr); - mirror::Class* sub_a = GetClass(class_loader, "LSubA;"); - mirror::Class* sub_b = GetClass(class_loader, "LSubB;"); - mirror::Class* sub_c = GetClass(class_loader, "LSubC;"); + StackHandleScope<3> hs(soa.Self()); + Handle<mirror::Class> sub_a = hs.NewHandle(GetClass(soa, class_loader, "LSubA;")); + Handle<mirror::Class> sub_b = hs.NewHandle(GetClass(soa, class_loader, "LSubB;")); + Handle<mirror::Class> sub_c = hs.NewHandle(GetClass(soa, class_loader, "LSubC;")); ASSERT_TRUE(sub_a != nullptr); ASSERT_TRUE(sub_b != nullptr); @@ -921,8 +927,8 @@ TEST_F(ProfileAssistantTest, TestProfileCreateInlineCache) { "LTestInline;", "inlineMonomorphic"); ASSERT_TRUE(inline_monomorphic != nullptr); - std::set<mirror::Class*> expected_monomorphic; - expected_monomorphic.insert(sub_a); + TypeReferenceSet expected_monomorphic; + expected_monomorphic.insert(MakeTypeReference(sub_a.Get())); AssertInlineCaches(inline_monomorphic, expected_monomorphic, info, @@ -936,10 +942,10 @@ TEST_F(ProfileAssistantTest, TestProfileCreateInlineCache) { "LTestInline;", "inlinePolymorphic"); ASSERT_TRUE(inline_polymorhic != nullptr); - std::set<mirror::Class*> expected_polymorphic; - expected_polymorphic.insert(sub_a); - expected_polymorphic.insert(sub_b); - expected_polymorphic.insert(sub_c); + TypeReferenceSet expected_polymorphic; + expected_polymorphic.insert(MakeTypeReference(sub_a.Get())); + expected_polymorphic.insert(MakeTypeReference(sub_b.Get())); + 
expected_polymorphic.insert(MakeTypeReference(sub_c.Get())); AssertInlineCaches(inline_polymorhic, expected_polymorphic, info, @@ -953,7 +959,7 @@ TEST_F(ProfileAssistantTest, TestProfileCreateInlineCache) { "LTestInline;", "inlineMegamorphic"); ASSERT_TRUE(inline_megamorphic != nullptr); - std::set<mirror::Class*> expected_megamorphic; + TypeReferenceSet expected_megamorphic; AssertInlineCaches(inline_megamorphic, expected_megamorphic, info, @@ -967,7 +973,7 @@ TEST_F(ProfileAssistantTest, TestProfileCreateInlineCache) { "LTestInline;", "inlineMissingTypes"); ASSERT_TRUE(inline_missing_types != nullptr); - std::set<mirror::Class*> expected_missing_Types; + TypeReferenceSet expected_missing_Types; AssertInlineCaches(inline_missing_types, expected_missing_Types, info, diff --git a/runtime/Android.bp b/runtime/Android.bp index b276c81d5a..777a1fc5ee 100644 --- a/runtime/Android.bp +++ b/runtime/Android.bp @@ -574,6 +574,7 @@ art_cc_test { "interpreter/safe_math_test.cc", "interpreter/unstarted_runtime_test.cc", "jdwp/jdwp_options_test.cc", + "jit/profiling_info_test.cc", "jni/java_vm_ext_test.cc", "method_handles_test.cc", "mirror/dex_cache_test.cc", diff --git a/runtime/arch/code_offset.h b/runtime/arch/code_offset.h deleted file mode 100644 index f0c6d22ef2..0000000000 --- a/runtime/arch/code_offset.h +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright (C) 2017 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef ART_RUNTIME_ARCH_CODE_OFFSET_H_ -#define ART_RUNTIME_ARCH_CODE_OFFSET_H_ - -#include <iosfwd> - -#include <android-base/logging.h> - -#include "arch/instruction_set.h" -#include "base/bit_utils.h" -#include "base/macros.h" - -namespace art { - -// CodeOffset is a holder for compressed code offsets. Since some architectures have alignment -// requirements it is possible to compress code offsets to reduce stack map sizes. -class CodeOffset { - public: - ALWAYS_INLINE static CodeOffset FromOffset(uint32_t offset, InstructionSet isa = kRuntimeISA) { - return CodeOffset(offset / GetInstructionSetInstructionAlignment(isa)); - } - - ALWAYS_INLINE static CodeOffset FromCompressedOffset(uint32_t offset) { - return CodeOffset(offset); - } - - ALWAYS_INLINE uint32_t Uint32Value(InstructionSet isa = kRuntimeISA) const { - uint32_t decoded = value_ * GetInstructionSetInstructionAlignment(isa); - DCHECK_GE(decoded, value_) << "Integer overflow"; - return decoded; - } - - // Return compressed internal value. 
- ALWAYS_INLINE uint32_t CompressedValue() const { - return value_; - } - - ALWAYS_INLINE CodeOffset() = default; - ALWAYS_INLINE CodeOffset(const CodeOffset&) = default; - ALWAYS_INLINE CodeOffset& operator=(const CodeOffset&) = default; - ALWAYS_INLINE CodeOffset& operator=(CodeOffset&&) = default; - - private: - ALWAYS_INLINE explicit CodeOffset(uint32_t value) : value_(value) {} - - uint32_t value_ = 0u; -}; - -inline bool operator==(const CodeOffset& a, const CodeOffset& b) { - return a.CompressedValue() == b.CompressedValue(); -} - -inline bool operator!=(const CodeOffset& a, const CodeOffset& b) { - return !(a == b); -} - -inline bool operator<(const CodeOffset& a, const CodeOffset& b) { - return a.CompressedValue() < b.CompressedValue(); -} - -inline bool operator<=(const CodeOffset& a, const CodeOffset& b) { - return a.CompressedValue() <= b.CompressedValue(); -} - -inline bool operator>(const CodeOffset& a, const CodeOffset& b) { - return a.CompressedValue() > b.CompressedValue(); -} - -inline bool operator>=(const CodeOffset& a, const CodeOffset& b) { - return a.CompressedValue() >= b.CompressedValue(); -} - -inline std::ostream& operator<<(std::ostream& os, const CodeOffset& offset) { - return os << offset.Uint32Value(); -} - -} // namespace art - -#endif // ART_RUNTIME_ARCH_CODE_OFFSET_H_ diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc index 78516e3aeb..b0c0e43e35 100644 --- a/runtime/arch/stub_test.cc +++ b/runtime/arch/stub_test.cc @@ -21,6 +21,7 @@ #include "base/callee_save_type.h" #include "base/enums.h" #include "class_linker-inl.h" +#include "class_root.h" #include "common_runtime_test.h" #include "entrypoints/quick/quick_entrypoints_enum.h" #include "imt_conflict_table.h" @@ -2096,7 +2097,7 @@ TEST_F(StubTest, ReadBarrierForRoot) { EXPECT_FALSE(self->IsExceptionPending()); - GcRoot<mirror::Class>& root = mirror::String::java_lang_String_; + GcRoot<mirror::Class> root(GetClassRoot<mirror::String>()); size_t result = 
Invoke3(reinterpret_cast<size_t>(&root), 0U, 0U, readBarrierForRootSlow, self); EXPECT_FALSE(self->IsExceptionPending()); diff --git a/runtime/art_method.cc b/runtime/art_method.cc index 151c36f3bc..4e9f3c52e2 100644 --- a/runtime/art_method.cc +++ b/runtime/art_method.cc @@ -26,6 +26,7 @@ #include "class_linker-inl.h" #include "class_root.h" #include "debugger.h" +#include "dex/class_accessor-inl.h" #include "dex/descriptors_names.h" #include "dex/dex_file-inl.h" #include "dex/dex_file_exception_helpers.h" @@ -434,28 +435,14 @@ bool ArtMethod::IsPolymorphicSignature() { static uint32_t GetOatMethodIndexFromMethodIndex(const DexFile& dex_file, uint16_t class_def_idx, uint32_t method_idx) { - const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_idx); - const uint8_t* class_data = dex_file.GetClassData(class_def); - CHECK(class_data != nullptr); - ClassDataItemIterator it(dex_file, class_data); - it.SkipAllFields(); - // Process methods - size_t class_def_method_index = 0; - while (it.HasNextDirectMethod()) { - if (it.GetMemberIndex() == method_idx) { + ClassAccessor accessor(dex_file, dex_file.GetClassDef(class_def_idx)); + uint32_t class_def_method_index = 0u; + for (const ClassAccessor::Method& method : accessor.GetMethods()) { + if (method.GetIndex() == method_idx) { return class_def_method_index; } class_def_method_index++; - it.Next(); } - while (it.HasNextVirtualMethod()) { - if (it.GetMemberIndex() == method_idx) { - return class_def_method_index; - } - class_def_method_index++; - it.Next(); - } - DCHECK(!it.HasNext()); LOG(FATAL) << "Failed to find method index " << method_idx << " in " << dex_file.GetLocation(); UNREACHABLE(); } diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h index 6917899bff..acdb235f8c 100644 --- a/runtime/check_reference_map_visitor.h +++ b/runtime/check_reference_map_visitor.h @@ -75,8 +75,7 @@ class CheckReferenceMapVisitor : public StackVisitor { for (int i = 0; i < 
number_of_references; ++i) { int reg = registers[i]; CHECK_LT(reg, accessor.RegistersSize()); - DexRegisterLocation location = dex_register_map.GetDexRegisterLocation( - reg, number_of_dex_registers, code_info); + DexRegisterLocation location = dex_register_map.GetDexRegisterLocation(reg); switch (location.GetKind()) { case DexRegisterLocation::Kind::kNone: // Not set, should not be a reference. diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h index 7a99d3dc5e..25eb85d675 100644 --- a/runtime/class_linker-inl.h +++ b/runtime/class_linker-inl.h @@ -33,8 +33,8 @@ namespace art { -inline mirror::Class* ClassLinker::FindArrayClass(Thread* self, - ObjPtr<mirror::Class>* element_class) { +inline ObjPtr<mirror::Class> ClassLinker::FindArrayClass(Thread* self, + ObjPtr<mirror::Class>* element_class) { for (size_t i = 0; i < kFindArrayCacheSize; ++i) { // Read the cached array class once to avoid races with other threads setting it. ObjPtr<mirror::Class> array_class = find_array_class_cache_[i].Read(); diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc index b882f65a9e..b03237f1cd 100644 --- a/runtime/class_linker.cc +++ b/runtime/class_linker.cc @@ -56,6 +56,7 @@ #include "compiler_callbacks.h" #include "debug_print.h" #include "debugger.h" +#include "dex/class_accessor-inl.h" #include "dex/descriptors_names.h" #include "dex/dex_file-inl.h" #include "dex/dex_file_exception_helpers.h" @@ -484,27 +485,10 @@ bool ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b mirror::ObjectArray<mirror::Object>::ClassSize(image_pointer_size_)))); object_array_class->SetComponentType(java_lang_Object.Get()); - // Setup the char (primitive) class to be used for char[]. 
- Handle<mirror::Class> char_class(hs.NewHandle( - AllocClass(self, java_lang_Class.Get(), - mirror::Class::PrimitiveClassSize(image_pointer_size_)))); - // The primitive char class won't be initialized by - // InitializePrimitiveClass until line 459, but strings (and - // internal char arrays) will be allocated before that and the - // component size, which is computed from the primitive type, needs - // to be set here. - char_class->SetPrimitiveType(Primitive::kPrimChar); - - // Setup the char[] class to be used for String. - Handle<mirror::Class> char_array_class(hs.NewHandle( - AllocClass(self, java_lang_Class.Get(), mirror::Array::ClassSize(image_pointer_size_)))); - char_array_class->SetComponentType(char_class.Get()); - // Setup String. Handle<mirror::Class> java_lang_String(hs.NewHandle( AllocClass(self, java_lang_Class.Get(), mirror::String::ClassSize(image_pointer_size_)))); java_lang_String->SetStringClass(); - mirror::String::SetClass(java_lang_String.Get()); mirror::Class::SetStatus(java_lang_String, ClassStatus::kResolved, self); // Setup java.lang.ref.Reference. @@ -523,7 +507,6 @@ bool ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b SetClassRoot(ClassRoot::kJavaLangObject, java_lang_Object.Get()); SetClassRoot(ClassRoot::kClassArrayClass, class_array_class.Get()); SetClassRoot(ClassRoot::kObjectArrayClass, object_array_class.Get()); - SetClassRoot(ClassRoot::kCharArrayClass, char_array_class.Get()); SetClassRoot(ClassRoot::kJavaLangString, java_lang_String.Get()); SetClassRoot(ClassRoot::kJavaLangRefReference, java_lang_ref_Reference.Get()); @@ -533,6 +516,7 @@ bool ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b // Setup the primitive type classes. 
SetClassRoot(ClassRoot::kPrimitiveBoolean, CreatePrimitiveClass(self, Primitive::kPrimBoolean)); SetClassRoot(ClassRoot::kPrimitiveByte, CreatePrimitiveClass(self, Primitive::kPrimByte)); + SetClassRoot(ClassRoot::kPrimitiveChar, CreatePrimitiveClass(self, Primitive::kPrimChar)); SetClassRoot(ClassRoot::kPrimitiveShort, CreatePrimitiveClass(self, Primitive::kPrimShort)); SetClassRoot(ClassRoot::kPrimitiveInt, CreatePrimitiveClass(self, Primitive::kPrimInt)); SetClassRoot(ClassRoot::kPrimitiveLong, CreatePrimitiveClass(self, Primitive::kPrimLong)); @@ -543,13 +527,13 @@ bool ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b // Create array interface entries to populate once we can load system classes. array_iftable_ = GcRoot<mirror::IfTable>(AllocIfTable(self, 2)); - // Create int array type for AllocDexCache (done in AppendToBootClassPath). + // Create int array type for native pointer arrays (for example vtables) on 32-bit archs. Handle<mirror::Class> int_array_class(hs.NewHandle( AllocClass(self, java_lang_Class.Get(), mirror::Array::ClassSize(image_pointer_size_)))); int_array_class->SetComponentType(GetClassRoot(ClassRoot::kPrimitiveInt, this)); SetClassRoot(ClassRoot::kIntArrayClass, int_array_class.Get()); - // Create long array type for AllocDexCache (done in AppendToBootClassPath). + // Create long array type for native pointer arrays (for example vtables) on 64-bit archs. 
Handle<mirror::Class> long_array_class(hs.NewHandle( AllocClass(self, java_lang_Class.Get(), mirror::Array::ClassSize(image_pointer_size_)))); long_array_class->SetComponentType(GetClassRoot(ClassRoot::kPrimitiveLong, this)); @@ -604,10 +588,6 @@ bool ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b // now we can use FindSystemClass - // run char class through InitializePrimitiveClass to finish init - InitializePrimitiveClass(char_class.Get(), Primitive::kPrimChar); - SetClassRoot(ClassRoot::kPrimitiveChar, char_class.Get()); // needs descriptor - // Set up GenericJNI entrypoint. That is mainly a hack for common_compiler_test.h so that // we do not need friend classes or a publicly exposed setter. quick_generic_jni_trampoline_ = GetQuickGenericJniStub(); @@ -636,7 +616,7 @@ bool ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b SetClassRoot(ClassRoot::kByteArrayClass, FindSystemClass(self, "[B")); - CheckSystemClass(self, char_array_class, "[C"); + SetClassRoot(ClassRoot::kCharArrayClass, FindSystemClass(self, "[C")); SetClassRoot(ClassRoot::kShortArrayClass, FindSystemClass(self, "[S")); @@ -685,7 +665,7 @@ bool ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b FindSystemClass(self, "Ljava/lang/reflect/Proxy;")); // Create java.lang.reflect.Field.class root. 
- auto* class_root = FindSystemClass(self, "Ljava/lang/reflect/Field;"); + ObjPtr<mirror::Class> class_root = FindSystemClass(self, "Ljava/lang/reflect/Field;"); CHECK(class_root != nullptr); SetClassRoot(ClassRoot::kJavaLangReflectField, class_root); @@ -1014,10 +994,6 @@ bool ClassLinker::InitFromBootImage(std::string* error_msg) { spaces[0]->GetImageHeader().GetImageRoot(ImageHeader::kClassRoots))); mirror::Class::SetClassClass(GetClassRoot(ClassRoot::kJavaLangClass, this)); - // Special case of setting up the String class early so that we can test arbitrary objects - // as being Strings or not - mirror::String::SetClass(GetClassRoot<mirror::String>(this)); - ObjPtr<mirror::Class> java_lang_Object = GetClassRoot<mirror::Object>(this); java_lang_Object->SetObjectSize(sizeof(mirror::Object)); // Allocate in non-movable so that it's possible to check if a JNI weak global ref has been @@ -2084,8 +2060,7 @@ void ClassLinker::VisitClassesWithoutClassesLock(ClassVisitor* visitor) { // Add 100 in case new classes get loaded when we are filling in the object array. class_table_size = NumZygoteClasses() + NumNonZygoteClasses() + 100; } - ObjPtr<mirror::Class> class_type = mirror::Class::GetJavaLangClass(); - ObjPtr<mirror::Class> array_of_class = FindArrayClass(self, &class_type); + ObjPtr<mirror::Class> array_of_class = GetClassRoot<mirror::ObjectArray<mirror::Class>>(this); classes.Assign( mirror::ObjectArray<mirror::Class>::Alloc(self, array_of_class, class_table_size)); CHECK(classes != nullptr); // OOME. @@ -2109,7 +2084,6 @@ void ClassLinker::VisitClassesWithoutClassesLock(ClassVisitor* visitor) { ClassLinker::~ClassLinker() { mirror::Class::ResetClass(); - mirror::String::ResetClass(); Thread* const self = Thread::Current(); for (const ClassLoaderData& data : class_loaders_) { // CHA unloading analysis is not needed. 
No negative consequences are expected because @@ -2189,9 +2163,9 @@ mirror::DexCache* ClassLinker::AllocAndInitializeDexCache(Thread* self, return dex_cache.Ptr(); } -mirror::Class* ClassLinker::AllocClass(Thread* self, - ObjPtr<mirror::Class> java_lang_Class, - uint32_t class_size) { +ObjPtr<mirror::Class> ClassLinker::AllocClass(Thread* self, + ObjPtr<mirror::Class> java_lang_Class, + uint32_t class_size) { DCHECK_GE(class_size, sizeof(mirror::Class)); gc::Heap* heap = Runtime::Current()->GetHeap(); mirror::Class::InitializeClassVisitor visitor(class_size); @@ -2205,7 +2179,7 @@ mirror::Class* ClassLinker::AllocClass(Thread* self, return k->AsClass(); } -mirror::Class* ClassLinker::AllocClass(Thread* self, uint32_t class_size) { +ObjPtr<mirror::Class> ClassLinker::AllocClass(Thread* self, uint32_t class_size) { return AllocClass(self, GetClassRoot<mirror::Class>(this), class_size); } @@ -2216,9 +2190,9 @@ mirror::ObjectArray<mirror::StackTraceElement>* ClassLinker::AllocStackTraceElem self, GetClassRoot<mirror::ObjectArray<mirror::StackTraceElement>>(this), length); } -mirror::Class* ClassLinker::EnsureResolved(Thread* self, - const char* descriptor, - ObjPtr<mirror::Class> klass) { +ObjPtr<mirror::Class> ClassLinker::EnsureResolved(Thread* self, + const char* descriptor, + ObjPtr<mirror::Class> klass) { DCHECK(klass != nullptr); if (kIsDebugBuild) { StackHandleScope<1> hs(self); @@ -2426,9 +2400,9 @@ ObjPtr<mirror::Class> ClassLinker::FindClassInBaseDexClassLoaderClassPath( return ret; } -mirror::Class* ClassLinker::FindClass(Thread* self, - const char* descriptor, - Handle<mirror::ClassLoader> class_loader) { +ObjPtr<mirror::Class> ClassLinker::FindClass(Thread* self, + const char* descriptor, + Handle<mirror::ClassLoader> class_loader) { DCHECK_NE(*descriptor, '\0') << "descriptor is empty string"; DCHECK(self != nullptr); self->AssertNoPendingException(); @@ -2597,12 +2571,12 @@ mirror::Class* ClassLinker::FindClass(Thread* self, return result_ptr.Ptr(); } 
-mirror::Class* ClassLinker::DefineClass(Thread* self, - const char* descriptor, - size_t hash, - Handle<mirror::ClassLoader> class_loader, - const DexFile& dex_file, - const DexFile::ClassDef& dex_class_def) { +ObjPtr<mirror::Class> ClassLinker::DefineClass(Thread* self, + const char* descriptor, + size_t hash, + Handle<mirror::ClassLoader> class_loader, + const DexFile& dex_file, + const DexFile::ClassDef& dex_class_def) { StackHandleScope<3> hs(self); auto klass = hs.NewHandle<mirror::Class>(nullptr); @@ -2760,52 +2734,50 @@ mirror::Class* ClassLinker::DefineClass(Thread* self, uint32_t ClassLinker::SizeOfClassWithoutEmbeddedTables(const DexFile& dex_file, const DexFile::ClassDef& dex_class_def) { - const uint8_t* class_data = dex_file.GetClassData(dex_class_def); size_t num_ref = 0; size_t num_8 = 0; size_t num_16 = 0; size_t num_32 = 0; size_t num_64 = 0; - if (class_data != nullptr) { - // We allow duplicate definitions of the same field in a class_data_item - // but ignore the repeated indexes here, b/21868015. - uint32_t last_field_idx = dex::kDexNoIndex; - for (ClassDataItemIterator it(dex_file, class_data); it.HasNextStaticField(); it.Next()) { - uint32_t field_idx = it.GetMemberIndex(); - // Ordering enforced by DexFileVerifier. 
- DCHECK(last_field_idx == dex::kDexNoIndex || last_field_idx <= field_idx); - if (UNLIKELY(field_idx == last_field_idx)) { - continue; - } - last_field_idx = field_idx; - const DexFile::FieldId& field_id = dex_file.GetFieldId(field_idx); - const char* descriptor = dex_file.GetFieldTypeDescriptor(field_id); - char c = descriptor[0]; - switch (c) { - case 'L': - case '[': - num_ref++; - break; - case 'J': - case 'D': - num_64++; - break; - case 'I': - case 'F': - num_32++; - break; - case 'S': - case 'C': - num_16++; - break; - case 'B': - case 'Z': - num_8++; - break; - default: - LOG(FATAL) << "Unknown descriptor: " << c; - UNREACHABLE(); - } + ClassAccessor accessor(dex_file, dex_class_def); + // We allow duplicate definitions of the same field in a class_data_item + // but ignore the repeated indexes here, b/21868015. + uint32_t last_field_idx = dex::kDexNoIndex; + for (const ClassAccessor::Field& field : accessor.GetStaticFields()) { + uint32_t field_idx = field.GetIndex(); + // Ordering enforced by DexFileVerifier. 
+ DCHECK(last_field_idx == dex::kDexNoIndex || last_field_idx <= field_idx); + if (UNLIKELY(field_idx == last_field_idx)) { + continue; + } + last_field_idx = field_idx; + const DexFile::FieldId& field_id = dex_file.GetFieldId(field_idx); + const char* descriptor = dex_file.GetFieldTypeDescriptor(field_id); + char c = descriptor[0]; + switch (c) { + case 'L': + case '[': + num_ref++; + break; + case 'J': + case 'D': + num_64++; + break; + case 'I': + case 'F': + num_32++; + break; + case 'S': + case 'C': + num_16++; + break; + case 'B': + case 'Z': + num_8++; + break; + default: + LOG(FATAL) << "Unknown descriptor: " << c; + UNREACHABLE(); } } return mirror::Class::ComputeClassSize(false, @@ -2903,17 +2875,15 @@ void ClassLinker::FixupStaticTrampolines(ObjPtr<mirror::Class> klass) { const DexFile& dex_file = klass->GetDexFile(); const DexFile::ClassDef* dex_class_def = klass->GetClassDef(); CHECK(dex_class_def != nullptr); - const uint8_t* class_data = dex_file.GetClassData(*dex_class_def); + ClassAccessor accessor(dex_file, *dex_class_def); // There should always be class data if there were direct methods. - CHECK(class_data != nullptr) << klass->PrettyDescriptor(); - ClassDataItemIterator it(dex_file, class_data); - it.SkipAllFields(); + CHECK(accessor.HasClassData()) << klass->PrettyDescriptor(); bool has_oat_class; OatFile::OatClass oat_class = OatFile::FindOatClass(dex_file, klass->GetDexClassDefIndex(), &has_oat_class); // Link the code of methods skipped by LinkCode. - for (size_t method_index = 0; it.HasNextDirectMethod(); ++method_index, it.Next()) { + for (size_t method_index = 0; method_index < accessor.NumDirectMethods(); ++method_index) { ArtMethod* method = klass->GetDirectMethod(method_index, image_pointer_size_); if (!method->IsStatic()) { // Only update static methods. 
@@ -3022,17 +2992,6 @@ void ClassLinker::SetupClass(const DexFile& dex_file, klass->SetDexTypeIndex(dex_class_def.class_idx_); } -void ClassLinker::LoadClass(Thread* self, - const DexFile& dex_file, - const DexFile::ClassDef& dex_class_def, - Handle<mirror::Class> klass) { - const uint8_t* class_data = dex_file.GetClassData(dex_class_def); - if (class_data == nullptr) { - return; // no fields or methods - for example a marker interface - } - LoadClassMembers(self, dex_file, class_data, klass); -} - LengthPrefixedArray<ArtField>* ClassLinker::AllocArtFieldArray(Thread* self, LinearAlloc* allocator, size_t length) { @@ -3091,10 +3050,15 @@ LinearAlloc* ClassLinker::GetOrCreateAllocatorForClassLoader(ObjPtr<mirror::Clas return allocator; } -void ClassLinker::LoadClassMembers(Thread* self, - const DexFile& dex_file, - const uint8_t* class_data, - Handle<mirror::Class> klass) { +void ClassLinker::LoadClass(Thread* self, + const DexFile& dex_file, + const DexFile::ClassDef& dex_class_def, + Handle<mirror::Class> klass) { + ClassAccessor accessor(dex_file, dex_class_def); + if (!accessor.HasClassData()) { + return; + } + Runtime* const runtime = Runtime::Current(); { // Note: We cannot have thread suspension until the field and method arrays are setup or else // Class::VisitFieldRoots may miss some fields or methods. @@ -3103,45 +3067,79 @@ void ClassLinker::LoadClassMembers(Thread* self, // We allow duplicate definitions of the same field in a class_data_item // but ignore the repeated indexes here, b/21868015. LinearAlloc* const allocator = GetAllocatorForClassLoader(klass->GetClassLoader()); - ClassDataItemIterator it(dex_file, class_data); LengthPrefixedArray<ArtField>* sfields = AllocArtFieldArray(self, allocator, - it.NumStaticFields()); - size_t num_sfields = 0; - uint32_t last_field_idx = 0u; - for (; it.HasNextStaticField(); it.Next()) { - uint32_t field_idx = it.GetMemberIndex(); - DCHECK_GE(field_idx, last_field_idx); // Ordering enforced by DexFileVerifier. 
- if (num_sfields == 0 || LIKELY(field_idx > last_field_idx)) { - DCHECK_LT(num_sfields, it.NumStaticFields()); - LoadField(it, klass, &sfields->At(num_sfields)); - ++num_sfields; - last_field_idx = field_idx; - } - } - - // Load instance fields. + accessor.NumStaticFields()); LengthPrefixedArray<ArtField>* ifields = AllocArtFieldArray(self, allocator, - it.NumInstanceFields()); + accessor.NumInstanceFields()); + size_t num_sfields = 0u; size_t num_ifields = 0u; - last_field_idx = 0u; - for (; it.HasNextInstanceField(); it.Next()) { - uint32_t field_idx = it.GetMemberIndex(); - DCHECK_GE(field_idx, last_field_idx); // Ordering enforced by DexFileVerifier. - if (num_ifields == 0 || LIKELY(field_idx > last_field_idx)) { - DCHECK_LT(num_ifields, it.NumInstanceFields()); - LoadField(it, klass, &ifields->At(num_ifields)); - ++num_ifields; - last_field_idx = field_idx; - } - } + uint32_t last_static_field_idx = 0u; + uint32_t last_instance_field_idx = 0u; - if (UNLIKELY(num_sfields != it.NumStaticFields()) || - UNLIKELY(num_ifields != it.NumInstanceFields())) { + // Methods + bool has_oat_class = false; + const OatFile::OatClass oat_class = (runtime->IsStarted() && !runtime->IsAotCompiler()) + ? OatFile::FindOatClass(dex_file, klass->GetDexClassDefIndex(), &has_oat_class) + : OatFile::OatClass::Invalid(); + const OatFile::OatClass* oat_class_ptr = has_oat_class ? &oat_class : nullptr; + klass->SetMethodsPtr( + AllocArtMethodArray(self, allocator, accessor.NumMethods()), + accessor.NumDirectMethods(), + accessor.NumVirtualMethods()); + size_t class_def_method_index = 0; + uint32_t last_dex_method_index = dex::kDexNoIndex; + size_t last_class_def_method_index = 0; + + // Use the visitor since the ranged based loops are bit slower from seeking. Seeking to the + // methods needs to decode all of the fields. 
+ accessor.VisitFieldsAndMethods([&]( + const ClassAccessor::Field& field) REQUIRES_SHARED(Locks::mutator_lock_) { + uint32_t field_idx = field.GetIndex(); + DCHECK_GE(field_idx, last_static_field_idx); // Ordering enforced by DexFileVerifier. + if (num_sfields == 0 || LIKELY(field_idx > last_static_field_idx)) { + LoadField(field, klass, &sfields->At(num_sfields)); + ++num_sfields; + last_static_field_idx = field_idx; + } + }, [&](const ClassAccessor::Field& field) REQUIRES_SHARED(Locks::mutator_lock_) { + uint32_t field_idx = field.GetIndex(); + DCHECK_GE(field_idx, last_instance_field_idx); // Ordering enforced by DexFileVerifier. + if (num_ifields == 0 || LIKELY(field_idx > last_instance_field_idx)) { + LoadField(field, klass, &ifields->At(num_ifields)); + ++num_ifields; + last_instance_field_idx = field_idx; + } + }, [&](const ClassAccessor::Method& method) REQUIRES_SHARED(Locks::mutator_lock_) { + ArtMethod* art_method = klass->GetDirectMethodUnchecked(class_def_method_index, + image_pointer_size_); + LoadMethod(dex_file, method, klass, art_method); + LinkCode(this, art_method, oat_class_ptr, class_def_method_index); + uint32_t it_method_index = method.GetIndex(); + if (last_dex_method_index == it_method_index) { + // duplicate case + art_method->SetMethodIndex(last_class_def_method_index); + } else { + art_method->SetMethodIndex(class_def_method_index); + last_dex_method_index = it_method_index; + last_class_def_method_index = class_def_method_index; + } + ++class_def_method_index; + }, [&](const ClassAccessor::Method& method) REQUIRES_SHARED(Locks::mutator_lock_) { + ArtMethod* art_method = klass->GetVirtualMethodUnchecked( + class_def_method_index - accessor.NumDirectMethods(), + image_pointer_size_); + LoadMethod(dex_file, method, klass, art_method); + LinkCode(this, art_method, oat_class_ptr, class_def_method_index); + ++class_def_method_index; + }); + + if (UNLIKELY(num_ifields + num_sfields != accessor.NumFields())) { LOG(WARNING) << "Duplicate fields 
in class " << klass->PrettyDescriptor() - << " (unique static fields: " << num_sfields << "/" << it.NumStaticFields() - << ", unique instance fields: " << num_ifields << "/" << it.NumInstanceFields() << ")"; + << " (unique static fields: " << num_sfields << "/" << accessor.NumStaticFields() + << ", unique instance fields: " << num_ifields << "/" << accessor.NumInstanceFields() + << ")"; // NOTE: Not shrinking the over-allocated sfields/ifields, just setting size. if (sfields != nullptr) { sfields->SetSize(num_sfields); @@ -3155,87 +3153,49 @@ void ClassLinker::LoadClassMembers(Thread* self, DCHECK_EQ(klass->NumStaticFields(), num_sfields); klass->SetIFieldsPtr(ifields); DCHECK_EQ(klass->NumInstanceFields(), num_ifields); - // Load methods. - bool has_oat_class = false; - const OatFile::OatClass oat_class = - (Runtime::Current()->IsStarted() && !Runtime::Current()->IsAotCompiler()) - ? OatFile::FindOatClass(dex_file, klass->GetDexClassDefIndex(), &has_oat_class) - : OatFile::OatClass::Invalid(); - const OatFile::OatClass* oat_class_ptr = has_oat_class ? &oat_class : nullptr; - klass->SetMethodsPtr( - AllocArtMethodArray(self, allocator, it.NumDirectMethods() + it.NumVirtualMethods()), - it.NumDirectMethods(), - it.NumVirtualMethods()); - size_t class_def_method_index = 0; - uint32_t last_dex_method_index = dex::kDexNoIndex; - size_t last_class_def_method_index = 0; - // TODO These should really use the iterators. 
- for (size_t i = 0; it.HasNextDirectMethod(); i++, it.Next()) { - ArtMethod* method = klass->GetDirectMethodUnchecked(i, image_pointer_size_); - LoadMethod(dex_file, it, klass, method); - LinkCode(this, method, oat_class_ptr, class_def_method_index); - uint32_t it_method_index = it.GetMemberIndex(); - if (last_dex_method_index == it_method_index) { - // duplicate case - method->SetMethodIndex(last_class_def_method_index); - } else { - method->SetMethodIndex(class_def_method_index); - last_dex_method_index = it_method_index; - last_class_def_method_index = class_def_method_index; - } - class_def_method_index++; - } - for (size_t i = 0; it.HasNextVirtualMethod(); i++, it.Next()) { - ArtMethod* method = klass->GetVirtualMethodUnchecked(i, image_pointer_size_); - LoadMethod(dex_file, it, klass, method); - DCHECK_EQ(class_def_method_index, it.NumDirectMethods() + i); - LinkCode(this, method, oat_class_ptr, class_def_method_index); - class_def_method_index++; - } - DCHECK(!it.HasNext()); } // Ensure that the card is marked so that remembered sets pick up native roots. Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(klass.Get()); self->AllowThreadSuspension(); } -void ClassLinker::LoadField(const ClassDataItemIterator& it, +void ClassLinker::LoadField(const ClassAccessor::Field& field, Handle<mirror::Class> klass, ArtField* dst) { - const uint32_t field_idx = it.GetMemberIndex(); + const uint32_t field_idx = field.GetIndex(); dst->SetDexFieldIndex(field_idx); dst->SetDeclaringClass(klass.Get()); // Get access flags from the DexFile. If this is a boot class path class, // also set its runtime hidden API access flags. 
- uint32_t access_flags = it.GetFieldAccessFlags(); + uint32_t access_flags = field.GetAccessFlags(); if (klass->IsBootStrapClassLoaded()) { access_flags = - HiddenApiAccessFlags::EncodeForRuntime(access_flags, it.DecodeHiddenAccessFlags()); + HiddenApiAccessFlags::EncodeForRuntime(access_flags, field.DecodeHiddenAccessFlags()); } dst->SetAccessFlags(access_flags); } void ClassLinker::LoadMethod(const DexFile& dex_file, - const ClassDataItemIterator& it, + const ClassAccessor::Method& method, Handle<mirror::Class> klass, ArtMethod* dst) { - uint32_t dex_method_idx = it.GetMemberIndex(); + const uint32_t dex_method_idx = method.GetIndex(); const DexFile::MethodId& method_id = dex_file.GetMethodId(dex_method_idx); const char* method_name = dex_file.StringDataByIdx(method_id.name_idx_); ScopedAssertNoThreadSuspension ants("LoadMethod"); dst->SetDexMethodIndex(dex_method_idx); dst->SetDeclaringClass(klass.Get()); - dst->SetCodeItemOffset(it.GetMethodCodeItemOffset()); + dst->SetCodeItemOffset(method.GetCodeItemOffset()); // Get access flags from the DexFile. If this is a boot class path class, // also set its runtime hidden API access flags. 
- uint32_t access_flags = it.GetMethodAccessFlags(); + uint32_t access_flags = method.GetAccessFlags(); if (klass->IsBootStrapClassLoaded()) { access_flags = - HiddenApiAccessFlags::EncodeForRuntime(access_flags, it.DecodeHiddenAccessFlags()); + HiddenApiAccessFlags::EncodeForRuntime(access_flags, method.DecodeHiddenAccessFlags()); } if (UNLIKELY(strcmp("finalize", method_name) == 0)) { @@ -3560,21 +3520,14 @@ ClassLinker::DexCacheData ClassLinker::FindDexCacheDataLocked(const DexFile& dex return DexCacheData(); } -mirror::Class* ClassLinker::CreatePrimitiveClass(Thread* self, Primitive::Type type) { - ObjPtr<mirror::Class> klass = +ObjPtr<mirror::Class> ClassLinker::CreatePrimitiveClass(Thread* self, Primitive::Type type) { + ObjPtr<mirror::Class> primitive_class = AllocClass(self, mirror::Class::PrimitiveClassSize(image_pointer_size_)); - if (UNLIKELY(klass == nullptr)) { + if (UNLIKELY(primitive_class == nullptr)) { self->AssertPendingOOMException(); return nullptr; } - return InitializePrimitiveClass(klass, type); -} - -mirror::Class* ClassLinker::InitializePrimitiveClass(ObjPtr<mirror::Class> primitive_class, - Primitive::Type type) { - CHECK(primitive_class != nullptr); // Must hold lock on object when initializing. - Thread* self = Thread::Current(); StackHandleScope<1> hs(self); Handle<mirror::Class> h_class(hs.NewHandle(primitive_class)); ObjectLock<mirror::Class> lock(self, h_class); @@ -3603,8 +3556,10 @@ mirror::Class* ClassLinker::InitializePrimitiveClass(ObjPtr<mirror::Class> primi // array class; that always comes from the base element class. // // Returns null with an exception raised on failure. 
-mirror::Class* ClassLinker::CreateArrayClass(Thread* self, const char* descriptor, size_t hash, - Handle<mirror::ClassLoader> class_loader) { +ObjPtr<mirror::Class> ClassLinker::CreateArrayClass(Thread* self, + const char* descriptor, + size_t hash, + Handle<mirror::ClassLoader> class_loader) { // Identify the underlying component type CHECK_EQ('[', descriptor[0]); StackHandleScope<2> hs(self); @@ -3668,8 +3623,6 @@ mirror::Class* ClassLinker::CreateArrayClass(Thread* self, const char* descripto new_class.Assign(GetClassRoot<mirror::ObjectArray<mirror::Object>>(this)); } else if (strcmp(descriptor, "[Ljava/lang/String;") == 0) { new_class.Assign(GetClassRoot<mirror::ObjectArray<mirror::String>>(this)); - } else if (strcmp(descriptor, "[C") == 0) { - new_class.Assign(GetClassRoot<mirror::CharArray>(this)); } else if (strcmp(descriptor, "[I") == 0) { new_class.Assign(GetClassRoot<mirror::IntArray>(this)); } else if (strcmp(descriptor, "[J") == 0) { @@ -3753,27 +3706,27 @@ mirror::Class* ClassLinker::CreateArrayClass(Thread* self, const char* descripto return existing.Ptr(); } -mirror::Class* ClassLinker::FindPrimitiveClass(char type) { +ObjPtr<mirror::Class> ClassLinker::FindPrimitiveClass(char type) { ObjPtr<mirror::ObjectArray<mirror::Class>> class_roots = GetClassRoots(); switch (type) { case 'B': - return GetClassRoot(ClassRoot::kPrimitiveByte, class_roots).Ptr(); + return GetClassRoot(ClassRoot::kPrimitiveByte, class_roots); case 'C': - return GetClassRoot(ClassRoot::kPrimitiveChar, class_roots).Ptr(); + return GetClassRoot(ClassRoot::kPrimitiveChar, class_roots); case 'D': - return GetClassRoot(ClassRoot::kPrimitiveDouble, class_roots).Ptr(); + return GetClassRoot(ClassRoot::kPrimitiveDouble, class_roots); case 'F': - return GetClassRoot(ClassRoot::kPrimitiveFloat, class_roots).Ptr(); + return GetClassRoot(ClassRoot::kPrimitiveFloat, class_roots); case 'I': - return GetClassRoot(ClassRoot::kPrimitiveInt, class_roots).Ptr(); + return 
GetClassRoot(ClassRoot::kPrimitiveInt, class_roots); case 'J': - return GetClassRoot(ClassRoot::kPrimitiveLong, class_roots).Ptr(); + return GetClassRoot(ClassRoot::kPrimitiveLong, class_roots); case 'S': - return GetClassRoot(ClassRoot::kPrimitiveShort, class_roots).Ptr(); + return GetClassRoot(ClassRoot::kPrimitiveShort, class_roots); case 'Z': - return GetClassRoot(ClassRoot::kPrimitiveBoolean, class_roots).Ptr(); + return GetClassRoot(ClassRoot::kPrimitiveBoolean, class_roots); case 'V': - return GetClassRoot(ClassRoot::kPrimitiveVoid, class_roots).Ptr(); + return GetClassRoot(ClassRoot::kPrimitiveVoid, class_roots); default: break; } @@ -3782,7 +3735,9 @@ mirror::Class* ClassLinker::FindPrimitiveClass(char type) { return nullptr; } -mirror::Class* ClassLinker::InsertClass(const char* descriptor, ObjPtr<mirror::Class> klass, size_t hash) { +ObjPtr<mirror::Class> ClassLinker::InsertClass(const char* descriptor, + ObjPtr<mirror::Class> klass, + size_t hash) { if (VLOG_IS_ON(class_linker)) { ObjPtr<mirror::DexCache> dex_cache = klass->GetDexCache(); std::string source; @@ -3837,16 +3792,16 @@ void ClassLinker::UpdateClassMethods(ObjPtr<mirror::Class> klass, Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(klass); } -mirror::Class* ClassLinker::LookupClass(Thread* self, - const char* descriptor, - ObjPtr<mirror::ClassLoader> class_loader) { +ObjPtr<mirror::Class> ClassLinker::LookupClass(Thread* self, + const char* descriptor, + ObjPtr<mirror::ClassLoader> class_loader) { return LookupClass(self, descriptor, ComputeModifiedUtf8Hash(descriptor), class_loader); } -mirror::Class* ClassLinker::LookupClass(Thread* self, - const char* descriptor, - size_t hash, - ObjPtr<mirror::ClassLoader> class_loader) { +ObjPtr<mirror::Class> ClassLinker::LookupClass(Thread* self, + const char* descriptor, + size_t hash, + ObjPtr<mirror::ClassLoader> class_loader) { ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_); ClassTable* const class_table = 
ClassTableForClassLoader(class_loader); if (class_table != nullptr) { @@ -4299,12 +4254,12 @@ void ClassLinker::ResolveMethodExceptionHandlerTypes(ArtMethod* method) { } } -mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable& soa, - jstring name, - jobjectArray interfaces, - jobject loader, - jobjectArray methods, - jobjectArray throws) { +ObjPtr<mirror::Class> ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable& soa, + jstring name, + jobjectArray interfaces, + jobject loader, + jobjectArray methods, + jobjectArray throws) { Thread* self = soa.Self(); StackHandleScope<10> hs(self); MutableHandle<mirror::Class> temp_klass(hs.NewHandle( @@ -4807,24 +4762,29 @@ bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass, this, *dex_class_def); const DexFile& dex_file = *dex_cache->GetDexFile(); - const uint8_t* class_data = dex_file.GetClassData(*dex_class_def); - ClassDataItemIterator field_it(dex_file, class_data); + if (value_it.HasNext()) { - DCHECK(field_it.HasNextStaticField()); + ClassAccessor accessor(dex_file, *dex_class_def); CHECK(can_init_statics); - for ( ; value_it.HasNext(); value_it.Next(), field_it.Next()) { - ArtField* field = ResolveField( - field_it.GetMemberIndex(), dex_cache, class_loader, /* is_static */ true); + for (const ClassAccessor::Field& field : accessor.GetStaticFields()) { + if (!value_it.HasNext()) { + break; + } + ArtField* art_field = ResolveField(field.GetIndex(), + dex_cache, + class_loader, + /* is_static */ true); if (Runtime::Current()->IsActiveTransaction()) { - value_it.ReadValueToField<true>(field); + value_it.ReadValueToField<true>(art_field); } else { - value_it.ReadValueToField<false>(field); + value_it.ReadValueToField<false>(art_field); } if (self->IsExceptionPending()) { break; } - DCHECK(!value_it.HasNext() || field_it.HasNextStaticField()); + value_it.Next(); } + DCHECK(self->IsExceptionPending() || !value_it.HasNext()); } } @@ -8147,8 +8107,7 @@ 
ObjPtr<mirror::MethodType> ClassLinker::ResolveMethodType( // other than by looking at the shorty ? const size_t num_method_args = strlen(dex_file.StringDataByIdx(proto_id.shorty_idx_)) - 1; - ObjPtr<mirror::Class> class_type = mirror::Class::GetJavaLangClass(); - ObjPtr<mirror::Class> array_of_class = FindArrayClass(self, &class_type); + ObjPtr<mirror::Class> array_of_class = GetClassRoot<mirror::ObjectArray<mirror::Class>>(this); Handle<mirror::ObjectArray<mirror::Class>> method_params(hs.NewHandle( mirror::ObjectArray<mirror::Class>::Alloc(self, array_of_class, num_method_args))); if (method_params == nullptr) { @@ -8254,11 +8213,10 @@ mirror::MethodHandle* ClassLinker::ResolveMethodHandleForField( } StackHandleScope<4> hs(self); - ObjPtr<mirror::Class> class_type = mirror::Class::GetJavaLangClass(); - ObjPtr<mirror::Class> array_of_class = FindArrayClass(self, &class_type); + ObjPtr<mirror::Class> array_of_class = GetClassRoot<mirror::ObjectArray<mirror::Class>>(this); Handle<mirror::ObjectArray<mirror::Class>> method_params(hs.NewHandle( mirror::ObjectArray<mirror::Class>::Alloc(self, array_of_class, num_params))); - if (UNLIKELY(method_params.Get() == nullptr)) { + if (UNLIKELY(method_params == nullptr)) { DCHECK(self->IsExceptionPending()); return nullptr; } @@ -8433,8 +8391,7 @@ mirror::MethodHandle* ClassLinker::ResolveMethodHandleForMethod( int32_t num_params = static_cast<int32_t>(shorty_length + receiver_count - 1); StackHandleScope<7> hs(self); - ObjPtr<mirror::Class> class_type = mirror::Class::GetJavaLangClass(); - ObjPtr<mirror::Class> array_of_class = FindArrayClass(self, &class_type); + ObjPtr<mirror::Class> array_of_class = GetClassRoot<mirror::ObjectArray<mirror::Class>>(this); Handle<mirror::ObjectArray<mirror::Class>> method_params(hs.NewHandle( mirror::ObjectArray<mirror::Class>::Alloc(self, array_of_class, num_params))); if (method_params.Get() == nullptr) { @@ -8932,19 +8889,19 @@ class ClassLinker::FindVirtualMethodHolderVisitor : public 
ClassVisitor { const PointerSize pointer_size_; }; -mirror::Class* ClassLinker::GetHoldingClassOfCopiedMethod(ArtMethod* method) { +ObjPtr<mirror::Class> ClassLinker::GetHoldingClassOfCopiedMethod(ArtMethod* method) { ScopedTrace trace(__FUNCTION__); // Since this function is slow, have a trace to notify people. CHECK(method->IsCopied()); FindVirtualMethodHolderVisitor visitor(method, image_pointer_size_); VisitClasses(&visitor); - return visitor.holder_.Ptr(); + return visitor.holder_; } -mirror::IfTable* ClassLinker::AllocIfTable(Thread* self, size_t ifcount) { - return down_cast<mirror::IfTable*>( +ObjPtr<mirror::IfTable> ClassLinker::AllocIfTable(Thread* self, size_t ifcount) { + return ObjPtr<mirror::IfTable>::DownCast(ObjPtr<mirror::ObjectArray<mirror::Object>>( mirror::IfTable::Alloc(self, GetClassRoot<mirror::ObjectArray<mirror::Object>>(this), - ifcount * mirror::IfTable::kMax)); + ifcount * mirror::IfTable::kMax))); } // Instantiate ResolveMethod. diff --git a/runtime/class_linker.h b/runtime/class_linker.h index afe5c99990..e14414483a 100644 --- a/runtime/class_linker.h +++ b/runtime/class_linker.h @@ -27,6 +27,7 @@ #include "base/enums.h" #include "base/macros.h" #include "base/mutex.h" +#include "dex/class_accessor.h" #include "dex/dex_cache_resolved_classes.h" #include "dex/dex_file.h" #include "dex/dex_file_types.h" @@ -146,22 +147,22 @@ class ClassLinker { // Finds a class by its descriptor, loading it if necessary. // If class_loader is null, searches boot_class_path_. - mirror::Class* FindClass(Thread* self, - const char* descriptor, - Handle<mirror::ClassLoader> class_loader) + ObjPtr<mirror::Class> FindClass(Thread* self, + const char* descriptor, + Handle<mirror::ClassLoader> class_loader) REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::dex_lock_); // Finds a class by its descriptor using the "system" class loader, ie by searching the // boot_class_path_. 
- mirror::Class* FindSystemClass(Thread* self, const char* descriptor) + ObjPtr<mirror::Class> FindSystemClass(Thread* self, const char* descriptor) REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::dex_lock_) { return FindClass(self, descriptor, ScopedNullHandle<mirror::ClassLoader>()); } // Finds the array class given for the element class. - mirror::Class* FindArrayClass(Thread* self, ObjPtr<mirror::Class>* element_class) + ObjPtr<mirror::Class> FindArrayClass(Thread* self, ObjPtr<mirror::Class>* element_class) REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::dex_lock_); @@ -171,20 +172,20 @@ class ClassLinker { } // Define a new a class based on a ClassDef from a DexFile - mirror::Class* DefineClass(Thread* self, - const char* descriptor, - size_t hash, - Handle<mirror::ClassLoader> class_loader, - const DexFile& dex_file, - const DexFile::ClassDef& dex_class_def) + ObjPtr<mirror::Class> DefineClass(Thread* self, + const char* descriptor, + size_t hash, + Handle<mirror::ClassLoader> class_loader, + const DexFile& dex_file, + const DexFile::ClassDef& dex_class_def) REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::dex_lock_); // Finds a class by its descriptor, returning null if it isn't wasn't loaded // by the given 'class_loader'. 
- mirror::Class* LookupClass(Thread* self, - const char* descriptor, - ObjPtr<mirror::ClassLoader> class_loader) + ObjPtr<mirror::Class> LookupClass(Thread* self, + const char* descriptor, + ObjPtr<mirror::ClassLoader> class_loader) REQUIRES(!Locks::classlinker_classes_lock_) REQUIRES_SHARED(Locks::mutator_lock_); @@ -193,7 +194,7 @@ class ClassLinker { REQUIRES(!Locks::classlinker_classes_lock_) REQUIRES_SHARED(Locks::mutator_lock_); - mirror::Class* FindPrimitiveClass(char type) REQUIRES_SHARED(Locks::mutator_lock_); + ObjPtr<mirror::Class> FindPrimitiveClass(char type) REQUIRES_SHARED(Locks::mutator_lock_); void DumpForSigQuit(std::ostream& os) REQUIRES(!Locks::classlinker_classes_lock_); @@ -456,7 +457,7 @@ class ClassLinker { REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); - mirror::IfTable* AllocIfTable(Thread* self, size_t ifcount) + ObjPtr<mirror::IfTable> AllocIfTable(Thread* self, size_t ifcount) REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); @@ -483,12 +484,12 @@ class ClassLinker { REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::dex_lock_); - mirror::Class* CreateProxyClass(ScopedObjectAccessAlreadyRunnable& soa, - jstring name, - jobjectArray interfaces, - jobject loader, - jobjectArray methods, - jobjectArray throws) + ObjPtr<mirror::Class> CreateProxyClass(ScopedObjectAccessAlreadyRunnable& soa, + jstring name, + jobjectArray interfaces, + jobject loader, + jobjectArray methods, + jobjectArray throws) REQUIRES_SHARED(Locks::mutator_lock_); std::string GetDescriptorForProxy(ObjPtr<mirror::Class> proxy_class) REQUIRES_SHARED(Locks::mutator_lock_); @@ -531,7 +532,9 @@ class ClassLinker { // Attempts to insert a class into a class table. Returns null if // the class was inserted, otherwise returns an existing class with // the same descriptor and ClassLoader. 
- mirror::Class* InsertClass(const char* descriptor, ObjPtr<mirror::Class> klass, size_t hash) + ObjPtr<mirror::Class> InsertClass(const char* descriptor, + ObjPtr<mirror::Class> klass, + size_t hash) REQUIRES(!Locks::classlinker_classes_lock_) REQUIRES_SHARED(Locks::mutator_lock_); @@ -659,7 +662,7 @@ class ClassLinker { REQUIRES(!Locks::dex_lock_); // Get the actual holding class for a copied method. Pretty slow, don't call often. - mirror::Class* GetHoldingClassOfCopiedMethod(ArtMethod* method) + ObjPtr<mirror::Class> GetHoldingClassOfCopiedMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_); // Returns null if not found. @@ -763,16 +766,16 @@ class ClassLinker { REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_); // For early bootstrapping by Init - mirror::Class* AllocClass(Thread* self, - ObjPtr<mirror::Class> java_lang_Class, - uint32_t class_size) + ObjPtr<mirror::Class> AllocClass(Thread* self, + ObjPtr<mirror::Class> java_lang_Class, + uint32_t class_size) REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); - // Alloc* convenience functions to avoid needing to pass in mirror::Class* - // values that are known to the ClassLinker such as - // kObjectArrayClass and kJavaLangString etc. - mirror::Class* AllocClass(Thread* self, uint32_t class_size) + // Alloc* convenience functions to avoid needing to pass in ObjPtr<mirror::Class> + // values that are known to the ClassLinker such as classes corresponding to + // ClassRoot::kObjectArrayClass and ClassRoot::kJavaLangString etc. 
+ ObjPtr<mirror::Class> AllocClass(Thread* self, uint32_t class_size) REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); @@ -790,18 +793,14 @@ class ClassLinker { REQUIRES(!Locks::dex_lock_) REQUIRES(!Roles::uninterruptible_); - mirror::Class* CreatePrimitiveClass(Thread* self, Primitive::Type type) - REQUIRES_SHARED(Locks::mutator_lock_) - REQUIRES(!Roles::uninterruptible_); - mirror::Class* InitializePrimitiveClass(ObjPtr<mirror::Class> primitive_class, - Primitive::Type type) + ObjPtr<mirror::Class> CreatePrimitiveClass(Thread* self, Primitive::Type type) REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); - mirror::Class* CreateArrayClass(Thread* self, - const char* descriptor, - size_t hash, - Handle<mirror::ClassLoader> class_loader) + ObjPtr<mirror::Class> CreateArrayClass(Thread* self, + const char* descriptor, + size_t hash, + Handle<mirror::ClassLoader> class_loader) REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_); @@ -827,18 +826,14 @@ class ClassLinker { const DexFile::ClassDef& dex_class_def, Handle<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_); - void LoadClassMembers(Thread* self, - const DexFile& dex_file, - const uint8_t* class_data, - Handle<mirror::Class> klass) - REQUIRES_SHARED(Locks::mutator_lock_); - void LoadField(const ClassDataItemIterator& it, Handle<mirror::Class> klass, ArtField* dst) + void LoadField(const ClassAccessor::Field& field, Handle<mirror::Class> klass, ArtField* dst) REQUIRES_SHARED(Locks::mutator_lock_); void LoadMethod(const DexFile& dex_file, - const ClassDataItemIterator& it, - Handle<mirror::Class> klass, ArtMethod* dst) + const ClassAccessor::Method& method, + Handle<mirror::Class> klass, + ArtMethod* dst) REQUIRES_SHARED(Locks::mutator_lock_); void FixupStaticTrampolines(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_); @@ -893,10 +888,10 @@ class ClassLinker { // Finds a class by its descriptor, 
returning null if it wasn't loaded // by the given 'class_loader'. Uses the provided hash for the descriptor. - mirror::Class* LookupClass(Thread* self, - const char* descriptor, - size_t hash, - ObjPtr<mirror::ClassLoader> class_loader) + ObjPtr<mirror::Class> LookupClass(Thread* self, + const char* descriptor, + size_t hash, + ObjPtr<mirror::ClassLoader> class_loader) REQUIRES(!Locks::classlinker_classes_lock_) REQUIRES_SHARED(Locks::mutator_lock_); @@ -1167,7 +1162,9 @@ class ClassLinker { // when resolution has occurred. This happens in mirror::Class::SetStatus. As resolution may // retire a class, the version of the class in the table is returned and this may differ from // the class passed in. - mirror::Class* EnsureResolved(Thread* self, const char* descriptor, ObjPtr<mirror::Class> klass) + ObjPtr<mirror::Class> EnsureResolved(Thread* self, + const char* descriptor, + ObjPtr<mirror::Class> klass) WARN_UNUSED REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::dex_lock_); diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc index 48ec6b6c27..5d420aae04 100644 --- a/runtime/class_linker_test.cc +++ b/runtime/class_linker_test.cc @@ -198,7 +198,8 @@ class ClassLinkerTest : public CommonRuntimeTest { ASSERT_STREQ(array_descriptor.c_str(), array->GetDescriptor(&temp)); EXPECT_TRUE(array->GetSuperClass() != nullptr); Thread* self = Thread::Current(); - EXPECT_EQ(class_linker_->FindSystemClass(self, "Ljava/lang/Object;"), array->GetSuperClass()); + EXPECT_OBJ_PTR_EQ(class_linker_->FindSystemClass(self, "Ljava/lang/Object;"), + array->GetSuperClass()); EXPECT_TRUE(array->HasSuperClass()); ASSERT_TRUE(array->GetComponentType() != nullptr); ASSERT_GT(strlen(array->GetComponentType()->GetDescriptor(&temp)), 0U); @@ -1079,27 +1080,27 @@ TEST_F(ClassLinkerTest, ValidatePrimitiveArrayElementsOffset) { ScopedObjectAccess soa(Thread::Current()); StackHandleScope<5> hs(soa.Self()); Handle<mirror::LongArray>
long_array(hs.NewHandle(mirror::LongArray::Alloc(soa.Self(), 0))); - EXPECT_EQ(class_linker_->FindSystemClass(soa.Self(), "[J"), long_array->GetClass()); + EXPECT_OBJ_PTR_EQ(class_linker_->FindSystemClass(soa.Self(), "[J"), long_array->GetClass()); uintptr_t data_offset = reinterpret_cast<uintptr_t>(long_array->GetData()); EXPECT_TRUE(IsAligned<8>(data_offset)); // Longs require 8 byte alignment Handle<mirror::DoubleArray> double_array(hs.NewHandle(mirror::DoubleArray::Alloc(soa.Self(), 0))); - EXPECT_EQ(class_linker_->FindSystemClass(soa.Self(), "[D"), double_array->GetClass()); + EXPECT_OBJ_PTR_EQ(class_linker_->FindSystemClass(soa.Self(), "[D"), double_array->GetClass()); data_offset = reinterpret_cast<uintptr_t>(double_array->GetData()); EXPECT_TRUE(IsAligned<8>(data_offset)); // Doubles require 8 byte alignment Handle<mirror::IntArray> int_array(hs.NewHandle(mirror::IntArray::Alloc(soa.Self(), 0))); - EXPECT_EQ(class_linker_->FindSystemClass(soa.Self(), "[I"), int_array->GetClass()); + EXPECT_OBJ_PTR_EQ(class_linker_->FindSystemClass(soa.Self(), "[I"), int_array->GetClass()); data_offset = reinterpret_cast<uintptr_t>(int_array->GetData()); EXPECT_TRUE(IsAligned<4>(data_offset)); // Ints require 4 byte alignment Handle<mirror::CharArray> char_array(hs.NewHandle(mirror::CharArray::Alloc(soa.Self(), 0))); - EXPECT_EQ(class_linker_->FindSystemClass(soa.Self(), "[C"), char_array->GetClass()); + EXPECT_OBJ_PTR_EQ(class_linker_->FindSystemClass(soa.Self(), "[C"), char_array->GetClass()); data_offset = reinterpret_cast<uintptr_t>(char_array->GetData()); EXPECT_TRUE(IsAligned<2>(data_offset)); // Chars require 2 byte alignment Handle<mirror::ShortArray> short_array(hs.NewHandle(mirror::ShortArray::Alloc(soa.Self(), 0))); - EXPECT_EQ(class_linker_->FindSystemClass(soa.Self(), "[S"), short_array->GetClass()); + EXPECT_OBJ_PTR_EQ(class_linker_->FindSystemClass(soa.Self(), "[S"), short_array->GetClass()); data_offset = reinterpret_cast<uintptr_t>(short_array->GetData()); 
EXPECT_TRUE(IsAligned<2>(data_offset)); // Shorts require 2 byte alignment diff --git a/runtime/dex/dex_file_annotations.cc b/runtime/dex/dex_file_annotations.cc index ffa0a9065a..5cb08dc278 100644 --- a/runtime/dex/dex_file_annotations.cc +++ b/runtime/dex/dex_file_annotations.cc @@ -849,7 +849,8 @@ ObjPtr<mirror::Object> GetAnnotationValue(const ClassData& klass, return annotation_value.value_.GetL(); } -mirror::ObjectArray<mirror::String>* GetSignatureValue(const ClassData& klass, +static ObjPtr<mirror::ObjectArray<mirror::String>> GetSignatureValue( + const ClassData& klass, const DexFile::AnnotationSetItem* annotation_set) REQUIRES_SHARED(Locks::mutator_lock_) { const DexFile& dex_file = klass.GetDexFile(); @@ -860,12 +861,9 @@ mirror::ObjectArray<mirror::String>* GetSignatureValue(const ClassData& klass, if (annotation_item == nullptr) { return nullptr; } - ObjPtr<mirror::Class> string_class = mirror::String::GetJavaLangString(); - Handle<mirror::Class> string_array_class(hs.NewHandle( - Runtime::Current()->GetClassLinker()->FindArrayClass(Thread::Current(), &string_class))); - if (string_array_class == nullptr) { - return nullptr; - } + Handle<mirror::Class> string_array_class = + hs.NewHandle(GetClassRoot<mirror::ObjectArray<mirror::String>>()); + DCHECK(string_array_class != nullptr); ObjPtr<mirror::Object> obj = GetAnnotationValue(klass, annotation_item, "value", string_array_class, DexFile::kDexAnnotationArray); @@ -880,19 +878,16 @@ ObjPtr<mirror::ObjectArray<mirror::Class>> GetThrowsValue( const DexFile::AnnotationSetItem* annotation_set) REQUIRES_SHARED(Locks::mutator_lock_) { const DexFile& dex_file = klass.GetDexFile(); - StackHandleScope<1> hs(Thread::Current()); const DexFile::AnnotationItem* annotation_item = SearchAnnotationSet(dex_file, annotation_set, "Ldalvik/annotation/Throws;", DexFile::kDexVisibilitySystem); if (annotation_item == nullptr) { return nullptr; } - ObjPtr<mirror::Class> class_class = mirror::Class::GetJavaLangClass(); - 
Handle<mirror::Class> class_array_class(hs.NewHandle( - Runtime::Current()->GetClassLinker()->FindArrayClass(Thread::Current(), &class_class))); - if (class_array_class == nullptr) { - return nullptr; - } + StackHandleScope<1> hs(Thread::Current()); + Handle<mirror::Class> class_array_class = + hs.NewHandle(GetClassRoot<mirror::ObjectArray<mirror::Class>>()); + DCHECK(class_array_class != nullptr); ObjPtr<mirror::Object> obj = GetAnnotationValue(klass, annotation_item, "value", class_array_class, DexFile::kDexAnnotationArray); @@ -1020,7 +1015,7 @@ ObjPtr<mirror::ObjectArray<mirror::Object>> GetAnnotationsForField(ArtField* fie return ProcessAnnotationSet(field_class, annotation_set, DexFile::kDexVisibilityRuntime); } -mirror::ObjectArray<mirror::String>* GetSignatureAnnotationForField(ArtField* field) { +ObjPtr<mirror::ObjectArray<mirror::String>> GetSignatureAnnotationForField(ArtField* field) { const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForField(field); if (annotation_set == nullptr) { return nullptr; @@ -1171,9 +1166,10 @@ ObjPtr<mirror::Object> GetAnnotationForMethodParameter(ArtMethod* method, annotation_class); } -bool GetParametersMetadataForMethod(ArtMethod* method, - MutableHandle<mirror::ObjectArray<mirror::String>>* names, - MutableHandle<mirror::IntArray>* access_flags) { +bool GetParametersMetadataForMethod( + ArtMethod* method, + /*out*/ MutableHandle<mirror::ObjectArray<mirror::String>>* names, + /*out*/ MutableHandle<mirror::IntArray>* access_flags) { const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method); if (annotation_set == nullptr) { @@ -1193,12 +1189,10 @@ bool GetParametersMetadataForMethod(ArtMethod* method, StackHandleScope<4> hs(Thread::Current()); // Extract the parameters' names String[]. 
- ObjPtr<mirror::Class> string_class = mirror::String::GetJavaLangString(); - Handle<mirror::Class> string_array_class(hs.NewHandle( - Runtime::Current()->GetClassLinker()->FindArrayClass(Thread::Current(), &string_class))); - if (UNLIKELY(string_array_class == nullptr)) { - return false; - } + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + Handle<mirror::Class> string_array_class = + hs.NewHandle(GetClassRoot<mirror::ObjectArray<mirror::String>>(class_linker)); + DCHECK(string_array_class != nullptr); ClassData data(method); Handle<mirror::Object> names_obj = @@ -1212,10 +1206,8 @@ bool GetParametersMetadataForMethod(ArtMethod* method, } // Extract the parameters' access flags int[]. - Handle<mirror::Class> int_array_class(hs.NewHandle(GetClassRoot<mirror::IntArray>())); - if (UNLIKELY(int_array_class == nullptr)) { - return false; - } + Handle<mirror::Class> int_array_class(hs.NewHandle(GetClassRoot<mirror::IntArray>(class_linker))); + DCHECK(int_array_class != nullptr); Handle<mirror::Object> access_flags_obj = hs.NewHandle(GetAnnotationValue(data, annotation_item, @@ -1226,12 +1218,12 @@ bool GetParametersMetadataForMethod(ArtMethod* method, return false; } - names->Assign(names_obj.Get()->AsObjectArray<mirror::String>()); - access_flags->Assign(access_flags_obj.Get()->AsIntArray()); + names->Assign(names_obj->AsObjectArray<mirror::String>()); + access_flags->Assign(access_flags_obj->AsIntArray()); return true; } -mirror::ObjectArray<mirror::String>* GetSignatureAnnotationForMethod(ArtMethod* method) { +ObjPtr<mirror::ObjectArray<mirror::String>> GetSignatureAnnotationForMethod(ArtMethod* method) { const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method); if (annotation_set == nullptr) { return nullptr; @@ -1345,12 +1337,9 @@ ObjPtr<mirror::ObjectArray<mirror::Class>> GetDeclaredClasses(Handle<mirror::Cla return nullptr; } StackHandleScope<1> hs(Thread::Current()); - ObjPtr<mirror::Class> class_class = 
mirror::Class::GetJavaLangClass(); - Handle<mirror::Class> class_array_class(hs.NewHandle( - Runtime::Current()->GetClassLinker()->FindArrayClass(hs.Self(), &class_class))); - if (class_array_class == nullptr) { - return nullptr; - } + Handle<mirror::Class> class_array_class = + hs.NewHandle(GetClassRoot<mirror::ObjectArray<mirror::Class>>()); + DCHECK(class_array_class != nullptr); ObjPtr<mirror::Object> obj = GetAnnotationValue(data, annotation_item, "value", class_array_class, DexFile::kDexAnnotationArray); @@ -1446,7 +1435,7 @@ ObjPtr<mirror::Object> GetEnclosingMethod(Handle<mirror::Class> klass) { DexFile::kDexAnnotationMethod); } -bool GetInnerClass(Handle<mirror::Class> klass, ObjPtr<mirror::String>* name) { +bool GetInnerClass(Handle<mirror::Class> klass, /*out*/ ObjPtr<mirror::String>* name) { ClassData data(klass); const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data); if (annotation_set == nullptr) { @@ -1513,7 +1502,8 @@ bool GetInnerClassFlags(Handle<mirror::Class> klass, uint32_t* flags) { return true; } -mirror::ObjectArray<mirror::String>* GetSignatureAnnotationForClass(Handle<mirror::Class> klass) { +ObjPtr<mirror::ObjectArray<mirror::String>> GetSignatureAnnotationForClass( + Handle<mirror::Class> klass) { ClassData data(klass); const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data); if (annotation_set == nullptr) { diff --git a/runtime/dex/dex_file_annotations.h b/runtime/dex/dex_file_annotations.h index 9645a7febd..bde7891091 100644 --- a/runtime/dex/dex_file_annotations.h +++ b/runtime/dex/dex_file_annotations.h @@ -41,7 +41,7 @@ ObjPtr<mirror::Object> GetAnnotationForField(ArtField* field, REQUIRES_SHARED(Locks::mutator_lock_); ObjPtr<mirror::ObjectArray<mirror::Object>> GetAnnotationsForField(ArtField* field) REQUIRES_SHARED(Locks::mutator_lock_); -mirror::ObjectArray<mirror::String>* GetSignatureAnnotationForField(ArtField* field) +ObjPtr<mirror::ObjectArray<mirror::String>> 
GetSignatureAnnotationForField(ArtField* field) REQUIRES_SHARED(Locks::mutator_lock_); bool IsFieldAnnotationPresent(ArtField* field, Handle<mirror::Class> annotation_class) REQUIRES_SHARED(Locks::mutator_lock_); @@ -64,11 +64,11 @@ ObjPtr<mirror::Object> GetAnnotationForMethodParameter(ArtMethod* method, uint32_t parameter_idx, Handle<mirror::Class> annotation_class) REQUIRES_SHARED(Locks::mutator_lock_); -bool GetParametersMetadataForMethod(ArtMethod* method, - MutableHandle<mirror::ObjectArray<mirror::String>>* names, - MutableHandle<mirror::IntArray>* access_flags) - REQUIRES_SHARED(Locks::mutator_lock_); -mirror::ObjectArray<mirror::String>* GetSignatureAnnotationForMethod(ArtMethod* method) +bool GetParametersMetadataForMethod( + ArtMethod* method, + /*out*/ MutableHandle<mirror::ObjectArray<mirror::String>>* names, + /*out*/ MutableHandle<mirror::IntArray>* access_flags) REQUIRES_SHARED(Locks::mutator_lock_); +ObjPtr<mirror::ObjectArray<mirror::String>> GetSignatureAnnotationForMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_); // Check whether `method` is annotated with `annotation_class`. 
// If `lookup_in_resolved_boot_classes` is true, look up any of the @@ -101,12 +101,12 @@ ObjPtr<mirror::Class> GetEnclosingClass(Handle<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_); ObjPtr<mirror::Object> GetEnclosingMethod(Handle<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_); -bool GetInnerClass(Handle<mirror::Class> klass, ObjPtr<mirror::String>* name) +bool GetInnerClass(Handle<mirror::Class> klass, /*out*/ ObjPtr<mirror::String>* name) REQUIRES_SHARED(Locks::mutator_lock_); bool GetInnerClassFlags(Handle<mirror::Class> klass, uint32_t* flags) REQUIRES_SHARED(Locks::mutator_lock_); -mirror::ObjectArray<mirror::String>* GetSignatureAnnotationForClass(Handle<mirror::Class> klass) - REQUIRES_SHARED(Locks::mutator_lock_); +ObjPtr<mirror::ObjectArray<mirror::String>> GetSignatureAnnotationForClass( + Handle<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_); const char* GetSourceDebugExtension(Handle<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_); bool IsClassAnnotationPresent(Handle<mirror::Class> klass, diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc index ed5885f224..5f7594c68d 100644 --- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc @@ -25,6 +25,7 @@ #include "mirror/class-inl.h" #include "mirror/object-inl.h" #include "mirror/object_array-inl.h" +#include "mirror/string-inl.h" namespace art { diff --git a/runtime/gc/space/space_test.h b/runtime/gc/space/space_test.h index d5861ed5d8..c94b666695 100644 --- a/runtime/gc/space/space_test.h +++ b/runtime/gc/space/space_test.h @@ -53,13 +53,11 @@ class SpaceTest : public Super { } mirror::Class* GetByteArrayClass(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) { - StackHandleScope<1> hs(self); - auto null_loader(hs.NewHandle<mirror::ClassLoader>(nullptr)); if (byte_array_class_ == nullptr) { - mirror::Class* 
byte_array_class = - Runtime::Current()->GetClassLinker()->FindClass(self, "[B", null_loader); + ObjPtr<mirror::Class> byte_array_class = + Runtime::Current()->GetClassLinker()->FindSystemClass(self, "[B"); EXPECT_TRUE(byte_array_class != nullptr); - byte_array_class_ = self->GetJniEnv()->NewLocalRef(byte_array_class); + byte_array_class_ = self->GetJniEnv()->NewLocalRef(byte_array_class.Ptr()); EXPECT_TRUE(byte_array_class_ != nullptr); } return self->DecodeJObject(byte_array_class_)->AsClass(); diff --git a/runtime/hidden_api_test.cc b/runtime/hidden_api_test.cc index ab0c2901ff..a41d28492d 100644 --- a/runtime/hidden_api_test.cc +++ b/runtime/hidden_api_test.cc @@ -325,8 +325,8 @@ TEST_F(HiddenApiTest, CheckMemberSignatureForProxyClass) { ASSERT_TRUE(h_iface != nullptr); // Create the proxy class. - std::vector<mirror::Class*> interfaces; - interfaces.push_back(h_iface.Get()); + std::vector<Handle<mirror::Class>> interfaces; + interfaces.push_back(h_iface); Handle<mirror::Class> proxyClass = hs.NewHandle(proxy_test::GenerateProxyClass( soa, jclass_loader_, runtime_->GetClassLinker(), "$Proxy1234", interfaces)); ASSERT_TRUE(proxyClass != nullptr); diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc index fab350942a..56658c3005 100644 --- a/runtime/interpreter/interpreter_common.cc +++ b/runtime/interpreter/interpreter_common.cc @@ -1128,9 +1128,9 @@ static ObjPtr<mirror::MethodType> BuildCallSiteForBootstrapMethod(Thread* self, StackHandleScope<2> hs(self); // Create array for parameter types. 
- ObjPtr<mirror::Class> class_type = mirror::Class::GetJavaLangClass(); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - ObjPtr<mirror::Class> class_array_type = class_linker->FindArrayClass(self, &class_type); + ObjPtr<mirror::Class> class_array_type = + GetClassRoot<mirror::ObjectArray<mirror::Class>>(class_linker); Handle<mirror::ObjectArray<mirror::Class>> ptypes = hs.NewHandle( mirror::ObjectArray<mirror::Class>::Alloc(self, class_array_type, diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h index 37234e1462..0ee780d32d 100644 --- a/runtime/interpreter/interpreter_common.h +++ b/runtime/interpreter/interpreter_common.h @@ -35,6 +35,7 @@ #include "base/macros.h" #include "base/mutex.h" #include "class_linker-inl.h" +#include "class_root.h" #include "common_dex_operations.h" #include "common_throws.h" #include "dex/dex_file-inl.h" @@ -328,7 +329,7 @@ static inline ObjPtr<mirror::String> ResolveString(Thread* self, ShadowFrame& shadow_frame, dex::StringIndex string_idx) REQUIRES_SHARED(Locks::mutator_lock_) { - ObjPtr<mirror::Class> java_lang_string_class = mirror::String::GetJavaLangString(); + ObjPtr<mirror::Class> java_lang_string_class = GetClassRoot<mirror::String>(); if (UNLIKELY(!java_lang_string_class->IsInitialized())) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); StackHandleScope<1> hs(self); diff --git a/runtime/interpreter/mterp/arm64/header.S b/runtime/interpreter/mterp/arm64/header.S index 7017dd149c..0722804265 100644 --- a/runtime/interpreter/mterp/arm64/header.S +++ b/runtime/interpreter/mterp/arm64/header.S @@ -339,6 +339,7 @@ codes. */ .macro ENTRY name .type \name, #function + .hidden \name // Hide this as a global symbol, so we do not incur plt calls. 
.global \name /* Cache alignment for function entry */ .balign 16 diff --git a/runtime/interpreter/mterp/out/mterp_arm64.S b/runtime/interpreter/mterp/out/mterp_arm64.S index d5374d2a8a..70f71ff2bc 100644 --- a/runtime/interpreter/mterp/out/mterp_arm64.S +++ b/runtime/interpreter/mterp/out/mterp_arm64.S @@ -346,6 +346,7 @@ codes. */ .macro ENTRY name .type \name, #function + .hidden \name // Hide this as a global symbol, so we do not incur plt calls. .global \name /* Cache alignment for function entry */ .balign 16 diff --git a/runtime/interpreter/mterp/out/mterp_x86.S b/runtime/interpreter/mterp/out/mterp_x86.S index 6f4752f312..1eacfe8736 100644 --- a/runtime/interpreter/mterp/out/mterp_x86.S +++ b/runtime/interpreter/mterp/out/mterp_x86.S @@ -106,11 +106,13 @@ unspecified registers or condition codes. #define SIZE(start,end) // Mac OS' symbols have an _ prefix. #define SYMBOL(name) _ ## name + #define ASM_HIDDEN .private_extern #else #define MACRO_LITERAL(value) $value #define FUNCTION_TYPE(name) .type name, @function #define SIZE(start,end) .size start, .-end #define SYMBOL(name) name + #define ASM_HIDDEN .hidden #endif .macro PUSH _reg @@ -339,6 +341,7 @@ unspecified registers or condition codes. */ .text + ASM_HIDDEN SYMBOL(ExecuteMterpImpl) .global SYMBOL(ExecuteMterpImpl) FUNCTION_TYPE(ExecuteMterpImpl) diff --git a/runtime/interpreter/mterp/out/mterp_x86_64.S b/runtime/interpreter/mterp/out/mterp_x86_64.S index fca2515698..ea8f483e95 100644 --- a/runtime/interpreter/mterp/out/mterp_x86_64.S +++ b/runtime/interpreter/mterp/out/mterp_x86_64.S @@ -102,11 +102,13 @@ unspecified registers or condition codes. #define SIZE(start,end) // Mac OS' symbols have an _ prefix. 
#define SYMBOL(name) _ ## name + #define ASM_HIDDEN .private_extern #else #define MACRO_LITERAL(value) $value #define FUNCTION_TYPE(name) .type name, @function #define SIZE(start,end) .size start, .-end #define SYMBOL(name) name + #define ASM_HIDDEN .hidden #endif .macro PUSH _reg @@ -325,6 +327,7 @@ unspecified registers or condition codes. */ .text + ASM_HIDDEN SYMBOL(ExecuteMterpImpl) .global SYMBOL(ExecuteMterpImpl) FUNCTION_TYPE(ExecuteMterpImpl) diff --git a/runtime/interpreter/mterp/x86/entry.S b/runtime/interpreter/mterp/x86/entry.S index 324637bf9a..939dc61d95 100644 --- a/runtime/interpreter/mterp/x86/entry.S +++ b/runtime/interpreter/mterp/x86/entry.S @@ -18,6 +18,7 @@ */ .text + ASM_HIDDEN SYMBOL(ExecuteMterpImpl) .global SYMBOL(ExecuteMterpImpl) FUNCTION_TYPE(ExecuteMterpImpl) diff --git a/runtime/interpreter/mterp/x86/header.S b/runtime/interpreter/mterp/x86/header.S index 9d826c2ce2..6f31228005 100644 --- a/runtime/interpreter/mterp/x86/header.S +++ b/runtime/interpreter/mterp/x86/header.S @@ -99,11 +99,13 @@ unspecified registers or condition codes. #define SIZE(start,end) // Mac OS' symbols have an _ prefix. 
#define SYMBOL(name) _ ## name + #define ASM_HIDDEN .private_extern #else #define MACRO_LITERAL(value) $$value #define FUNCTION_TYPE(name) .type name, @function #define SIZE(start,end) .size start, .-end #define SYMBOL(name) name + #define ASM_HIDDEN .hidden #endif .macro PUSH _reg diff --git a/runtime/interpreter/mterp/x86_64/entry.S b/runtime/interpreter/mterp/x86_64/entry.S index 2f69226206..b08419b219 100644 --- a/runtime/interpreter/mterp/x86_64/entry.S +++ b/runtime/interpreter/mterp/x86_64/entry.S @@ -18,6 +18,7 @@ */ .text + ASM_HIDDEN SYMBOL(ExecuteMterpImpl) .global SYMBOL(ExecuteMterpImpl) FUNCTION_TYPE(ExecuteMterpImpl) diff --git a/runtime/interpreter/mterp/x86_64/header.S b/runtime/interpreter/mterp/x86_64/header.S index 55638106ed..4ebe95e987 100644 --- a/runtime/interpreter/mterp/x86_64/header.S +++ b/runtime/interpreter/mterp/x86_64/header.S @@ -95,11 +95,13 @@ unspecified registers or condition codes. #define SIZE(start,end) // Mac OS' symbols have an _ prefix. #define SYMBOL(name) _ ## name + #define ASM_HIDDEN .private_extern #else #define MACRO_LITERAL(value) $$value #define FUNCTION_TYPE(name) .type name, @function #define SIZE(start,end) .size start, .-end #define SYMBOL(name) name + #define ASM_HIDDEN .hidden #endif .macro PUSH _reg diff --git a/runtime/interpreter/unstarted_runtime_test.cc b/runtime/interpreter/unstarted_runtime_test.cc index 9bb760c6b7..fee837572b 100644 --- a/runtime/interpreter/unstarted_runtime_test.cc +++ b/runtime/interpreter/unstarted_runtime_test.cc @@ -196,7 +196,7 @@ class UnstartedRuntimeTest : public CommonRuntimeTest { // Prepare for aborts. Aborts assume that the exception class is already resolved, as the // loading code doesn't work under transactions. 
void PrepareForAborts() REQUIRES_SHARED(Locks::mutator_lock_) { - mirror::Object* result = Runtime::Current()->GetClassLinker()->FindClass( + ObjPtr<mirror::Object> result = Runtime::Current()->GetClassLinker()->FindClass( Thread::Current(), Transaction::kAbortExceptionSignature, ScopedNullHandle<mirror::ClassLoader>()); @@ -388,7 +388,7 @@ TEST_F(UnstartedRuntimeTest, StringCharAt) { TEST_F(UnstartedRuntimeTest, StringInit) { Thread* self = Thread::Current(); ScopedObjectAccess soa(self); - ObjPtr<mirror::Class> klass = mirror::String::GetJavaLangString(); + ObjPtr<mirror::Class> klass = GetClassRoot<mirror::String>(); ArtMethod* method = klass->FindConstructor("(Ljava/lang/String;)V", Runtime::Current()->GetClassLinker()->GetImagePointerSize()); @@ -537,7 +537,7 @@ TEST_F(UnstartedRuntimeTest, SystemArrayCopyObjectArrayTest) { tmp, false, object_class.Get(), - mirror::String::GetJavaLangString(), + GetClassRoot<mirror::String>(), hs_src, 1, hs_dst, @@ -551,7 +551,7 @@ TEST_F(UnstartedRuntimeTest, SystemArrayCopyObjectArrayTest) { { StackHandleScope<3> hs_src(self); hs_src.NewHandle(mirror::String::AllocFromModifiedUtf8(self, "1")); - hs_src.NewHandle(mirror::String::GetJavaLangString()); + hs_src.NewHandle(GetClassRoot<mirror::String>()); hs_src.NewHandle(mirror::String::AllocFromModifiedUtf8(self, "3")); StackHandleScope<3> hs_dst(self); @@ -568,7 +568,7 @@ TEST_F(UnstartedRuntimeTest, SystemArrayCopyObjectArrayTest) { tmp, true, object_class.Get(), - mirror::String::GetJavaLangString(), + GetClassRoot<mirror::String>(), hs_src, 0, hs_dst, diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc index 9c02dce98a..b7b779ce31 100644 --- a/runtime/jit/jit.cc +++ b/runtime/jit/jit.cc @@ -515,8 +515,7 @@ bool Jit::MaybeDoOnStackReplacement(Thread* thread, // this dex pc. 
} else { for (uint16_t vreg = 0; vreg < number_of_vregs; ++vreg) { - DexRegisterLocation::Kind location = - vreg_map.GetLocationKind(vreg, number_of_vregs, code_info); + DexRegisterLocation::Kind location = vreg_map.GetLocationKind(vreg); if (location == DexRegisterLocation::Kind::kNone) { // Dex register is dead or uninitialized. continue; @@ -530,9 +529,7 @@ bool Jit::MaybeDoOnStackReplacement(Thread* thread, DCHECK_EQ(location, DexRegisterLocation::Kind::kInStack); int32_t vreg_value = shadow_frame->GetVReg(vreg); - int32_t slot_offset = vreg_map.GetStackOffsetInBytes(vreg, - number_of_vregs, - code_info); + int32_t slot_offset = vreg_map.GetStackOffsetInBytes(vreg); DCHECK_LT(slot_offset, static_cast<int32_t>(frame_size)); DCHECK_GT(slot_offset, 0); (reinterpret_cast<int32_t*>(memory))[slot_offset / sizeof(int32_t)] = vreg_value; diff --git a/runtime/jit/profiling_info_test.cc b/runtime/jit/profiling_info_test.cc new file mode 100644 index 0000000000..106a80a568 --- /dev/null +++ b/runtime/jit/profiling_info_test.cc @@ -0,0 +1,329 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <gtest/gtest.h> +#include <stdio.h> + +#include "art_method-inl.h" +#include "base/unix_file/fd_file.h" +#include "class_linker-inl.h" +#include "common_runtime_test.h" +#include "dex/dex_file.h" +#include "dex/dex_file_loader.h" +#include "dex/method_reference.h" +#include "dex/type_reference.h" +#include "handle_scope-inl.h" +#include "linear_alloc.h" +#include "mirror/class-inl.h" +#include "mirror/class_loader.h" +#include "profile/profile_compilation_info.h" +#include "scoped_thread_state_change-inl.h" +#include "ziparchive/zip_writer.h" + +namespace art { + +using Hotness = ProfileCompilationInfo::MethodHotness; + +static constexpr size_t kMaxMethodIds = 65535; + +class ProfileCompilationInfoTest : public CommonRuntimeTest { + public: + void PostRuntimeCreate() OVERRIDE { + allocator_.reset(new ArenaAllocator(Runtime::Current()->GetArenaPool())); + } + + protected: + std::vector<ArtMethod*> GetVirtualMethods(jobject class_loader, + const std::string& clazz) { + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + Thread* self = Thread::Current(); + ScopedObjectAccess soa(self); + StackHandleScope<1> hs(self); + Handle<mirror::ClassLoader> h_loader( + hs.NewHandle(self->DecodeJObject(class_loader)->AsClassLoader())); + ObjPtr<mirror::Class> klass = class_linker->FindClass(self, clazz.c_str(), h_loader); + + const auto pointer_size = class_linker->GetImagePointerSize(); + std::vector<ArtMethod*> methods; + for (auto& m : klass->GetVirtualMethods(pointer_size)) { + methods.push_back(&m); + } + return methods; + } + + bool AddMethod(const std::string& dex_location, + uint32_t checksum, + uint16_t method_index, + ProfileCompilationInfo* info) { + return info->AddMethodIndex(Hotness::kFlagHot, + dex_location, + checksum, + method_index, + kMaxMethodIds); + } + + bool AddMethod(const std::string& dex_location, + uint32_t checksum, + uint16_t method_index, + const ProfileCompilationInfo::OfflineProfileMethodInfo& pmi, + 
ProfileCompilationInfo* info) { + return info->AddMethod( + dex_location, checksum, method_index, kMaxMethodIds, pmi, Hotness::kFlagPostStartup); + } + + bool AddClass(const std::string& dex_location, + uint32_t checksum, + dex::TypeIndex type_index, + ProfileCompilationInfo* info) { + DexCacheResolvedClasses classes(dex_location, dex_location, checksum, kMaxMethodIds); + classes.AddClass(type_index); + return info->AddClasses({classes}); + } + + uint32_t GetFd(const ScratchFile& file) { + return static_cast<uint32_t>(file.GetFd()); + } + + bool SaveProfilingInfo( + const std::string& filename, + const std::vector<ArtMethod*>& methods, + const std::set<DexCacheResolvedClasses>& resolved_classes, + Hotness::Flag flags) { + ProfileCompilationInfo info; + std::vector<ProfileMethodInfo> profile_methods; + ScopedObjectAccess soa(Thread::Current()); + for (ArtMethod* method : methods) { + profile_methods.emplace_back( + MethodReference(method->GetDexFile(), method->GetDexMethodIndex())); + } + if (!info.AddMethods(profile_methods, flags) || !info.AddClasses(resolved_classes)) { + return false; + } + if (info.GetNumberOfMethods() != profile_methods.size()) { + return false; + } + ProfileCompilationInfo file_profile; + if (!file_profile.Load(filename, false)) { + return false; + } + if (!info.MergeWith(file_profile)) { + return false; + } + + return info.Save(filename, nullptr); + } + + // Saves the given art methods to a profile backed by 'filename' and adds + // some fake inline caches to it. The added inline caches are returned in + // the out map `profile_methods_map`. 
+ bool SaveProfilingInfoWithFakeInlineCaches( + const std::string& filename, + const std::vector<ArtMethod*>& methods, + Hotness::Flag flags, + /*out*/ SafeMap<ArtMethod*, ProfileMethodInfo>* profile_methods_map) { + ProfileCompilationInfo info; + std::vector<ProfileMethodInfo> profile_methods; + ScopedObjectAccess soa(Thread::Current()); + for (ArtMethod* method : methods) { + std::vector<ProfileMethodInfo::ProfileInlineCache> caches; + // Monomorphic + for (uint16_t dex_pc = 0; dex_pc < 11; dex_pc++) { + std::vector<TypeReference> classes; + classes.emplace_back(method->GetDexFile(), dex::TypeIndex(0)); + caches.emplace_back(dex_pc, /*is_missing_types*/false, classes); + } + // Polymorphic + for (uint16_t dex_pc = 11; dex_pc < 22; dex_pc++) { + std::vector<TypeReference> classes; + for (uint16_t k = 0; k < InlineCache::kIndividualCacheSize / 2; k++) { + classes.emplace_back(method->GetDexFile(), dex::TypeIndex(k)); + } + caches.emplace_back(dex_pc, /*is_missing_types*/false, classes); + } + // Megamorphic + for (uint16_t dex_pc = 22; dex_pc < 33; dex_pc++) { + std::vector<TypeReference> classes; + for (uint16_t k = 0; k < 2 * InlineCache::kIndividualCacheSize; k++) { + classes.emplace_back(method->GetDexFile(), dex::TypeIndex(k)); + } + caches.emplace_back(dex_pc, /*is_missing_types*/false, classes); + } + // Missing types + for (uint16_t dex_pc = 33; dex_pc < 44; dex_pc++) { + std::vector<TypeReference> classes; + caches.emplace_back(dex_pc, /*is_missing_types*/true, classes); + } + ProfileMethodInfo pmi(MethodReference(method->GetDexFile(), + method->GetDexMethodIndex()), + caches); + profile_methods.push_back(pmi); + profile_methods_map->Put(method, pmi); + } + + if (!info.AddMethods(profile_methods, flags) + || info.GetNumberOfMethods() != profile_methods.size()) { + return false; + } + return info.Save(filename, nullptr); + } + + // Creates an inline cache which will be destructed at the end of the test. 
+ ProfileCompilationInfo::InlineCacheMap* CreateInlineCacheMap() { + used_inline_caches.emplace_back(new ProfileCompilationInfo::InlineCacheMap( + std::less<uint16_t>(), allocator_->Adapter(kArenaAllocProfile))); + return used_inline_caches.back().get(); + } + + ProfileCompilationInfo::OfflineProfileMethodInfo ConvertProfileMethodInfo( + const ProfileMethodInfo& pmi) { + ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap(); + ProfileCompilationInfo::OfflineProfileMethodInfo offline_pmi(ic_map); + SafeMap<DexFile*, uint8_t> dex_map; // dex files to profile index + for (const auto& inline_cache : pmi.inline_caches) { + ProfileCompilationInfo::DexPcData& dex_pc_data = + ic_map->FindOrAdd( + inline_cache.dex_pc, ProfileCompilationInfo::DexPcData(allocator_.get()))->second; + if (inline_cache.is_missing_types) { + dex_pc_data.SetIsMissingTypes(); + } + for (const auto& class_ref : inline_cache.classes) { + uint8_t dex_profile_index = dex_map.FindOrAdd(const_cast<DexFile*>(class_ref.dex_file), + static_cast<uint8_t>(dex_map.size()))->second; + dex_pc_data.AddClass(dex_profile_index, class_ref.TypeIndex()); + if (dex_profile_index >= offline_pmi.dex_references.size()) { + // This is a new dex. + const std::string& dex_key = ProfileCompilationInfo::GetProfileDexFileKey( + class_ref.dex_file->GetLocation()); + offline_pmi.dex_references.emplace_back(dex_key, + class_ref.dex_file->GetLocationChecksum(), + class_ref.dex_file->NumMethodIds()); + } + } + } + return offline_pmi; + } + + // Cannot sizeof the actual arrays so hard code the values here. + // They should not change anyway. + static constexpr int kProfileMagicSize = 4; + static constexpr int kProfileVersionSize = 4; + + std::unique_ptr<ArenaAllocator> allocator_; + + // Cache of inline caches generated during tests. + // This makes it easier to pass data between different utilities and ensure that + // caches are destructed at the end of the test. 
+ std::vector<std::unique_ptr<ProfileCompilationInfo::InlineCacheMap>> used_inline_caches; +}; + +TEST_F(ProfileCompilationInfoTest, SaveArtMethods) { + ScratchFile profile; + + Thread* self = Thread::Current(); + jobject class_loader; + { + ScopedObjectAccess soa(self); + class_loader = LoadDex("ProfileTestMultiDex"); + } + ASSERT_NE(class_loader, nullptr); + + // Save virtual methods from Main. + std::set<DexCacheResolvedClasses> resolved_classes; + std::vector<ArtMethod*> main_methods = GetVirtualMethods(class_loader, "LMain;"); + ASSERT_TRUE(SaveProfilingInfo( + profile.GetFilename(), main_methods, resolved_classes, Hotness::kFlagPostStartup)); + + // Check that what we saved is in the profile. + ProfileCompilationInfo info1; + ASSERT_TRUE(info1.Load(GetFd(profile))); + ASSERT_EQ(info1.GetNumberOfMethods(), main_methods.size()); + { + ScopedObjectAccess soa(self); + for (ArtMethod* m : main_methods) { + Hotness h = info1.GetMethodHotness(MethodReference(m->GetDexFile(), m->GetDexMethodIndex())); + ASSERT_TRUE(h.IsHot()); + ASSERT_TRUE(h.IsPostStartup()); + } + } + + // Save virtual methods from Second. + std::vector<ArtMethod*> second_methods = GetVirtualMethods(class_loader, "LSecond;"); + ASSERT_TRUE(SaveProfilingInfo( + profile.GetFilename(), second_methods, resolved_classes, Hotness::kFlagStartup)); + + // Check that what we saved is in the profile (methods from Main and Second). 
+ ProfileCompilationInfo info2; + ASSERT_TRUE(profile.GetFile()->ResetOffset()); + ASSERT_TRUE(info2.Load(GetFd(profile))); + ASSERT_EQ(info2.GetNumberOfMethods(), main_methods.size() + second_methods.size()); + { + ScopedObjectAccess soa(self); + for (ArtMethod* m : main_methods) { + Hotness h = info2.GetMethodHotness(MethodReference(m->GetDexFile(), m->GetDexMethodIndex())); + ASSERT_TRUE(h.IsHot()); + ASSERT_TRUE(h.IsPostStartup()); + } + for (ArtMethod* m : second_methods) { + Hotness h = info2.GetMethodHotness(MethodReference(m->GetDexFile(), m->GetDexMethodIndex())); + ASSERT_TRUE(h.IsHot()); + ASSERT_TRUE(h.IsStartup()); + } + } +} + +TEST_F(ProfileCompilationInfoTest, SaveArtMethodsWithInlineCaches) { + ScratchFile profile; + + Thread* self = Thread::Current(); + jobject class_loader; + { + ScopedObjectAccess soa(self); + class_loader = LoadDex("ProfileTestMultiDex"); + } + ASSERT_NE(class_loader, nullptr); + + // Save virtual methods from Main. + std::set<DexCacheResolvedClasses> resolved_classes; + std::vector<ArtMethod*> main_methods = GetVirtualMethods(class_loader, "LMain;"); + + SafeMap<ArtMethod*, ProfileMethodInfo> profile_methods_map; + ASSERT_TRUE(SaveProfilingInfoWithFakeInlineCaches( + profile.GetFilename(), main_methods, Hotness::kFlagStartup, &profile_methods_map)); + + // Check that what we saved is in the profile. 
+ ProfileCompilationInfo info; + ASSERT_TRUE(info.Load(GetFd(profile))); + ASSERT_EQ(info.GetNumberOfMethods(), main_methods.size()); + { + ScopedObjectAccess soa(self); + for (ArtMethod* m : main_methods) { + Hotness h = info.GetMethodHotness(MethodReference(m->GetDexFile(), m->GetDexMethodIndex())); + ASSERT_TRUE(h.IsHot()); + ASSERT_TRUE(h.IsStartup()); + const ProfileMethodInfo& pmi = profile_methods_map.find(m)->second; + std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> offline_pmi = + info.GetMethod(m->GetDexFile()->GetLocation(), + m->GetDexFile()->GetLocationChecksum(), + m->GetDexMethodIndex()); + ASSERT_TRUE(offline_pmi != nullptr); + ProfileCompilationInfo::OfflineProfileMethodInfo converted_pmi = + ConvertProfileMethodInfo(pmi); + ASSERT_EQ(converted_pmi, *offline_pmi); + } + } +} + +} // namespace art diff --git a/runtime/method_handles_test.cc b/runtime/method_handles_test.cc index a9473421cb..0db9551265 100644 --- a/runtime/method_handles_test.cc +++ b/runtime/method_handles_test.cc @@ -17,6 +17,7 @@ #include "method_handles.h" #include "class_linker-inl.h" +#include "class_root.h" #include "common_runtime_test.h" #include "handle_scope-inl.h" #include "jvalue-inl.h" @@ -49,12 +50,11 @@ namespace { REQUIRES_SHARED(Locks::mutator_lock_) { ClassLinker* cl = Runtime::Current()->GetClassLinker(); StackHandleScope<2> hs(self); - ObjPtr<mirror::Class> class_type = mirror::Class::GetJavaLangClass(); - ObjPtr<mirror::Class> class_array_type = cl->FindArrayClass(self, &class_type); + ObjPtr<mirror::Class> class_array_type = GetClassRoot<mirror::ObjectArray<mirror::Class>>(cl); auto parameter_types = hs.NewHandle( mirror::ObjectArray<mirror::Class>::Alloc(self, class_array_type, 1)); parameter_types->Set(0, parameter_type.Get()); - Handle<mirror::Class> void_class = hs.NewHandle(cl->FindPrimitiveClass('V')); + Handle<mirror::Class> void_class = hs.NewHandle(GetClassRoot(ClassRoot::kPrimitiveVoid, cl)); return mirror::MethodType::Create(self, 
void_class, parameter_types); } diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h index ab50973e89..5328ad979f 100644 --- a/runtime/mirror/class-inl.h +++ b/runtime/mirror/class-inl.h @@ -145,6 +145,7 @@ inline ArraySlice<ArtMethod> Class::GetDeclaredMethodsSliceUnchecked(PointerSize GetDirectMethodsStartOffset(), GetCopiedMethodsStartOffset()); } + template<VerifyObjectFlags kVerifyFlags> inline ArraySlice<ArtMethod> Class::GetDeclaredVirtualMethodsSlice(PointerSize pointer_size) { DCHECK(IsLoaded() || IsErroneous()); @@ -281,8 +282,7 @@ inline ArtMethod* Class::GetVirtualMethodUnchecked(size_t i, PointerSize pointer return &GetVirtualMethodsSliceUnchecked(pointer_size)[i]; } -template<VerifyObjectFlags kVerifyFlags, - ReadBarrierOption kReadBarrierOption> +template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption> inline PointerArray* Class::GetVTable() { DCHECK(IsLoaded<kVerifyFlags>() || IsErroneous<kVerifyFlags>()); return GetFieldObject<PointerArray, kVerifyFlags, kReadBarrierOption>( @@ -302,8 +302,7 @@ inline bool Class::HasVTable() { return GetVTable() != nullptr || ShouldHaveEmbeddedVTable(); } - template<VerifyObjectFlags kVerifyFlags, - ReadBarrierOption kReadBarrierOption> +template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption> inline int32_t Class::GetVTableLength() { if (ShouldHaveEmbeddedVTable<kVerifyFlags, kReadBarrierOption>()) { return GetEmbeddedVTableLength(); @@ -312,15 +311,15 @@ inline int32_t Class::GetVTableLength() { GetVTable<kVerifyFlags, kReadBarrierOption>()->GetLength() : 0; } - template<VerifyObjectFlags kVerifyFlags, - ReadBarrierOption kReadBarrierOption> +template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption> inline ArtMethod* Class::GetVTableEntry(uint32_t i, PointerSize pointer_size) { if (ShouldHaveEmbeddedVTable<kVerifyFlags, kReadBarrierOption>()) { return GetEmbeddedVTableEntry(i, pointer_size); } auto* vtable = GetVTable<kVerifyFlags, 
kReadBarrierOption>(); DCHECK(vtable != nullptr); - return vtable->template GetElementPtrSize<ArtMethod*, kVerifyFlags, kReadBarrierOption>(i, pointer_size); + return vtable->template GetElementPtrSize<ArtMethod*, kVerifyFlags, kReadBarrierOption>( + i, pointer_size); } inline int32_t Class::GetEmbeddedVTableLength() { @@ -410,7 +409,7 @@ inline void Class::SetObjectSize(uint32_t new_object_size) { // Object[] = int[] --> false // inline bool Class::IsArrayAssignableFromArray(ObjPtr<Class> src) { - DCHECK(IsArrayClass()) << PrettyClass(); + DCHECK(IsArrayClass()) << PrettyClass(); DCHECK(src->IsArrayClass()) << src->PrettyClass(); return GetComponentType()->IsAssignableFrom(src->GetComponentType()); } @@ -622,16 +621,14 @@ inline ArtMethod* Class::FindVirtualMethodForVirtualOrInterface(ArtMethod* metho return FindVirtualMethodForVirtual(method, pointer_size); } -template<VerifyObjectFlags kVerifyFlags, - ReadBarrierOption kReadBarrierOption> +template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption> inline IfTable* Class::GetIfTable() { ObjPtr<IfTable> ret = GetFieldObject<IfTable, kVerifyFlags, kReadBarrierOption>(IfTableOffset()); DCHECK(ret != nullptr) << PrettyClass(this); return ret.Ptr(); } -template<VerifyObjectFlags kVerifyFlags, - ReadBarrierOption kReadBarrierOption> +template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption> inline int32_t Class::GetIfTableCount() { return GetIfTable<kVerifyFlags, kReadBarrierOption>()->Count(); } @@ -734,7 +731,7 @@ inline String* Class::GetName() { } inline void Class::SetName(ObjPtr<String> name) { - SetFieldObjectTransaction(OFFSET_OF_OBJECT_MEMBER(Class, name_), name); + SetFieldObjectTransaction(OFFSET_OF_OBJECT_MEMBER(Class, name_), name); } template<VerifyObjectFlags kVerifyFlags> @@ -887,8 +884,8 @@ inline bool Class::DescriptorEquals(const char* match) { inline void Class::AssertInitializedOrInitializingInThread(Thread* self) { if (kIsDebugBuild && !IsInitialized()) { 
CHECK(IsInitializing()) << PrettyClass() << " is not initializing: " << GetStatus(); - CHECK_EQ(GetClinitThreadId(), self->GetTid()) << PrettyClass() - << " is initializing in a different thread"; + CHECK_EQ(GetClinitThreadId(), self->GetTid()) + << PrettyClass() << " is initializing in a different thread"; } } @@ -964,18 +961,15 @@ inline ArraySlice<ArtMethod> Class::GetDirectMethods(PointerSize pointer_size) { return GetDirectMethodsSliceUnchecked(pointer_size); } -inline ArraySlice<ArtMethod> Class::GetDeclaredMethods( - PointerSize pointer_size) { +inline ArraySlice<ArtMethod> Class::GetDeclaredMethods(PointerSize pointer_size) { return GetDeclaredMethodsSliceUnchecked(pointer_size); } -inline ArraySlice<ArtMethod> Class::GetDeclaredVirtualMethods( - PointerSize pointer_size) { +inline ArraySlice<ArtMethod> Class::GetDeclaredVirtualMethods(PointerSize pointer_size) { return GetDeclaredVirtualMethodsSliceUnchecked(pointer_size); } -inline ArraySlice<ArtMethod> Class::GetVirtualMethods( - PointerSize pointer_size) { +inline ArraySlice<ArtMethod> Class::GetVirtualMethods(PointerSize pointer_size) { CheckPointerSize(pointer_size); return GetVirtualMethodsSliceUnchecked(pointer_size); } diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc index ad6b37b86a..fea77644cb 100644 --- a/runtime/mirror/class.cc +++ b/runtime/mirror/class.cc @@ -39,6 +39,7 @@ #include "object-refvisitor-inl.h" #include "object_array-inl.h" #include "object_lock.h" +#include "string-inl.h" #include "runtime.h" #include "thread.h" #include "throwable.h" @@ -1250,7 +1251,7 @@ ArtMethod* Class::GetDeclaredConstructor( uint32_t Class::Depth() { uint32_t depth = 0; - for (ObjPtr<Class> klass = this; klass->GetSuperClass() != nullptr; klass = klass->GetSuperClass()) { + for (ObjPtr<Class> cls = this; cls->GetSuperClass() != nullptr; cls = cls->GetSuperClass()) { depth++; } return depth; @@ -1460,12 +1461,12 @@ template<VerifyObjectFlags kVerifyFlags> void Class::GetAccessFlagsDCheck() { 
// circularity issue during loading the names of its members DCHECK(IsIdxLoaded<kVerifyFlags>() || IsRetired<kVerifyFlags>() || IsErroneous<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>() || - this == String::GetJavaLangString()) + this == GetClassRoot<String>()) << "IsIdxLoaded=" << IsIdxLoaded<kVerifyFlags>() << " IsRetired=" << IsRetired<kVerifyFlags>() << " IsErroneous=" << IsErroneous<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>() - << " IsString=" << (this == String::GetJavaLangString()) + << " IsString=" << (this == GetClassRoot<String>()) << " status= " << GetStatus<kVerifyFlags>() << " descriptor=" << PrettyDescriptor(); } diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h index 7d5f539576..cf2d5a438c 100644 --- a/runtime/mirror/class.h +++ b/runtime/mirror/class.h @@ -933,12 +933,10 @@ class MANAGED Class FINAL : public Object { ArtMethod* FindConstructor(const StringPiece& signature, PointerSize pointer_size) REQUIRES_SHARED(Locks::mutator_lock_); - ArtMethod* FindDeclaredVirtualMethodByName(const StringPiece& name, - PointerSize pointer_size) + ArtMethod* FindDeclaredVirtualMethodByName(const StringPiece& name, PointerSize pointer_size) REQUIRES_SHARED(Locks::mutator_lock_); - ArtMethod* FindDeclaredDirectMethodByName(const StringPiece& name, - PointerSize pointer_size) + ArtMethod* FindDeclaredDirectMethodByName(const StringPiece& name, PointerSize pointer_size) REQUIRES_SHARED(Locks::mutator_lock_); ArtMethod* FindClassInitializer(PointerSize pointer_size) REQUIRES_SHARED(Locks::mutator_lock_); @@ -1197,10 +1195,7 @@ class MANAGED Class FINAL : public Object { void AssertInitializedOrInitializingInThread(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_); - Class* CopyOf(Thread* self, - int32_t new_length, - ImTable* imt, - PointerSize pointer_size) + Class* CopyOf(Thread* self, int32_t new_length, ImTable* imt, PointerSize pointer_size) REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); 
// For proxy class only. diff --git a/runtime/mirror/method_type.cc b/runtime/mirror/method_type.cc index a8be8b7019..483fff5b26 100644 --- a/runtime/mirror/method_type.cc +++ b/runtime/mirror/method_type.cc @@ -28,9 +28,7 @@ namespace { ObjPtr<ObjectArray<Class>> AllocatePTypesArray(Thread* self, int count) REQUIRES_SHARED(Locks::mutator_lock_) { - ObjPtr<Class> class_type = Class::GetJavaLangClass(); - ObjPtr<Class> class_array_type = - Runtime::Current()->GetClassLinker()->FindArrayClass(self, &class_type); + ObjPtr<Class> class_array_type = GetClassRoot<mirror::ObjectArray<mirror::Class>>(); return ObjectArray<Class>::Alloc(self, class_array_type, count); } diff --git a/runtime/mirror/method_type_test.cc b/runtime/mirror/method_type_test.cc index 16bfc73e04..2bdea72f14 100644 --- a/runtime/mirror/method_type_test.cc +++ b/runtime/mirror/method_type_test.cc @@ -22,6 +22,7 @@ #include "class-inl.h" #include "class_linker-inl.h" #include "class_loader.h" +#include "class_root.h" #include "common_runtime_test.h" #include "handle_scope-inl.h" #include "object_array-inl.h" @@ -53,8 +54,8 @@ static mirror::MethodType* CreateMethodType(const std::string& return_type, soa.Self(), FullyQualifiedType(return_type).c_str(), boot_class_loader)); CHECK(return_clazz != nullptr); - ObjPtr<mirror::Class> class_type = mirror::Class::GetJavaLangClass(); - ObjPtr<mirror::Class> class_array_type = class_linker->FindArrayClass(self, &class_type); + ObjPtr<mirror::Class> class_array_type = + GetClassRoot<mirror::ObjectArray<mirror::Class>>(class_linker); Handle<mirror::ObjectArray<mirror::Class>> param_classes = hs.NewHandle( mirror::ObjectArray<mirror::Class>::Alloc(self, class_array_type, param_types.size())); diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h index c7561f4278..cd822c244e 100644 --- a/runtime/mirror/object-inl.h +++ b/runtime/mirror/object-inl.h @@ -37,7 +37,7 @@ #include "read_barrier-inl.h" #include "reference.h" #include "runtime.h" -#include 
"string-inl.h" +#include "string.h" #include "throwable.h" namespace art { @@ -412,17 +412,21 @@ inline int8_t Object::GetFieldByteVolatile(MemberOffset field_offset) { return GetFieldByte<kVerifyFlags, true>(field_offset); } -template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags, - bool kIsVolatile> +template<bool kTransactionActive, + bool kCheckTransaction, + VerifyObjectFlags kVerifyFlags, + bool kIsVolatile> inline void Object::SetFieldBoolean(MemberOffset field_offset, uint8_t new_value) REQUIRES_SHARED(Locks::mutator_lock_) { if (kCheckTransaction) { DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction()); } if (kTransactionActive) { - Runtime::Current()->RecordWriteFieldBoolean(this, field_offset, - GetFieldBoolean<kVerifyFlags, kIsVolatile>(field_offset), - kIsVolatile); + Runtime::Current()->RecordWriteFieldBoolean( + this, + field_offset, + GetFieldBoolean<kVerifyFlags, kIsVolatile>(field_offset), + kIsVolatile); } if (kVerifyFlags & kVerifyThis) { VerifyObject(this); @@ -430,17 +434,20 @@ inline void Object::SetFieldBoolean(MemberOffset field_offset, uint8_t new_value SetField<uint8_t, kIsVolatile>(field_offset, new_value); } -template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags, - bool kIsVolatile> +template<bool kTransactionActive, + bool kCheckTransaction, + VerifyObjectFlags kVerifyFlags, + bool kIsVolatile> inline void Object::SetFieldByte(MemberOffset field_offset, int8_t new_value) REQUIRES_SHARED(Locks::mutator_lock_) { if (kCheckTransaction) { DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction()); } if (kTransactionActive) { - Runtime::Current()->RecordWriteFieldByte(this, field_offset, - GetFieldByte<kVerifyFlags, kIsVolatile>(field_offset), - kIsVolatile); + Runtime::Current()->RecordWriteFieldByte(this, + field_offset, + GetFieldByte<kVerifyFlags, kIsVolatile>(field_offset), + kIsVolatile); } if (kVerifyFlags & kVerifyThis) { 
VerifyObject(this); @@ -486,16 +493,19 @@ inline int16_t Object::GetFieldShortVolatile(MemberOffset field_offset) { return GetFieldShort<kVerifyFlags, true>(field_offset); } -template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags, - bool kIsVolatile> +template<bool kTransactionActive, + bool kCheckTransaction, + VerifyObjectFlags kVerifyFlags, + bool kIsVolatile> inline void Object::SetFieldChar(MemberOffset field_offset, uint16_t new_value) { if (kCheckTransaction) { DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction()); } if (kTransactionActive) { - Runtime::Current()->RecordWriteFieldChar(this, field_offset, - GetFieldChar<kVerifyFlags, kIsVolatile>(field_offset), - kIsVolatile); + Runtime::Current()->RecordWriteFieldChar(this, + field_offset, + GetFieldChar<kVerifyFlags, kIsVolatile>(field_offset), + kIsVolatile); } if (kVerifyFlags & kVerifyThis) { VerifyObject(this); @@ -503,16 +513,19 @@ inline void Object::SetFieldChar(MemberOffset field_offset, uint16_t new_value) SetField<uint16_t, kIsVolatile>(field_offset, new_value); } -template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags, - bool kIsVolatile> +template<bool kTransactionActive, + bool kCheckTransaction, + VerifyObjectFlags kVerifyFlags, + bool kIsVolatile> inline void Object::SetFieldShort(MemberOffset field_offset, int16_t new_value) { if (kCheckTransaction) { DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction()); } if (kTransactionActive) { - Runtime::Current()->RecordWriteFieldChar(this, field_offset, - GetFieldShort<kVerifyFlags, kIsVolatile>(field_offset), - kIsVolatile); + Runtime::Current()->RecordWriteFieldChar(this, + field_offset, + GetFieldShort<kVerifyFlags, kIsVolatile>(field_offset), + kIsVolatile); } if (kVerifyFlags & kVerifyThis) { VerifyObject(this); @@ -532,14 +545,17 @@ inline void Object::SetFieldShortVolatile(MemberOffset field_offset, int16_t new field_offset, new_value); } 
-template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags, - bool kIsVolatile> +template<bool kTransactionActive, + bool kCheckTransaction, + VerifyObjectFlags kVerifyFlags, + bool kIsVolatile> inline void Object::SetField32(MemberOffset field_offset, int32_t new_value) { if (kCheckTransaction) { DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction()); } if (kTransactionActive) { - Runtime::Current()->RecordWriteField32(this, field_offset, + Runtime::Current()->RecordWriteField32(this, + field_offset, GetField32<kVerifyFlags, kIsVolatile>(field_offset), kIsVolatile); } @@ -567,7 +583,8 @@ inline void Object::SetField32Transaction(MemberOffset field_offset, int32_t new template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags> inline bool Object::CasFieldWeakSequentiallyConsistent32(MemberOffset field_offset, - int32_t old_value, int32_t new_value) { + int32_t old_value, + int32_t new_value) { if (kCheckTransaction) { DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction()); } @@ -585,7 +602,8 @@ inline bool Object::CasFieldWeakSequentiallyConsistent32(MemberOffset field_offs template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags> inline bool Object::CasFieldWeakAcquire32(MemberOffset field_offset, - int32_t old_value, int32_t new_value) { + int32_t old_value, + int32_t new_value) { if (kCheckTransaction) { DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction()); } @@ -603,7 +621,8 @@ inline bool Object::CasFieldWeakAcquire32(MemberOffset field_offset, template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags> inline bool Object::CasFieldWeakRelease32(MemberOffset field_offset, - int32_t old_value, int32_t new_value) { + int32_t old_value, + int32_t new_value) { if (kCheckTransaction) { DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction()); } @@ -621,7 +640,8 @@ inline 
bool Object::CasFieldWeakRelease32(MemberOffset field_offset, template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags> inline bool Object::CasFieldStrongSequentiallyConsistent32(MemberOffset field_offset, - int32_t old_value, int32_t new_value) { + int32_t old_value, + int32_t new_value) { if (kCheckTransaction) { DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction()); } @@ -637,14 +657,17 @@ inline bool Object::CasFieldStrongSequentiallyConsistent32(MemberOffset field_of return atomic_addr->CompareAndSetStrongSequentiallyConsistent(old_value, new_value); } -template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags, - bool kIsVolatile> +template<bool kTransactionActive, + bool kCheckTransaction, + VerifyObjectFlags kVerifyFlags, + bool kIsVolatile> inline void Object::SetField64(MemberOffset field_offset, int64_t new_value) { if (kCheckTransaction) { DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction()); } if (kTransactionActive) { - Runtime::Current()->RecordWriteField64(this, field_offset, + Runtime::Current()->RecordWriteField64(this, + field_offset, GetField64<kVerifyFlags, kIsVolatile>(field_offset), kIsVolatile); } @@ -678,7 +701,8 @@ inline kSize Object::GetFieldAcquire(MemberOffset field_offset) { template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags> inline bool Object::CasFieldWeakSequentiallyConsistent64(MemberOffset field_offset, - int64_t old_value, int64_t new_value) { + int64_t old_value, + int64_t new_value) { if (kCheckTransaction) { DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction()); } @@ -695,7 +719,8 @@ inline bool Object::CasFieldWeakSequentiallyConsistent64(MemberOffset field_offs template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags> inline bool Object::CasFieldStrongSequentiallyConsistent64(MemberOffset field_offset, - int64_t old_value, int64_t 
new_value) { + int64_t old_value, + int64_t new_value) { if (kCheckTransaction) { DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction()); } @@ -710,7 +735,9 @@ inline bool Object::CasFieldStrongSequentiallyConsistent64(MemberOffset field_of return atomic_addr->CompareAndSetStrongSequentiallyConsistent(old_value, new_value); } -template<class T, VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption, +template<class T, + VerifyObjectFlags kVerifyFlags, + ReadBarrierOption kReadBarrierOption, bool kIsVolatile> inline T* Object::GetFieldObject(MemberOffset field_offset) { if (kVerifyFlags & kVerifyThis) { @@ -733,8 +760,10 @@ inline T* Object::GetFieldObjectVolatile(MemberOffset field_offset) { return GetFieldObject<T, kVerifyFlags, kReadBarrierOption, true>(field_offset); } -template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags, - bool kIsVolatile> +template<bool kTransactionActive, + bool kCheckTransaction, + VerifyObjectFlags kVerifyFlags, + bool kIsVolatile> inline void Object::SetFieldObjectWithoutWriteBarrier(MemberOffset field_offset, ObjPtr<Object> new_value) { if (kCheckTransaction) { @@ -760,8 +789,10 @@ inline void Object::SetFieldObjectWithoutWriteBarrier(MemberOffset field_offset, objref_addr->Assign<kIsVolatile>(new_value.Ptr()); } -template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags, - bool kIsVolatile> +template<bool kTransactionActive, + bool kCheckTransaction, + VerifyObjectFlags kVerifyFlags, + bool kIsVolatile> inline void Object::SetFieldObject(MemberOffset field_offset, ObjPtr<Object> new_value) { SetFieldObjectWithoutWriteBarrier<kTransactionActive, kCheckTransaction, kVerifyFlags, kIsVolatile>(field_offset, new_value); diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc index 0e03e3741c..4240e702b5 100644 --- a/runtime/mirror/object.cc +++ b/runtime/mirror/object.cc @@ -271,7 +271,7 @@ void 
Object::CheckFieldAssignmentImpl(MemberOffset field_offset, ObjPtr<Object> } } LOG(FATAL) << "Failed to find field for assignment to " << reinterpret_cast<void*>(this) - << " of type " << c->PrettyDescriptor() << " at offset " << field_offset; + << " of type " << c->PrettyDescriptor() << " at offset " << field_offset; UNREACHABLE(); } diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h index 82045c7b66..8584b8a56f 100644 --- a/runtime/mirror/object.h +++ b/runtime/mirror/object.h @@ -282,13 +282,16 @@ class MANAGED LOCKABLE Object { bool IsPhantomReferenceInstance() REQUIRES_SHARED(Locks::mutator_lock_); // Accessor for Java type fields. - template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, - ReadBarrierOption kReadBarrierOption = kWithReadBarrier, bool kIsVolatile = false> + template<class T, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, + ReadBarrierOption kReadBarrierOption = kWithReadBarrier, + bool kIsVolatile = false> ALWAYS_INLINE T* GetFieldObject(MemberOffset field_offset) REQUIRES_SHARED(Locks::mutator_lock_); - template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, - ReadBarrierOption kReadBarrierOption = kWithReadBarrier> + template<class T, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, + ReadBarrierOption kReadBarrierOption = kWithReadBarrier> ALWAYS_INLINE T* GetFieldObjectVolatile(MemberOffset field_offset) REQUIRES_SHARED(Locks::mutator_lock_); @@ -310,11 +313,11 @@ class MANAGED LOCKABLE Object { template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - ALWAYS_INLINE void SetFieldObjectVolatile(MemberOffset field_offset, - ObjPtr<Object> new_value) + ALWAYS_INLINE void SetFieldObjectVolatile(MemberOffset field_offset, ObjPtr<Object> new_value) REQUIRES_SHARED(Locks::mutator_lock_); - template<bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, + template<bool kCheckTransaction = true, + 
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> ALWAYS_INLINE void SetFieldObjectTransaction(MemberOffset field_offset, ObjPtr<Object> new_value) REQUIRES_SHARED(Locks::mutator_lock_); @@ -416,23 +419,29 @@ class MANAGED LOCKABLE Object { ALWAYS_INLINE int8_t GetFieldByteVolatile(MemberOffset field_offset) REQUIRES_SHARED(Locks::mutator_lock_); - template<bool kTransactionActive, bool kCheckTransaction = true, - VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> + template<bool kTransactionActive, + bool kCheckTransaction = true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, + bool kIsVolatile = false> ALWAYS_INLINE void SetFieldBoolean(MemberOffset field_offset, uint8_t new_value) REQUIRES_SHARED(Locks::mutator_lock_); - template<bool kTransactionActive, bool kCheckTransaction = true, - VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> + template<bool kTransactionActive, + bool kCheckTransaction = true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, + bool kIsVolatile = false> ALWAYS_INLINE void SetFieldByte(MemberOffset field_offset, int8_t new_value) REQUIRES_SHARED(Locks::mutator_lock_); - template<bool kTransactionActive, bool kCheckTransaction = true, - VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + template<bool kTransactionActive, + bool kCheckTransaction = true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE void SetFieldBooleanVolatile(MemberOffset field_offset, uint8_t new_value) REQUIRES_SHARED(Locks::mutator_lock_); - template<bool kTransactionActive, bool kCheckTransaction = true, - VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + template<bool kTransactionActive, + bool kCheckTransaction = true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE void SetFieldByteVolatile(MemberOffset field_offset, int8_t new_value) REQUIRES_SHARED(Locks::mutator_lock_); @@ -452,23 +461,29 @@ class 
MANAGED LOCKABLE Object { ALWAYS_INLINE int16_t GetFieldShortVolatile(MemberOffset field_offset) REQUIRES_SHARED(Locks::mutator_lock_); - template<bool kTransactionActive, bool kCheckTransaction = true, - VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> + template<bool kTransactionActive, + bool kCheckTransaction = true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, + bool kIsVolatile = false> ALWAYS_INLINE void SetFieldChar(MemberOffset field_offset, uint16_t new_value) REQUIRES_SHARED(Locks::mutator_lock_); - template<bool kTransactionActive, bool kCheckTransaction = true, - VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> + template<bool kTransactionActive, + bool kCheckTransaction = true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, + bool kIsVolatile = false> ALWAYS_INLINE void SetFieldShort(MemberOffset field_offset, int16_t new_value) REQUIRES_SHARED(Locks::mutator_lock_); - template<bool kTransactionActive, bool kCheckTransaction = true, - VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + template<bool kTransactionActive, + bool kCheckTransaction = true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE void SetFieldCharVolatile(MemberOffset field_offset, uint16_t new_value) REQUIRES_SHARED(Locks::mutator_lock_); - template<bool kTransactionActive, bool kCheckTransaction = true, - VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + template<bool kTransactionActive, + bool kCheckTransaction = true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE void SetFieldShortVolatile(MemberOffset field_offset, int16_t new_value) REQUIRES_SHARED(Locks::mutator_lock_); @@ -487,13 +502,16 @@ class MANAGED LOCKABLE Object { return GetField32<kVerifyFlags, true>(field_offset); } - template<bool kTransactionActive, bool kCheckTransaction = true, - VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> + template<bool 
kTransactionActive, + bool kCheckTransaction = true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, + bool kIsVolatile = false> ALWAYS_INLINE void SetField32(MemberOffset field_offset, int32_t new_value) REQUIRES_SHARED(Locks::mutator_lock_); - template<bool kTransactionActive, bool kCheckTransaction = true, - VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + template<bool kTransactionActive, + bool kCheckTransaction = true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE void SetField32Volatile(MemberOffset field_offset, int32_t new_value) REQUIRES_SHARED(Locks::mutator_lock_); @@ -503,34 +521,44 @@ class MANAGED LOCKABLE Object { ALWAYS_INLINE void SetField32Transaction(MemberOffset field_offset, int32_t new_value) REQUIRES_SHARED(Locks::mutator_lock_); - template<bool kTransactionActive, bool kCheckTransaction = true, - VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + template<bool kTransactionActive, + bool kCheckTransaction = true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE bool CasFieldWeakSequentiallyConsistent32(MemberOffset field_offset, - int32_t old_value, int32_t new_value) + int32_t old_value, + int32_t new_value) REQUIRES_SHARED(Locks::mutator_lock_); - template<bool kTransactionActive, bool kCheckTransaction = true, - VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool CasFieldWeakRelaxed32(MemberOffset field_offset, int32_t old_value, - int32_t new_value) ALWAYS_INLINE + template<bool kTransactionActive, + bool kCheckTransaction = true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + ALWAYS_INLINE bool CasFieldWeakRelaxed32(MemberOffset field_offset, + int32_t old_value, + int32_t new_value) REQUIRES_SHARED(Locks::mutator_lock_); - template<bool kTransactionActive, bool kCheckTransaction = true, - VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool CasFieldWeakAcquire32(MemberOffset field_offset, int32_t old_value, - int32_t new_value) ALWAYS_INLINE + 
template<bool kTransactionActive, + bool kCheckTransaction = true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + ALWAYS_INLINE bool CasFieldWeakAcquire32(MemberOffset field_offset, + int32_t old_value, + int32_t new_value) REQUIRES_SHARED(Locks::mutator_lock_); - template<bool kTransactionActive, bool kCheckTransaction = true, - VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool CasFieldWeakRelease32(MemberOffset field_offset, int32_t old_value, - int32_t new_value) ALWAYS_INLINE + template<bool kTransactionActive, + bool kCheckTransaction = true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + ALWAYS_INLINE bool CasFieldWeakRelease32(MemberOffset field_offset, + int32_t old_value, + int32_t new_value) REQUIRES_SHARED(Locks::mutator_lock_); - template<bool kTransactionActive, bool kCheckTransaction = true, - VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool CasFieldStrongSequentiallyConsistent32(MemberOffset field_offset, int32_t old_value, - int32_t new_value) ALWAYS_INLINE + template<bool kTransactionActive, + bool kCheckTransaction = true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + ALWAYS_INLINE bool CasFieldStrongSequentiallyConsistent32(MemberOffset field_offset, + int32_t old_value, + int32_t new_value) REQUIRES_SHARED(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> @@ -548,13 +576,16 @@ class MANAGED LOCKABLE Object { return GetField64<kVerifyFlags, true>(field_offset); } - template<bool kTransactionActive, bool kCheckTransaction = true, - VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> + template<bool kTransactionActive, + bool kCheckTransaction = true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, + bool kIsVolatile = false> ALWAYS_INLINE void SetField64(MemberOffset field_offset, int64_t new_value) REQUIRES_SHARED(Locks::mutator_lock_); - template<bool kTransactionActive, bool kCheckTransaction 
= true, - VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + template<bool kTransactionActive, + bool kCheckTransaction = true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE void SetField64Volatile(MemberOffset field_offset, int64_t new_value) REQUIRES_SHARED(Locks::mutator_lock_); @@ -564,35 +595,45 @@ class MANAGED LOCKABLE Object { ALWAYS_INLINE void SetField64Transaction(MemberOffset field_offset, int32_t new_value) REQUIRES_SHARED(Locks::mutator_lock_); - template<bool kTransactionActive, bool kCheckTransaction = true, - VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool CasFieldWeakSequentiallyConsistent64(MemberOffset field_offset, int64_t old_value, + template<bool kTransactionActive, + bool kCheckTransaction = true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + bool CasFieldWeakSequentiallyConsistent64(MemberOffset field_offset, + int64_t old_value, int64_t new_value) REQUIRES_SHARED(Locks::mutator_lock_); - template<bool kTransactionActive, bool kCheckTransaction = true, - VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool CasFieldStrongSequentiallyConsistent64(MemberOffset field_offset, int64_t old_value, + template<bool kTransactionActive, + bool kCheckTransaction = true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + bool CasFieldStrongSequentiallyConsistent64(MemberOffset field_offset, + int64_t old_value, int64_t new_value) REQUIRES_SHARED(Locks::mutator_lock_); - template<bool kTransactionActive, bool kCheckTransaction = true, - VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T> + template<bool kTransactionActive, + bool kCheckTransaction = true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, + typename T> void SetFieldPtr(MemberOffset field_offset, T new_value) REQUIRES_SHARED(Locks::mutator_lock_) { SetFieldPtrWithSize<kTransactionActive, kCheckTransaction, kVerifyFlags>( field_offset, new_value, kRuntimePointerSize); } - template<bool 
kTransactionActive, bool kCheckTransaction = true, - VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T> + template<bool kTransactionActive, + bool kCheckTransaction = true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, + typename T> void SetFieldPtr64(MemberOffset field_offset, T new_value) REQUIRES_SHARED(Locks::mutator_lock_) { SetFieldPtrWithSize<kTransactionActive, kCheckTransaction, kVerifyFlags>( field_offset, new_value, 8u); } - template<bool kTransactionActive, bool kCheckTransaction = true, - VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T> + template<bool kTransactionActive, + bool kCheckTransaction = true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, + typename T> ALWAYS_INLINE void SetFieldPtrWithSize(MemberOffset field_offset, T new_value, PointerSize pointer_size) @@ -628,28 +669,34 @@ class MANAGED LOCKABLE Object { // Update methods that expose the raw address of a primitive value-type to an Accessor instance // that will attempt to update the field. These are used by VarHandle accessor methods to // atomically update fields with a wider range of memory orderings than usually required. 
- template<bool kTransactionActive, bool kCheckTransaction = true, - VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + template<bool kTransactionActive, + bool kCheckTransaction = true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> void UpdateFieldBooleanViaAccessor(MemberOffset field_offset, Accessor<uint8_t>* accessor) REQUIRES_SHARED(Locks::mutator_lock_); - template<bool kTransactionActive, bool kCheckTransaction = true, - VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + template<bool kTransactionActive, + bool kCheckTransaction = true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> void UpdateFieldByteViaAccessor(MemberOffset field_offset, Accessor<int8_t>* accessor) REQUIRES_SHARED(Locks::mutator_lock_); - template<bool kTransactionActive, bool kCheckTransaction = true, - VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + template<bool kTransactionActive, + bool kCheckTransaction = true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> void UpdateFieldCharViaAccessor(MemberOffset field_offset, Accessor<uint16_t>* accessor) REQUIRES_SHARED(Locks::mutator_lock_); - template<bool kTransactionActive, bool kCheckTransaction = true, - VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + template<bool kTransactionActive, + bool kCheckTransaction = true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> void UpdateFieldShortViaAccessor(MemberOffset field_offset, Accessor<int16_t>* accessor) REQUIRES_SHARED(Locks::mutator_lock_); - template<bool kTransactionActive, bool kCheckTransaction = true, - VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + template<bool kTransactionActive, + bool kCheckTransaction = true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> void UpdateField32ViaAccessor(MemberOffset field_offset, Accessor<int32_t>* accessor) REQUIRES_SHARED(Locks::mutator_lock_); - template<bool kTransactionActive, bool kCheckTransaction = true, - VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> 
+ template<bool kTransactionActive, + bool kCheckTransaction = true, + VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> void UpdateField64ViaAccessor(MemberOffset field_offset, Accessor<int64_t>* accessor) REQUIRES_SHARED(Locks::mutator_lock_); diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h index a60861cc28..8fa2c6cf7f 100644 --- a/runtime/mirror/string-inl.h +++ b/runtime/mirror/string-inl.h @@ -25,6 +25,7 @@ #include "base/globals.h" #include "base/utils.h" #include "class.h" +#include "class_root.h" #include "common_throws.h" #include "dex/utf.h" #include "gc/heap-inl.h" @@ -194,21 +195,6 @@ int32_t String::FastIndexOf(MemoryType* chars, int32_t ch, int32_t start) { return -1; } -template<VerifyObjectFlags kVerifyFlags> -inline size_t String::SizeOf() { - size_t size = sizeof(String); - if (IsCompressed()) { - size += (sizeof(uint8_t) * GetLength<kVerifyFlags>()); - } else { - size += (sizeof(uint16_t) * GetLength<kVerifyFlags>()); - } - // String.equals() intrinsics assume zero-padding up to kObjectAlignment, - // so make sure the zero-padding is actually copied around if GC compaction - // chooses to copy only SizeOf() bytes. - // http://b/23528461 - return RoundUp(size, kObjectAlignment); -} - template <bool kIsInstrumented, typename PreFenceVisitor> inline String* String::Alloc(Thread* self, int32_t utf16_length_with_flag, gc::AllocatorType allocator_type, @@ -226,7 +212,8 @@ inline String* String::Alloc(Thread* self, int32_t utf16_length_with_flag, // http://b/23528461 size_t alloc_size = RoundUp(size, kObjectAlignment); - Class* string_class = GetJavaLangString(); + Runtime* runtime = Runtime::Current(); + ObjPtr<Class> string_class = GetClassRoot<String>(runtime->GetClassLinker()); // Check for overflow and throw OutOfMemoryError if this was an unreasonable request. // Do this by comparing with the maximum length that will _not_ cause an overflow. 
const size_t overflow_length = (-header_size) / block_size; // Unsigned arithmetic. @@ -242,7 +229,7 @@ inline String* String::Alloc(Thread* self, int32_t utf16_length_with_flag, return nullptr; } - gc::Heap* heap = Runtime::Current()->GetHeap(); + gc::Heap* heap = runtime->GetHeap(); return down_cast<String*>( heap->AllocObjectWithAllocator<kIsInstrumented, true>(self, string_class, alloc_size, allocator_type, pre_fence_visitor)); diff --git a/runtime/mirror/string.cc b/runtime/mirror/string.cc index 6208a962e5..b76ca1968a 100644 --- a/runtime/mirror/string.cc +++ b/runtime/mirror/string.cc @@ -35,9 +35,6 @@ namespace art { namespace mirror { -// TODO: get global references for these -GcRoot<Class> String::java_lang_String_; - int32_t String::FastIndexOf(int32_t ch, int32_t start) { int32_t count = GetLength(); if (start < 0) { @@ -52,18 +49,6 @@ int32_t String::FastIndexOf(int32_t ch, int32_t start) { } } -void String::SetClass(ObjPtr<Class> java_lang_String) { - CHECK(java_lang_String_.IsNull()); - CHECK(java_lang_String != nullptr); - CHECK(java_lang_String->IsStringClass()); - java_lang_String_ = GcRoot<Class>(java_lang_String); -} - -void String::ResetClass() { - CHECK(!java_lang_String_.IsNull()); - java_lang_String_ = GcRoot<Class>(nullptr); -} - int String::ComputeHashCode() { int32_t hash_code = 0; if (IsCompressed()) { @@ -372,10 +357,6 @@ int32_t String::CompareTo(ObjPtr<String> rhs) { return count_diff; } -void String::VisitRoots(RootVisitor* visitor) { - java_lang_String_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass)); -} - CharArray* String::ToCharArray(Thread* self) { StackHandleScope<1> hs(self); Handle<String> string(hs.NewHandle(this)); diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h index c45dc499e5..598175b749 100644 --- a/runtime/mirror/string.h +++ b/runtime/mirror/string.h @@ -17,6 +17,8 @@ #ifndef ART_RUNTIME_MIRROR_STRING_H_ #define ART_RUNTIME_MIRROR_STRING_H_ +#include "base/bit_utils.h" +#include "base/globals.h" 
#include "gc/allocator_type.h" #include "gc_root-inl.h" #include "class.h" @@ -66,7 +68,19 @@ class MANAGED String FINAL : public Object { } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - size_t SizeOf() REQUIRES_SHARED(Locks::mutator_lock_); + size_t SizeOf() REQUIRES_SHARED(Locks::mutator_lock_) { + size_t size = sizeof(String); + if (IsCompressed()) { + size += (sizeof(uint8_t) * GetLength<kVerifyFlags>()); + } else { + size += (sizeof(uint16_t) * GetLength<kVerifyFlags>()); + } + // String.equals() intrinsics assume zero-padding up to kObjectAlignment, + // so make sure the zero-padding is actually copied around if GC compaction + // chooses to copy only SizeOf() bytes. + // http://b/23528461 + return RoundUp(size, kObjectAlignment); + } // Taking out the first/uppermost bit because it is not part of actual length value template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> @@ -214,15 +228,6 @@ class MANAGED String FINAL : public Object { : length; } - static Class* GetJavaLangString() REQUIRES_SHARED(Locks::mutator_lock_) { - DCHECK(!java_lang_String_.IsNull()); - return java_lang_String_.Read(); - } - - static void SetClass(ObjPtr<Class> java_lang_String) REQUIRES_SHARED(Locks::mutator_lock_); - static void ResetClass() REQUIRES_SHARED(Locks::mutator_lock_); - static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_); - // Returns a human-readable equivalent of 'descriptor'. So "I" would be "int", // "[[I" would be "int[][]", "[Ljava/lang/String;" would be // "java.lang.String[]", and so forth. @@ -267,10 +272,7 @@ class MANAGED String FINAL : public Object { uint8_t value_compressed_[0]; }; - static GcRoot<Class> java_lang_String_; - friend struct art::StringOffsets; // for verifying offset information - ART_FRIEND_TEST(art::StubTest, ReadBarrierForRoot); // For java_lang_String_. 
DISALLOW_IMPLICIT_CONSTRUCTORS(String); }; diff --git a/runtime/mirror/var_handle.cc b/runtime/mirror/var_handle.cc index 8311d911cc..71f41b9d12 100644 --- a/runtime/mirror/var_handle.cc +++ b/runtime/mirror/var_handle.cc @@ -27,6 +27,7 @@ #include "jvalue-inl.h" #include "method_handles-inl.h" #include "method_type.h" +#include "obj_ptr-inl.h" #include "well_known_classes.h" namespace art { @@ -266,31 +267,22 @@ int32_t BuildParameterArray(ObjPtr<Class> (¶meters)[VarHandle::kMaxAccessorP // Returns the return type associated with an AccessModeTemplate based // on the template and the variable type specified. -Class* GetReturnType(AccessModeTemplate access_mode_template, ObjPtr<Class> varType) +static ObjPtr<Class> GetReturnType(AccessModeTemplate access_mode_template, ObjPtr<Class> varType) REQUIRES_SHARED(Locks::mutator_lock_) { DCHECK(varType != nullptr); switch (access_mode_template) { case AccessModeTemplate::kCompareAndSet: - return Runtime::Current()->GetClassLinker()->FindPrimitiveClass('Z'); + return GetClassRoot(ClassRoot::kPrimitiveBoolean); case AccessModeTemplate::kCompareAndExchange: case AccessModeTemplate::kGet: case AccessModeTemplate::kGetAndUpdate: - return varType.Ptr(); + return varType; case AccessModeTemplate::kSet: - return Runtime::Current()->GetClassLinker()->FindPrimitiveClass('V'); + return GetClassRoot(ClassRoot::kPrimitiveVoid); } return nullptr; } -ObjectArray<Class>* NewArrayOfClasses(Thread* self, int count) - REQUIRES_SHARED(Locks::mutator_lock_) { - Runtime* const runtime = Runtime::Current(); - ClassLinker* const class_linker = runtime->GetClassLinker(); - ObjPtr<mirror::Class> class_type = mirror::Class::GetJavaLangClass(); - ObjPtr<mirror::Class> array_of_class = class_linker->FindArrayClass(self, &class_type); - return ObjectArray<Class>::Alloc(Thread::Current(), array_of_class, count); -} - // Method to insert a read barrier for accessors to reference fields. 
inline void ReadBarrierForVarHandleAccess(ObjPtr<Object> obj, MemberOffset field_offset) REQUIRES_SHARED(Locks::mutator_lock_) { @@ -1410,15 +1402,15 @@ class ByteArrayViewAccessor { } // namespace -Class* VarHandle::GetVarType() { +ObjPtr<Class> VarHandle::GetVarType() { return GetFieldObject<Class>(VarTypeOffset()); } -Class* VarHandle::GetCoordinateType0() { +ObjPtr<Class> VarHandle::GetCoordinateType0() { return GetFieldObject<Class>(CoordinateType0Offset()); } -Class* VarHandle::GetCoordinateType1() { +ObjPtr<Class> VarHandle::GetCoordinateType1() { return GetFieldObject<Class>(CoordinateType1Offset()); } @@ -1438,7 +1430,7 @@ VarHandle::MatchKind VarHandle::GetMethodTypeMatchForAccessMode(AccessMode acces // Check return type first. If the return type of the method // of the VarHandle is immaterial. if (mt_rtype->GetPrimitiveType() != Primitive::Type::kPrimVoid) { - ObjPtr<Class> vh_rtype = GetReturnType(access_mode_template, var_type.Ptr()); + ObjPtr<Class> vh_rtype = GetReturnType(access_mode_template, var_type); if (vh_rtype != mt_rtype) { if (!IsReturnTypeConvertible(vh_rtype, mt_rtype)) { return MatchKind::kNone; @@ -1513,9 +1505,9 @@ bool VarHandle::IsInvokerMethodTypeCompatible(AccessMode access_mode, return true; } -MethodType* VarHandle::GetMethodTypeForAccessMode(Thread* self, - ObjPtr<VarHandle> var_handle, - AccessMode access_mode) { +ObjPtr<MethodType> VarHandle::GetMethodTypeForAccessMode(Thread* self, + ObjPtr<VarHandle> var_handle, + AccessMode access_mode) { // This is a static method as the var_handle might be moved by the GC during it's execution. 
AccessModeTemplate access_mode_template = GetAccessModeTemplate(access_mode); @@ -1525,7 +1517,9 @@ MethodType* VarHandle::GetMethodTypeForAccessMode(Thread* self, const int32_t ptypes_count = GetNumberOfParameters(access_mode_template, vh->GetCoordinateType0(), vh->GetCoordinateType1()); - Handle<ObjectArray<Class>> ptypes = hs.NewHandle(NewArrayOfClasses(self, ptypes_count)); + ObjPtr<Class> array_of_class = GetClassRoot<ObjectArray<Class>>(); + Handle<ObjectArray<Class>> ptypes = + hs.NewHandle(ObjectArray<Class>::Alloc(Thread::Current(), array_of_class, ptypes_count)); if (ptypes == nullptr) { return nullptr; } @@ -1537,12 +1531,12 @@ MethodType* VarHandle::GetMethodTypeForAccessMode(Thread* self, vh->GetCoordinateType0(), vh->GetCoordinateType1()); for (int32_t i = 0; i < ptypes_count; ++i) { - ptypes->Set(i, ptypes_array[i].Ptr()); + ptypes->Set(i, ptypes_array[i]); } return MethodType::Create(self, rtype, ptypes); } -MethodType* VarHandle::GetMethodTypeForAccessMode(Thread* self, AccessMode access_mode) { +ObjPtr<MethodType> VarHandle::GetMethodTypeForAccessMode(Thread* self, AccessMode access_mode) { return GetMethodTypeForAccessMode(self, this, access_mode); } diff --git a/runtime/mirror/var_handle.h b/runtime/mirror/var_handle.h index 9829456854..48c9d74e30 100644 --- a/runtime/mirror/var_handle.h +++ b/runtime/mirror/var_handle.h @@ -26,6 +26,7 @@ namespace art { template<class T> class Handle; class InstructionOperands; +template<class T> class ObjPtr; enum class Intrinsics; @@ -120,7 +121,7 @@ class MANAGED VarHandle : public Object { // AccessMode. No check is made for whether the AccessMode is a // supported operation so the MethodType can be used when raising a // WrongMethodTypeException exception. 
- MethodType* GetMethodTypeForAccessMode(Thread* self, AccessMode accessMode) + ObjPtr<MethodType> GetMethodTypeForAccessMode(Thread* self, AccessMode accessMode) REQUIRES_SHARED(Locks::mutator_lock_); // Returns a string representing the descriptor of the MethodType associated with @@ -135,7 +136,7 @@ class MANAGED VarHandle : public Object { REQUIRES_SHARED(Locks::mutator_lock_); // Gets the variable type that is operated on by this VarHandle instance. - Class* GetVarType() REQUIRES_SHARED(Locks::mutator_lock_); + ObjPtr<Class> GetVarType() REQUIRES_SHARED(Locks::mutator_lock_); // Gets the return type descriptor for a named accessor method, // nullptr if accessor_method is not supported. @@ -149,13 +150,13 @@ class MANAGED VarHandle : public Object { static bool GetAccessModeByMethodName(const char* method_name, AccessMode* access_mode); private: - Class* GetCoordinateType0() REQUIRES_SHARED(Locks::mutator_lock_); - Class* GetCoordinateType1() REQUIRES_SHARED(Locks::mutator_lock_); + ObjPtr<Class> GetCoordinateType0() REQUIRES_SHARED(Locks::mutator_lock_); + ObjPtr<Class> GetCoordinateType1() REQUIRES_SHARED(Locks::mutator_lock_); int32_t GetAccessModesBitMask() REQUIRES_SHARED(Locks::mutator_lock_); - static MethodType* GetMethodTypeForAccessMode(Thread* self, - ObjPtr<VarHandle> var_handle, - AccessMode access_mode) + static ObjPtr<MethodType> GetMethodTypeForAccessMode(Thread* self, + ObjPtr<VarHandle> var_handle, + AccessMode access_mode) REQUIRES_SHARED(Locks::mutator_lock_); static MemberOffset VarTypeOffset() { diff --git a/runtime/mirror/var_handle_test.cc b/runtime/mirror/var_handle_test.cc index 2c1283225d..9df96ddbd1 100644 --- a/runtime/mirror/var_handle_test.cc +++ b/runtime/mirror/var_handle_test.cc @@ -92,8 +92,7 @@ class VarHandleTest : public CommonRuntimeTest { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); Handle<Class> var_type = hs.NewHandle(view_array_class->GetComponentType()); Handle<Class> index_type = 
hs.NewHandle(class_linker->FindPrimitiveClass('I')); - ObjPtr<mirror::Class> byte_class = class_linker->FindPrimitiveClass('B'); - Handle<Class> byte_array_class(hs.NewHandle(class_linker->FindArrayClass(self, &byte_class))); + Handle<Class> byte_array_class(hs.NewHandle(GetClassRoot<mirror::ByteArray>())); InitializeVarHandle(bvh.Get(), var_type, byte_array_class, index_type, access_modes_bit_mask); bvh->SetFieldBoolean<false>(ByteArrayViewVarHandle::NativeByteOrderOffset(), native_byte_order); return bvh.Get(); @@ -131,17 +130,17 @@ class VarHandleTest : public CommonRuntimeTest { } // Helper to get the VarType of a VarHandle. - static Class* GetVarType(VarHandle* vh) REQUIRES_SHARED(Locks::mutator_lock_) { + static ObjPtr<Class> GetVarType(VarHandle* vh) REQUIRES_SHARED(Locks::mutator_lock_) { return vh->GetVarType(); } // Helper to get the CoordinateType0 of a VarHandle. - static Class* GetCoordinateType0(VarHandle* vh) REQUIRES_SHARED(Locks::mutator_lock_) { + static ObjPtr<Class> GetCoordinateType0(VarHandle* vh) REQUIRES_SHARED(Locks::mutator_lock_) { return vh->GetCoordinateType0(); } // Helper to get the CoordinateType1 of a VarHandle. 
- static Class* GetCoordinateType1(VarHandle* vh) REQUIRES_SHARED(Locks::mutator_lock_) { + static ObjPtr<Class> GetCoordinateType1(VarHandle* vh) REQUIRES_SHARED(Locks::mutator_lock_) { return vh->GetCoordinateType1(); } @@ -151,7 +150,7 @@ class VarHandleTest : public CommonRuntimeTest { } private: - static void InitializeVarHandle(VarHandle* vh, + static void InitializeVarHandle(ObjPtr<VarHandle> vh, Handle<Class> var_type, int32_t access_modes_bit_mask) REQUIRES_SHARED(Locks::mutator_lock_) { @@ -159,7 +158,7 @@ class VarHandleTest : public CommonRuntimeTest { vh->SetField32<false>(VarHandle::AccessModesBitMaskOffset(), access_modes_bit_mask); } - static void InitializeVarHandle(VarHandle* vh, + static void InitializeVarHandle(ObjPtr<VarHandle> vh, Handle<Class> var_type, Handle<Class> coordinate_type0, int32_t access_modes_bit_mask) @@ -168,7 +167,7 @@ class VarHandleTest : public CommonRuntimeTest { vh->SetFieldObject<false>(VarHandle::CoordinateType0Offset(), coordinate_type0.Get()); } - static void InitializeVarHandle(VarHandle* vh, + static void InitializeVarHandle(ObjPtr<VarHandle> vh, Handle<Class> var_type, Handle<Class> coordinate_type0, Handle<Class> coordinate_type1, @@ -234,8 +233,7 @@ static MethodType* MethodTypeOf(const std::string& method_descriptor) { ScopedObjectAccess soa(self); StackHandleScope<3> hs(self); int ptypes_count = static_cast<int>(descriptors.size()) - 1; - ObjPtr<mirror::Class> class_type = mirror::Class::GetJavaLangClass(); - ObjPtr<mirror::Class> array_of_class = class_linker->FindArrayClass(self, &class_type); + ObjPtr<mirror::Class> array_of_class = GetClassRoot<mirror::ObjectArray<mirror::Class>>(); Handle<ObjectArray<Class>> ptypes = hs.NewHandle( ObjectArray<Class>::Alloc(Thread::Current(), array_of_class, ptypes_count)); Handle<mirror::ClassLoader> boot_class_loader = hs.NewHandle<mirror::ClassLoader>(nullptr); @@ -599,10 +597,10 @@ TEST_F(VarHandleTest, ArrayElementVarHandle) { 
VarHandle::AccessMode::kGetAndBitwiseXorRelease, VarHandle::AccessMode::kGetAndBitwiseXorAcquire); - ObjPtr<mirror::Class> string_class = mirror::String::GetJavaLangString(); - ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - Handle<Class> string_array_class(hs.NewHandle(class_linker->FindArrayClass(self, &string_class))); - Handle<mirror::ArrayElementVarHandle> vh(hs.NewHandle(CreateArrayElementVarHandle(self, string_array_class, mask))); + Handle<mirror::Class> string_array_class = hs.NewHandle( + GetClassRoot<mirror::ObjectArray<mirror::String>>()); + Handle<mirror::ArrayElementVarHandle> vh( + hs.NewHandle(CreateArrayElementVarHandle(self, string_array_class, mask))); EXPECT_FALSE(vh.IsNull()); // Check access modes @@ -746,11 +744,10 @@ TEST_F(VarHandleTest, ByteArrayViewVarHandle) { VarHandle::AccessMode::kGetAndBitwiseXor, VarHandle::AccessMode::kGetAndBitwiseXorAcquire); - ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - ObjPtr<mirror::Class> char_class = class_linker->FindPrimitiveClass('C'); - Handle<Class> char_array_class(hs.NewHandle(class_linker->FindArrayClass(self, &char_class))); + Handle<Class> char_array_class(hs.NewHandle(GetClassRoot<mirror::CharArray>())); const bool native_byte_order = true; - Handle<mirror::ByteArrayViewVarHandle> vh(hs.NewHandle(CreateByteArrayViewVarHandle(self, char_array_class, native_byte_order, mask))); + Handle<mirror::ByteArrayViewVarHandle> vh( + hs.NewHandle(CreateByteArrayViewVarHandle(self, char_array_class, native_byte_order, mask))); EXPECT_FALSE(vh.IsNull()); EXPECT_EQ(native_byte_order, vh->GetNativeByteOrder()); @@ -895,11 +892,10 @@ TEST_F(VarHandleTest, ByteBufferViewVarHandle) { VarHandle::AccessMode::kGetAndBitwiseXor, VarHandle::AccessMode::kGetAndBitwiseXorAcquire); - ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - ObjPtr<mirror::Class> double_class = class_linker->FindPrimitiveClass('D'); - Handle<Class> 
double_array_class(hs.NewHandle(class_linker->FindArrayClass(self, &double_class))); + Handle<Class> double_array_class(hs.NewHandle(GetClassRoot<mirror::DoubleArray>())); const bool native_byte_order = false; - Handle<mirror::ByteBufferViewVarHandle> vh(hs.NewHandle(CreateByteBufferViewVarHandle(self, double_array_class, native_byte_order, mask))); + Handle<mirror::ByteBufferViewVarHandle> vh(hs.NewHandle( + CreateByteBufferViewVarHandle(self, double_array_class, native_byte_order, mask))); EXPECT_FALSE(vh.IsNull()); EXPECT_EQ(native_byte_order, vh->GetNativeByteOrder()); diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc index 6c820190b4..c9deb526c2 100644 --- a/runtime/native/dalvik_system_VMRuntime.cc +++ b/runtime/native/dalvik_system_VMRuntime.cc @@ -32,6 +32,7 @@ extern "C" void android_set_application_target_sdk_version(uint32_t version); #include "class_linker-inl.h" #include "common_throws.h" #include "debugger.h" +#include "dex/class_accessor-inl.h" #include "dex/dex_file-inl.h" #include "dex/dex_file_types.h" #include "gc/accounting/card_table-inl.h" @@ -573,30 +574,12 @@ static void VMRuntime_preloadDexCaches(JNIEnv* env, jobject) { } if (kPreloadDexCachesFieldsAndMethods) { - for (size_t class_def_index = 0; - class_def_index < dex_file->NumClassDefs(); - class_def_index++) { - const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index); - const uint8_t* class_data = dex_file->GetClassData(class_def); - if (class_data == nullptr) { - continue; + for (ClassAccessor accessor : dex_file->GetClasses()) { + for (const ClassAccessor::Field& field : accessor.GetFields()) { + PreloadDexCachesResolveField(dex_cache, field.GetIndex(), field.IsStatic()); } - ClassDataItemIterator it(*dex_file, class_data); - for (; it.HasNextStaticField(); it.Next()) { - uint32_t field_idx = it.GetMemberIndex(); - PreloadDexCachesResolveField(dex_cache, field_idx, true); - } - for (; it.HasNextInstanceField(); 
it.Next()) { - uint32_t field_idx = it.GetMemberIndex(); - PreloadDexCachesResolveField(dex_cache, field_idx, false); - } - for (; it.HasNextDirectMethod(); it.Next()) { - uint32_t method_idx = it.GetMemberIndex(); - PreloadDexCachesResolveMethod(dex_cache, method_idx); - } - for (; it.HasNextVirtualMethod(); it.Next()) { - uint32_t method_idx = it.GetMemberIndex(); - PreloadDexCachesResolveMethod(dex_cache, method_idx); + for (const ClassAccessor::Method& method : accessor.GetMethods()) { + PreloadDexCachesResolveMethod(dex_cache, method.GetIndex()); } } } diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc index 261178b0ee..c6bdfa10c6 100644 --- a/runtime/native/java_lang_Class.cc +++ b/runtime/native/java_lang_Class.cc @@ -215,17 +215,9 @@ static jstring Class_getNameNative(JNIEnv* env, jobject javaThis) { return soa.AddLocalReference<jstring>(mirror::Class::ComputeName(hs.NewHandle(c))); } -// TODO: Move this to mirror::Class ? Other mirror types that commonly appear -// as arrays have a GetArrayClass() method. 
-static ObjPtr<mirror::Class> GetClassArrayClass(Thread* self) - REQUIRES_SHARED(Locks::mutator_lock_) { - ObjPtr<mirror::Class> class_class = mirror::Class::GetJavaLangClass(); - return Runtime::Current()->GetClassLinker()->FindArrayClass(self, &class_class); -} - static jobjectArray Class_getInterfacesInternal(JNIEnv* env, jobject javaThis) { ScopedFastNativeObjectAccess soa(env); - StackHandleScope<4> hs(soa.Self()); + StackHandleScope<1> hs(soa.Self()); Handle<mirror::Class> klass = hs.NewHandle(DecodeClass(soa, javaThis)); if (klass->IsProxyClass()) { @@ -237,10 +229,12 @@ static jobjectArray Class_getInterfacesInternal(JNIEnv* env, jobject javaThis) { return nullptr; } + ClassLinker* linker = Runtime::Current()->GetClassLinker(); const uint32_t num_ifaces = iface_list->Size(); - Handle<mirror::Class> class_array_class = hs.NewHandle(GetClassArrayClass(soa.Self())); - Handle<mirror::ObjectArray<mirror::Class>> ifaces = hs.NewHandle( - mirror::ObjectArray<mirror::Class>::Alloc(soa.Self(), class_array_class.Get(), num_ifaces)); + ObjPtr<mirror::Class> class_array_class = + GetClassRoot<mirror::ObjectArray<mirror::Class>>(linker); + ObjPtr<mirror::ObjectArray<mirror::Class>> ifaces = + mirror::ObjectArray<mirror::Class>::Alloc(soa.Self(), class_array_class, num_ifaces); if (ifaces.IsNull()) { DCHECK(soa.Self()->IsExceptionPending()); return nullptr; @@ -250,20 +244,21 @@ static jobjectArray Class_getInterfacesInternal(JNIEnv* env, jobject javaThis) { // with kActiveTransaction == false. 
DCHECK(!Runtime::Current()->IsActiveTransaction()); - ClassLinker* linker = Runtime::Current()->GetClassLinker(); - MutableHandle<mirror::Class> interface(hs.NewHandle<mirror::Class>(nullptr)); for (uint32_t i = 0; i < num_ifaces; ++i) { const dex::TypeIndex type_idx = iface_list->GetTypeItem(i).type_idx_; - interface.Assign(linker->LookupResolvedType(type_idx, klass.Get())); - ifaces->SetWithoutChecks<false>(i, interface.Get()); + ObjPtr<mirror::Class> interface = linker->LookupResolvedType(type_idx, klass.Get()); + DCHECK(interface != nullptr); + ifaces->SetWithoutChecks<false>(i, interface); } - return soa.AddLocalReference<jobjectArray>(ifaces.Get()); + return soa.AddLocalReference<jobjectArray>(ifaces); } -static mirror::ObjectArray<mirror::Field>* GetDeclaredFields( - Thread* self, ObjPtr<mirror::Class> klass, bool public_only, bool force_resolve) - REQUIRES_SHARED(Locks::mutator_lock_) { +static ObjPtr<mirror::ObjectArray<mirror::Field>> GetDeclaredFields( + Thread* self, + ObjPtr<mirror::Class> klass, + bool public_only, + bool force_resolve) REQUIRES_SHARED(Locks::mutator_lock_) { StackHandleScope<1> hs(self); IterationRange<StrideIterator<ArtField>> ifields = klass->GetIFields(); IterationRange<StrideIterator<ArtField>> sfields = klass->GetSFields(); @@ -672,10 +667,8 @@ static jobjectArray Class_getDeclaredClasses(JNIEnv* env, jobject javaThis) { // Pending exception from GetDeclaredClasses. 
return nullptr; } - ObjPtr<mirror::Class> class_array_class = GetClassArrayClass(soa.Self()); - if (class_array_class == nullptr) { - return nullptr; - } + ObjPtr<mirror::Class> class_array_class = GetClassRoot<mirror::ObjectArray<mirror::Class>>(); + DCHECK(class_array_class != nullptr); ObjPtr<mirror::ObjectArray<mirror::Class>> empty_array = mirror::ObjectArray<mirror::Class>::Alloc(soa.Self(), class_array_class, 0); return soa.AddLocalReference<jobjectArray>(empty_array); diff --git a/runtime/native/java_lang_StringFactory.cc b/runtime/native/java_lang_StringFactory.cc index 07e875efcb..3978ca8a36 100644 --- a/runtime/native/java_lang_StringFactory.cc +++ b/runtime/native/java_lang_StringFactory.cc @@ -19,7 +19,7 @@ #include "common_throws.h" #include "jni/jni_internal.h" #include "mirror/object-inl.h" -#include "mirror/string.h" +#include "mirror/string-inl.h" #include "native_util.h" #include "nativehelper/jni_macros.h" #include "nativehelper/scoped_local_ref.h" diff --git a/runtime/native/java_lang_VMClassLoader.cc b/runtime/native/java_lang_VMClassLoader.cc index 0630737d29..42c7ad5650 100644 --- a/runtime/native/java_lang_VMClassLoader.cc +++ b/runtime/native/java_lang_VMClassLoader.cc @@ -20,6 +20,7 @@ #include "class_linker.h" #include "dex/descriptors_names.h" #include "dex/dex_file_loader.h" +#include "dex/utf.h" #include "jni/jni_internal.h" #include "mirror/class_loader.h" #include "mirror/object-inl.h" @@ -36,11 +37,11 @@ namespace art { // A class so we can be friends with ClassLinker and access internal methods. 
class VMClassLoader { public: - static mirror::Class* LookupClass(ClassLinker* cl, - Thread* self, - const char* descriptor, - size_t hash, - ObjPtr<mirror::ClassLoader> class_loader) + static ObjPtr<mirror::Class> LookupClass(ClassLinker* cl, + Thread* self, + const char* descriptor, + size_t hash, + ObjPtr<mirror::ClassLoader> class_loader) REQUIRES(!Locks::classlinker_classes_lock_) REQUIRES_SHARED(Locks::mutator_lock_) { return cl->LookupClass(self, descriptor, hash, class_loader); diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc index 13a8d28267..a961cb2597 100644 --- a/runtime/native/java_lang_reflect_Constructor.cc +++ b/runtime/native/java_lang_reflect_Constructor.cc @@ -20,8 +20,8 @@ #include "art_method-inl.h" #include "base/enums.h" -#include "class_linker-inl.h" #include "class_linker.h" +#include "class_root.h" #include "dex/dex_file_annotations.h" #include "jni/jni_internal.h" #include "mirror/class-inl.h" @@ -42,12 +42,8 @@ static jobjectArray Constructor_getExceptionTypes(JNIEnv* env, jobject javaMetho annotations::GetExceptionTypesForMethod(method); if (result_array == nullptr) { // Return an empty array instead of a null pointer. 
- ObjPtr<mirror::Class> class_class = mirror::Class::GetJavaLangClass(); - ObjPtr<mirror::Class> class_array_class = - Runtime::Current()->GetClassLinker()->FindArrayClass(soa.Self(), &class_class); - if (class_array_class == nullptr) { - return nullptr; - } + ObjPtr<mirror::Class> class_array_class = GetClassRoot<mirror::ObjectArray<mirror::Class>>(); + DCHECK(class_array_class != nullptr); ObjPtr<mirror::ObjectArray<mirror::Class>> empty_array = mirror::ObjectArray<mirror::Class>::Alloc(soa.Self(), class_array_class, 0); return soa.AddLocalReference<jobjectArray>(empty_array); diff --git a/runtime/native/java_lang_reflect_Executable.cc b/runtime/native/java_lang_reflect_Executable.cc index 9a2d3020c0..a40cb9b2e6 100644 --- a/runtime/native/java_lang_reflect_Executable.cc +++ b/runtime/native/java_lang_reflect_Executable.cc @@ -20,6 +20,7 @@ #include "nativehelper/jni_macros.h" #include "art_method-inl.h" +#include "class_root.h" #include "dex/dex_file_annotations.h" #include "handle.h" #include "jni/jni_internal.h" @@ -335,15 +336,6 @@ static jclass Executable_getMethodReturnTypeInternal(JNIEnv* env, jobject javaMe return soa.AddLocalReference<jclass>(return_type); } -// TODO: Move this to mirror::Class ? Other mirror types that commonly appear -// as arrays have a GetArrayClass() method. This is duplicated in -// java_lang_Class.cc as well. 
-static ObjPtr<mirror::Class> GetClassArrayClass(Thread* self) - REQUIRES_SHARED(Locks::mutator_lock_) { - ObjPtr<mirror::Class> class_class = mirror::Class::GetJavaLangClass(); - return Runtime::Current()->GetClassLinker()->FindArrayClass(self, &class_class); -} - static jobjectArray Executable_getParameterTypesInternal(JNIEnv* env, jobject javaMethod) { ScopedFastNativeObjectAccess soa(env); ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod); @@ -356,10 +348,10 @@ static jobjectArray Executable_getParameterTypesInternal(JNIEnv* env, jobject ja const uint32_t num_params = params->Size(); - StackHandleScope<3> hs(soa.Self()); - Handle<mirror::Class> class_array_class = hs.NewHandle(GetClassArrayClass(soa.Self())); + StackHandleScope<2> hs(soa.Self()); + ObjPtr<mirror::Class> class_array_class = GetClassRoot<mirror::ObjectArray<mirror::Class>>(); Handle<mirror::ObjectArray<mirror::Class>> ptypes = hs.NewHandle( - mirror::ObjectArray<mirror::Class>::Alloc(soa.Self(), class_array_class.Get(), num_params)); + mirror::ObjectArray<mirror::Class>::Alloc(soa.Self(), class_array_class, num_params)); if (ptypes.IsNull()) { DCHECK(soa.Self()->IsExceptionPending()); return nullptr; diff --git a/runtime/native/java_lang_reflect_Method.cc b/runtime/native/java_lang_reflect_Method.cc index 52e04941c6..34455fe00f 100644 --- a/runtime/native/java_lang_reflect_Method.cc +++ b/runtime/native/java_lang_reflect_Method.cc @@ -22,6 +22,7 @@ #include "base/enums.h" #include "class_linker-inl.h" #include "class_linker.h" +#include "class_root.h" #include "dex/dex_file_annotations.h" #include "jni/jni_internal.h" #include "mirror/class-inl.h" @@ -66,12 +67,8 @@ static jobjectArray Method_getExceptionTypes(JNIEnv* env, jobject javaMethod) { annotations::GetExceptionTypesForMethod(method); if (result_array == nullptr) { // Return an empty array instead of a null pointer - ObjPtr<mirror::Class> class_class = mirror::Class::GetJavaLangClass(); - ObjPtr<mirror::Class> 
class_array_class = - Runtime::Current()->GetClassLinker()->FindArrayClass(soa.Self(), &class_class); - if (class_array_class == nullptr) { - return nullptr; - } + ObjPtr<mirror::Class> class_array_class = GetClassRoot<mirror::ObjectArray<mirror::Class>>(); + DCHECK(class_array_class != nullptr); mirror::ObjectArray<mirror::Class>* empty_array = mirror::ObjectArray<mirror::Class>::Alloc(soa.Self(), class_array_class, 0); return soa.AddLocalReference<jobjectArray>(empty_array); diff --git a/runtime/oat.h b/runtime/oat.h index 7b8f71a3f3..8069a15661 100644 --- a/runtime/oat.h +++ b/runtime/oat.h @@ -32,8 +32,8 @@ class InstructionSetFeatures; class PACKED(4) OatHeader { public: static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' }; - // Last oat version changed reason: Refactor stackmap encoding. - static constexpr uint8_t kOatVersion[] = { '1', '4', '4', '\0' }; + // Last oat version changed reason: Optimize masks in stack maps. + static constexpr uint8_t kOatVersion[] = { '1', '4', '5', '\0' }; static constexpr const char* kImageLocationKey = "image-location"; static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline"; diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc index 4e0bf890db..946ea018f3 100644 --- a/runtime/proxy_test.cc +++ b/runtime/proxy_test.cc @@ -44,9 +44,9 @@ TEST_F(ProxyTest, ProxyClassHelper) { ASSERT_TRUE(I != nullptr); ASSERT_TRUE(J != nullptr); - std::vector<mirror::Class*> interfaces; - interfaces.push_back(I.Get()); - interfaces.push_back(J.Get()); + std::vector<Handle<mirror::Class>> interfaces; + interfaces.push_back(I); + interfaces.push_back(J); Handle<mirror::Class> proxy_class(hs.NewHandle( GenerateProxyClass(soa, jclass_loader, class_linker_, "$Proxy1234", interfaces))); interfaces.clear(); // Don't least possibly stale objects in the array as good practice. 
@@ -80,9 +80,9 @@ TEST_F(ProxyTest, ProxyFieldHelper) { Handle<mirror::Class> proxyClass; { - std::vector<mirror::Class*> interfaces; - interfaces.push_back(I.Get()); - interfaces.push_back(J.Get()); + std::vector<Handle<mirror::Class>> interfaces; + interfaces.push_back(I); + interfaces.push_back(J); proxyClass = hs.NewHandle( GenerateProxyClass(soa, jclass_loader, class_linker_, "$Proxy1234", interfaces)); } @@ -131,7 +131,7 @@ TEST_F(ProxyTest, CheckArtMirrorFieldsOfProxyStaticFields) { Handle<mirror::Class> proxyClass0; Handle<mirror::Class> proxyClass1; { - std::vector<mirror::Class*> interfaces; + std::vector<Handle<mirror::Class>> interfaces; proxyClass0 = hs.NewHandle( GenerateProxyClass(soa, jclass_loader, class_linker_, "$Proxy0", interfaces)); proxyClass1 = hs.NewHandle( diff --git a/runtime/proxy_test.h b/runtime/proxy_test.h index fa5a449e31..860d96c116 100644 --- a/runtime/proxy_test.h +++ b/runtime/proxy_test.h @@ -25,6 +25,7 @@ #include "class_root.h" #include "mirror/class-inl.h" #include "mirror/method.h" +#include "obj_ptr-inl.h" namespace art { namespace proxy_test { @@ -32,11 +33,11 @@ namespace proxy_test { // Generate a proxy class with the given name and interfaces. This is a simplification from what // libcore does to fit to our test needs. We do not check for duplicated interfaces or methods and // we do not declare exceptions. 
-mirror::Class* GenerateProxyClass(ScopedObjectAccess& soa, - jobject jclass_loader, - ClassLinker* class_linker, - const char* className, - const std::vector<mirror::Class*>& interfaces) +ObjPtr<mirror::Class> GenerateProxyClass(ScopedObjectAccess& soa, + jobject jclass_loader, + ClassLinker* class_linker, + const char* className, + const std::vector<Handle<mirror::Class>>& interfaces) REQUIRES_SHARED(Locks::mutator_lock_) { StackHandleScope<1> hs(soa.Self()); Handle<mirror::Class> javaLangObject = hs.NewHandle( @@ -46,21 +47,23 @@ mirror::Class* GenerateProxyClass(ScopedObjectAccess& soa, jclass javaLangClass = soa.AddLocalReference<jclass>(mirror::Class::GetJavaLangClass()); // Builds the interfaces array. - jobjectArray proxyClassInterfaces = soa.Env()->NewObjectArray(interfaces.size(), javaLangClass, - nullptr); + jobjectArray proxyClassInterfaces = + soa.Env()->NewObjectArray(interfaces.size(), javaLangClass, /* initialElement */ nullptr); soa.Self()->AssertNoPendingException(); for (size_t i = 0; i < interfaces.size(); ++i) { soa.Env()->SetObjectArrayElement(proxyClassInterfaces, i, - soa.AddLocalReference<jclass>(interfaces[i])); + soa.AddLocalReference<jclass>(interfaces[i].Get())); } // Builds the method array. jsize methods_count = 3; // Object.equals, Object.hashCode and Object.toString. 
- for (mirror::Class* interface : interfaces) { + for (Handle<mirror::Class> interface : interfaces) { methods_count += interface->NumVirtualMethods(); } jobjectArray proxyClassMethods = soa.Env()->NewObjectArray( - methods_count, soa.AddLocalReference<jclass>(GetClassRoot<mirror::Method>()), nullptr); + methods_count, + soa.AddLocalReference<jclass>(GetClassRoot<mirror::Method>()), + /* initialElement */ nullptr); soa.Self()->AssertNoPendingException(); jsize array_index = 0; @@ -91,7 +94,7 @@ mirror::Class* GenerateProxyClass(ScopedObjectAccess& soa, proxyClassMethods, array_index++, soa.AddLocalReference<jobject>( mirror::Method::CreateFromArtMethod<kRuntimePointerSize, false>(soa.Self(), method))); // Now adds all interfaces virtual methods. - for (mirror::Class* interface : interfaces) { + for (Handle<mirror::Class> interface : interfaces) { for (auto& m : interface->GetDeclaredVirtualMethods(kRuntimePointerSize)) { soa.Env()->SetObjectArrayElement( proxyClassMethods, array_index++, soa.AddLocalReference<jobject>( @@ -104,9 +107,13 @@ mirror::Class* GenerateProxyClass(ScopedObjectAccess& soa, jobjectArray proxyClassThrows = soa.Env()->NewObjectArray(0, javaLangClass, nullptr); soa.Self()->AssertNoPendingException(); - mirror::Class* proxyClass = class_linker->CreateProxyClass( - soa, soa.Env()->NewStringUTF(className), proxyClassInterfaces, jclass_loader, - proxyClassMethods, proxyClassThrows); + ObjPtr<mirror::Class> proxyClass = class_linker->CreateProxyClass( + soa, + soa.Env()->NewStringUTF(className), + proxyClassInterfaces, + jclass_loader, + proxyClassMethods, + proxyClassThrows); soa.Self()->AssertNoPendingException(); return proxyClass; } diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc index de613d3b20..63a09f25a4 100644 --- a/runtime/quick_exception_handler.cc +++ b/runtime/quick_exception_handler.cc @@ -246,8 +246,7 @@ void QuickExceptionHandler::SetCatchEnvironmentForOptimizedHandler(StackVisitor* // Copy values 
between them. for (uint16_t vreg = 0; vreg < number_of_vregs; ++vreg) { - DexRegisterLocation::Kind catch_location = - catch_vreg_map.GetLocationKind(vreg, number_of_vregs, code_info); + DexRegisterLocation::Kind catch_location = catch_vreg_map.GetLocationKind(vreg); if (catch_location == DexRegisterLocation::Kind::kNone) { continue; } @@ -255,9 +254,7 @@ void QuickExceptionHandler::SetCatchEnvironmentForOptimizedHandler(StackVisitor* // Get vreg value from its current location. uint32_t vreg_value; - VRegKind vreg_kind = ToVRegKind(throw_vreg_map.GetLocationKind(vreg, - number_of_vregs, - code_info)); + VRegKind vreg_kind = ToVRegKind(throw_vreg_map.GetLocationKind(vreg)); bool get_vreg_success = stack_visitor->GetVReg(stack_visitor->GetMethod(), vreg, vreg_kind, @@ -268,9 +265,7 @@ void QuickExceptionHandler::SetCatchEnvironmentForOptimizedHandler(StackVisitor* << "native_pc_offset=" << stack_visitor->GetNativePcOffset() << ")"; // Copy value to the catch phi's stack slot. - int32_t slot_offset = catch_vreg_map.GetStackOffsetInBytes(vreg, - number_of_vregs, - code_info); + int32_t slot_offset = catch_vreg_map.GetStackOffsetInBytes(vreg); ArtMethod** frame_top = stack_visitor->GetCurrentQuickFrame(); uint8_t* slot_address = reinterpret_cast<uint8_t*>(frame_top) + slot_offset; uint32_t* slot_ptr = reinterpret_cast<uint32_t*>(slot_address); @@ -425,21 +420,18 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor { continue; } - DexRegisterLocation::Kind location = - vreg_map.GetLocationKind(vreg, number_of_vregs, code_info); + DexRegisterLocation::Kind location = vreg_map.GetLocationKind(vreg); static constexpr uint32_t kDeadValue = 0xEBADDE09; uint32_t value = kDeadValue; bool is_reference = false; switch (location) { case DexRegisterLocation::Kind::kInStack: { - const int32_t offset = vreg_map.GetStackOffsetInBytes(vreg, - number_of_vregs, - code_info); + const int32_t offset = vreg_map.GetStackOffsetInBytes(vreg); const uint8_t* addr = 
reinterpret_cast<const uint8_t*>(GetCurrentQuickFrame()) + offset; value = *reinterpret_cast<const uint32_t*>(addr); uint32_t bit = (offset >> 2); - if (bit < code_info.GetNumberOfStackMaskBits() && stack_mask.LoadBit(bit)) { + if (bit < stack_mask.size_in_bits() && stack_mask.LoadBit(bit)) { is_reference = true; } break; @@ -448,7 +440,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor { case DexRegisterLocation::Kind::kInRegisterHigh: case DexRegisterLocation::Kind::kInFpuRegister: case DexRegisterLocation::Kind::kInFpuRegisterHigh: { - uint32_t reg = vreg_map.GetMachineRegister(vreg, number_of_vregs, code_info); + uint32_t reg = vreg_map.GetMachineRegister(vreg); bool result = GetRegisterIfAccessible(reg, ToVRegKind(location), &value); CHECK(result); if (location == DexRegisterLocation::Kind::kInRegister) { @@ -459,7 +451,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor { break; } case DexRegisterLocation::Kind::kConstant: { - value = vreg_map.GetConstant(vreg, number_of_vregs, code_info); + value = vreg_map.GetConstant(vreg); if (value == 0) { // Make it a reference for extra safety. 
is_reference = true; @@ -472,9 +464,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor { default: { LOG(FATAL) << "Unexpected location kind " - << vreg_map.GetLocationInternalKind(vreg, - number_of_vregs, - code_info); + << vreg_map.GetLocationInternalKind(vreg); UNREACHABLE(); } } diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc index d2d720f722..424ee0681a 100644 --- a/runtime/reflection_test.cc +++ b/runtime/reflection_test.cc @@ -80,7 +80,7 @@ class ReflectionTest : public CommonCompilerTest { jclass GetPrimitiveClass(char descriptor) { ScopedObjectAccess soa(env_); - mirror::Class* c = class_linker_->FindPrimitiveClass(descriptor); + ObjPtr<mirror::Class> c = class_linker_->FindPrimitiveClass(descriptor); CHECK(c != nullptr); return soa.AddLocalReference<jclass>(c); } @@ -518,7 +518,7 @@ TEST_F(ReflectionTest, StaticMainMethod) { hs.NewHandle(soa.Decode<mirror::ClassLoader>(jclass_loader))); CompileDirectMethod(class_loader, "Main", "main", "([Ljava/lang/String;)V"); - mirror::Class* klass = class_linker_->FindClass(soa.Self(), "LMain;", class_loader); + ObjPtr<mirror::Class> klass = class_linker_->FindClass(soa.Self(), "LMain;", class_loader); ASSERT_TRUE(klass != nullptr); ArtMethod* method = klass->FindClassMethod("main", diff --git a/runtime/runtime.cc b/runtime/runtime.cc index 32d9d68d0d..6384d01aaf 100644 --- a/runtime/runtime.cc +++ b/runtime/runtime.cc @@ -1979,7 +1979,6 @@ void Runtime::VisitConstantRoots(RootVisitor* visitor) { // Visit the classes held as static in mirror classes, these can be visited concurrently and only // need to be visited once per GC since they never change. mirror::Class::VisitRoots(visitor); - mirror::String::VisitRoots(visitor); mirror::ClassExt::VisitRoots(visitor); // Visiting the roots of these ArtMethods is not currently required since all the GcRoots are // null. 
diff --git a/runtime/stack.cc b/runtime/stack.cc index 8cb0700ce2..6da7dcb697 100644 --- a/runtime/stack.cc +++ b/runtime/stack.cc @@ -245,13 +245,10 @@ bool StackVisitor::GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKin if (!dex_register_map.IsValid()) { return false; } - DexRegisterLocation::Kind location_kind = - dex_register_map.GetLocationKind(vreg, number_of_dex_registers, code_info); + DexRegisterLocation::Kind location_kind = dex_register_map.GetLocationKind(vreg); switch (location_kind) { case DexRegisterLocation::Kind::kInStack: { - const int32_t offset = dex_register_map.GetStackOffsetInBytes(vreg, - number_of_dex_registers, - code_info); + const int32_t offset = dex_register_map.GetStackOffsetInBytes(vreg); const uint8_t* addr = reinterpret_cast<const uint8_t*>(cur_quick_frame_) + offset; *val = *reinterpret_cast<const uint32_t*>(addr); return true; @@ -260,21 +257,18 @@ bool StackVisitor::GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKin case DexRegisterLocation::Kind::kInRegisterHigh: case DexRegisterLocation::Kind::kInFpuRegister: case DexRegisterLocation::Kind::kInFpuRegisterHigh: { - uint32_t reg = - dex_register_map.GetMachineRegister(vreg, number_of_dex_registers, code_info); + uint32_t reg = dex_register_map.GetMachineRegister(vreg); return GetRegisterIfAccessible(reg, kind, val); } case DexRegisterLocation::Kind::kConstant: - *val = dex_register_map.GetConstant(vreg, number_of_dex_registers, code_info); + *val = dex_register_map.GetConstant(vreg); return true; case DexRegisterLocation::Kind::kNone: return false; default: LOG(FATAL) << "Unexpected location kind " - << dex_register_map.GetLocationInternalKind(vreg, - number_of_dex_registers, - code_info); + << dex_register_map.GetLocationInternalKind(vreg); UNREACHABLE(); } } diff --git a/runtime/stack_map.cc b/runtime/stack_map.cc index 2b7e8dd748..61fe2e7965 100644 --- a/runtime/stack_map.cc +++ b/runtime/stack_map.cc @@ -52,27 +52,21 @@ std::ostream& 
operator<<(std::ostream& stream, const DexRegisterLocation::Kind& } DexRegisterLocation::Kind DexRegisterMap::GetLocationInternalKind( - uint16_t dex_register_number, - uint16_t number_of_dex_registers, - const CodeInfo& code_info) const { + uint16_t dex_register_number) const { DexRegisterLocationCatalog dex_register_location_catalog = - code_info.GetDexRegisterLocationCatalog(); + code_info_.GetDexRegisterLocationCatalog(); size_t location_catalog_entry_index = GetLocationCatalogEntryIndex( dex_register_number, - number_of_dex_registers, - code_info.GetNumberOfLocationCatalogEntries()); + code_info_.GetNumberOfLocationCatalogEntries()); return dex_register_location_catalog.GetLocationInternalKind(location_catalog_entry_index); } -DexRegisterLocation DexRegisterMap::GetDexRegisterLocation(uint16_t dex_register_number, - uint16_t number_of_dex_registers, - const CodeInfo& code_info) const { +DexRegisterLocation DexRegisterMap::GetDexRegisterLocation(uint16_t dex_register_number) const { DexRegisterLocationCatalog dex_register_location_catalog = - code_info.GetDexRegisterLocationCatalog(); + code_info_.GetDexRegisterLocationCatalog(); size_t location_catalog_entry_index = GetLocationCatalogEntryIndex( dex_register_number, - number_of_dex_registers, - code_info.GetNumberOfLocationCatalogEntries()); + code_info_.GetNumberOfLocationCatalogEntries()); return dex_register_location_catalog.GetDexRegisterLocation(location_catalog_entry_index); } @@ -90,7 +84,7 @@ void StackMap::DumpEncoding(const BitTable<6>& table, VariableIndentationOutputStream* vios) { vios->Stream() << "StackMapEncoding" - << " (NativePcOffsetBits=" << table.NumColumnBits(kNativePcOffset) + << " (PackedNativePcBits=" << table.NumColumnBits(kPackedNativePc) << ", DexPcBits=" << table.NumColumnBits(kDexPc) << ", DexRegisterMapOffsetBits=" << table.NumColumnBits(kDexRegisterMapOffset) << ", InlineInfoIndexBits=" << table.NumColumnBits(kInlineInfoIndex) @@ -160,18 +154,15 @@ void 
DexRegisterLocationCatalog::Dump(VariableIndentationOutputStream* vios, } } -void DexRegisterMap::Dump(VariableIndentationOutputStream* vios, - const CodeInfo& code_info, - uint16_t number_of_dex_registers) const { - size_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries(); +void DexRegisterMap::Dump(VariableIndentationOutputStream* vios) const { + size_t number_of_location_catalog_entries = code_info_.GetNumberOfLocationCatalogEntries(); // TODO: Display the bit mask of live Dex registers. - for (size_t j = 0; j < number_of_dex_registers; ++j) { + for (size_t j = 0; j < number_of_dex_registers_; ++j) { if (IsDexRegisterLive(j)) { size_t location_catalog_entry_index = GetLocationCatalogEntryIndex( - j, number_of_dex_registers, number_of_location_catalog_entries); - DexRegisterLocation location = GetDexRegisterLocation(j, - number_of_dex_registers, - code_info); + j, + number_of_location_catalog_entries); + DexRegisterLocation location = GetDexRegisterLocation(j); ScopedIndentation indent1(vios); DumpRegisterMapping( vios->Stream(), j, location, "v", @@ -200,14 +191,14 @@ void StackMap::Dump(VariableIndentationOutputStream* vios, << std::dec << ", stack_mask=0b"; BitMemoryRegion stack_mask = code_info.GetStackMaskOf(*this); - for (size_t i = 0, e = code_info.GetNumberOfStackMaskBits(); i < e; ++i) { + for (size_t i = 0, e = stack_mask.size_in_bits(); i < e; ++i) { vios->Stream() << stack_mask.LoadBit(e - i - 1); } vios->Stream() << ")\n"; if (HasDexRegisterMap()) { DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf( *this, number_of_dex_registers); - dex_register_map.Dump(vios, code_info, number_of_dex_registers); + dex_register_map.Dump(vios); } if (HasInlineInfo()) { InlineInfo inline_info = code_info.GetInlineInfoOf(*this); @@ -244,7 +235,7 @@ void InlineInfo::Dump(VariableIndentationOutputStream* vios, DexRegisterMap dex_register_map = code_info.GetDexRegisterMapAtDepth(i, *this, number_of_dex_registers[i]); 
ScopedIndentation indent1(vios); - dex_register_map.Dump(vios, code_info, number_of_dex_registers[i]); + dex_register_map.Dump(vios); } } } diff --git a/runtime/stack_map.h b/runtime/stack_map.h index 91cecf0690..c558846bb3 100644 --- a/runtime/stack_map.h +++ b/runtime/stack_map.h @@ -19,7 +19,6 @@ #include <limits> -#include "arch/code_offset.h" #include "base/bit_memory_region.h" #include "base/bit_table.h" #include "base/bit_utils.h" @@ -446,53 +445,39 @@ class DexRegisterLocationCatalog { */ class DexRegisterMap { public: - explicit DexRegisterMap(MemoryRegion region) : region_(region) {} - DexRegisterMap() {} + DexRegisterMap(MemoryRegion region, uint16_t number_of_dex_registers, const CodeInfo& code_info) + : region_(region), + number_of_dex_registers_(number_of_dex_registers), + code_info_(code_info) {} bool IsValid() const { return region_.IsValid(); } // Get the surface kind of Dex register `dex_register_number`. - DexRegisterLocation::Kind GetLocationKind(uint16_t dex_register_number, - uint16_t number_of_dex_registers, - const CodeInfo& code_info) const { - return DexRegisterLocation::ConvertToSurfaceKind( - GetLocationInternalKind(dex_register_number, number_of_dex_registers, code_info)); + DexRegisterLocation::Kind GetLocationKind(uint16_t dex_register_number) const { + return DexRegisterLocation::ConvertToSurfaceKind(GetLocationInternalKind(dex_register_number)); } // Get the internal kind of Dex register `dex_register_number`. - DexRegisterLocation::Kind GetLocationInternalKind(uint16_t dex_register_number, - uint16_t number_of_dex_registers, - const CodeInfo& code_info) const; + DexRegisterLocation::Kind GetLocationInternalKind(uint16_t dex_register_number) const; // Get the Dex register location `dex_register_number`. 
- DexRegisterLocation GetDexRegisterLocation(uint16_t dex_register_number, - uint16_t number_of_dex_registers, - const CodeInfo& code_info) const; - - int32_t GetStackOffsetInBytes(uint16_t dex_register_number, - uint16_t number_of_dex_registers, - const CodeInfo& code_info) const { - DexRegisterLocation location = - GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info); + DexRegisterLocation GetDexRegisterLocation(uint16_t dex_register_number) const; + + int32_t GetStackOffsetInBytes(uint16_t dex_register_number) const { + DexRegisterLocation location = GetDexRegisterLocation(dex_register_number); DCHECK(location.GetKind() == DexRegisterLocation::Kind::kInStack); // GetDexRegisterLocation returns the offset in bytes. return location.GetValue(); } - int32_t GetConstant(uint16_t dex_register_number, - uint16_t number_of_dex_registers, - const CodeInfo& code_info) const { - DexRegisterLocation location = - GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info); + int32_t GetConstant(uint16_t dex_register_number) const { + DexRegisterLocation location = GetDexRegisterLocation(dex_register_number); DCHECK_EQ(location.GetKind(), DexRegisterLocation::Kind::kConstant); return location.GetValue(); } - int32_t GetMachineRegister(uint16_t dex_register_number, - uint16_t number_of_dex_registers, - const CodeInfo& code_info) const { - DexRegisterLocation location = - GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info); + int32_t GetMachineRegister(uint16_t dex_register_number) const { + DexRegisterLocation location = GetDexRegisterLocation(dex_register_number); DCHECK(location.GetInternalKind() == DexRegisterLocation::Kind::kInRegister || location.GetInternalKind() == DexRegisterLocation::Kind::kInRegisterHigh || location.GetInternalKind() == DexRegisterLocation::Kind::kInFpuRegister || @@ -504,7 +489,6 @@ class DexRegisterMap { // Get the index of the entry in the Dex register location catalog // 
corresponding to `dex_register_number`. size_t GetLocationCatalogEntryIndex(uint16_t dex_register_number, - uint16_t number_of_dex_registers, size_t number_of_location_catalog_entries) const { if (!IsDexRegisterLive(dex_register_number)) { return DexRegisterLocationCatalog::kNoLocationEntryIndex; @@ -519,9 +503,9 @@ class DexRegisterMap { // The bit offset of the beginning of the map locations. size_t map_locations_offset_in_bits = - GetLocationMappingDataOffset(number_of_dex_registers) * kBitsPerByte; + GetLocationMappingDataOffset(number_of_dex_registers_) * kBitsPerByte; size_t index_in_dex_register_map = GetIndexInDexRegisterMap(dex_register_number); - DCHECK_LT(index_in_dex_register_map, GetNumberOfLiveDexRegisters(number_of_dex_registers)); + DCHECK_LT(index_in_dex_register_map, GetNumberOfLiveDexRegisters()); // The bit size of an entry. size_t map_entry_size_in_bits = SingleEntrySizeInBits(number_of_location_catalog_entries); // The bit offset where `index_in_dex_register_map` is located. @@ -536,9 +520,8 @@ class DexRegisterMap { // Map entry at `index_in_dex_register_map` to `location_catalog_entry_index`. void SetLocationCatalogEntryIndex(size_t index_in_dex_register_map, size_t location_catalog_entry_index, - uint16_t number_of_dex_registers, size_t number_of_location_catalog_entries) { - DCHECK_LT(index_in_dex_register_map, GetNumberOfLiveDexRegisters(number_of_dex_registers)); + DCHECK_LT(index_in_dex_register_map, GetNumberOfLiveDexRegisters()); DCHECK_LT(location_catalog_entry_index, number_of_location_catalog_entries); if (number_of_location_catalog_entries == 1) { @@ -549,7 +532,7 @@ class DexRegisterMap { // The bit offset of the beginning of the map locations. size_t map_locations_offset_in_bits = - GetLocationMappingDataOffset(number_of_dex_registers) * kBitsPerByte; + GetLocationMappingDataOffset(number_of_dex_registers_) * kBitsPerByte; // The bit size of an entry. 
size_t map_entry_size_in_bits = SingleEntrySizeInBits(number_of_location_catalog_entries); // The bit offset where `index_in_dex_register_map` is located. @@ -581,6 +564,10 @@ class DexRegisterMap { return number_of_live_dex_registers; } + size_t GetNumberOfLiveDexRegisters() const { + return GetNumberOfLiveDexRegisters(number_of_dex_registers_); + } + static size_t GetLiveBitMaskOffset() { return kFixedSize; } @@ -595,10 +582,9 @@ class DexRegisterMap { return GetLiveBitMaskOffset() + GetLiveBitMaskSize(number_of_dex_registers); } - size_t GetLocationMappingDataSize(uint16_t number_of_dex_registers, - size_t number_of_location_catalog_entries) const { + size_t GetLocationMappingDataSize(size_t number_of_location_catalog_entries) const { size_t location_mapping_data_size_in_bits = - GetNumberOfLiveDexRegisters(number_of_dex_registers) + GetNumberOfLiveDexRegisters() * SingleEntrySizeInBits(number_of_location_catalog_entries); return RoundUp(location_mapping_data_size_in_bits, kBitsPerByte) / kBitsPerByte; } @@ -621,8 +607,7 @@ class DexRegisterMap { return BitsToBytesRoundUp(region_.size_in_bits()); } - void Dump(VariableIndentationOutputStream* vios, - const CodeInfo& code_info, uint16_t number_of_dex_registers) const; + void Dump(VariableIndentationOutputStream* vios) const; private: // Return the index in the Dex register map corresponding to the Dex @@ -642,6 +627,8 @@ class DexRegisterMap { static constexpr int kFixedSize = 0; BitMemoryRegion region_; + uint16_t number_of_dex_registers_; + const CodeInfo& code_info_; friend class CodeInfo; friend class StackMapStream; @@ -658,7 +645,7 @@ class DexRegisterMap { class StackMap : public BitTable<6>::Accessor { public: enum Field { - kNativePcOffset, + kPackedNativePc, kDexPc, kDexRegisterMapOffset, kInlineInfoIndex, @@ -672,8 +659,7 @@ class StackMap : public BitTable<6>::Accessor { : BitTable<kCount>::Accessor(table, row) {} ALWAYS_INLINE uint32_t GetNativePcOffset(InstructionSet instruction_set) const { - 
CodeOffset offset(CodeOffset::FromCompressedOffset(Get<kNativePcOffset>())); - return offset.Uint32Value(instruction_set); + return UnpackNativePc(Get<kPackedNativePc>(), instruction_set); } uint32_t GetDexPc() const { return Get<kDexPc>(); } @@ -688,6 +674,17 @@ class StackMap : public BitTable<6>::Accessor { uint32_t GetStackMaskIndex() const { return Get<kStackMaskIndex>(); } + static uint32_t PackNativePc(uint32_t native_pc, InstructionSet isa) { + DCHECK_ALIGNED_PARAM(native_pc, GetInstructionSetInstructionAlignment(isa)); + return native_pc / GetInstructionSetInstructionAlignment(isa); + } + + static uint32_t UnpackNativePc(uint32_t packed_native_pc, InstructionSet isa) { + uint32_t native_pc = packed_native_pc * GetInstructionSetInstructionAlignment(isa); + DCHECK_EQ(native_pc / GetInstructionSetInstructionAlignment(isa), packed_native_pc); + return native_pc; + } + static void DumpEncoding(const BitTable<6>& table, VariableIndentationOutputStream* vios); void Dump(VariableIndentationOutputStream* vios, const CodeInfo& code_info, @@ -776,7 +773,7 @@ class InlineInfo : public BitTable<5>::Accessor { class InvokeInfo : public BitTable<3>::Accessor { public: enum Field { - kNativePcOffset, + kPackedNativePc, kInvokeType, kMethodIndexIdx, kCount, @@ -786,8 +783,7 @@ class InvokeInfo : public BitTable<3>::Accessor { : BitTable<kCount>::Accessor(table, row) {} ALWAYS_INLINE uint32_t GetNativePcOffset(InstructionSet instruction_set) const { - CodeOffset offset(CodeOffset::FromCompressedOffset(Get<kNativePcOffset>())); - return offset.Uint32Value(instruction_set); + return StackMap::UnpackNativePc(Get<kPackedNativePc>(), instruction_set); } uint32_t GetInvokeType() const { return Get<kInvokeType>(); } @@ -799,6 +795,24 @@ class InvokeInfo : public BitTable<3>::Accessor { } }; +// Register masks tend to have many trailing zero bits (caller-saves are usually not encoded), +// therefore it is worth encoding the mask as value+shift. 
+class RegisterMask : public BitTable<2>::Accessor { + public: + enum Field { + kValue, + kShift, + kCount, + }; + + RegisterMask(const BitTable<kCount>* table, uint32_t row) + : BitTable<kCount>::Accessor(table, row) {} + + ALWAYS_INLINE uint32_t GetMask() const { + return Get<kValue>() << Get<kShift>(); + } +}; + /** * Wrapper around all compiler information collected for a method. * The information is of the form: @@ -833,24 +847,22 @@ class CodeInfo { return DexRegisterLocationCatalog(location_catalog_); } - ALWAYS_INLINE size_t GetNumberOfStackMaskBits() const { - return stack_mask_bits_; - } - ALWAYS_INLINE StackMap GetStackMapAt(size_t index) const { return StackMap(&stack_maps_, index); } BitMemoryRegion GetStackMask(size_t index) const { - return stack_masks_.Subregion(index * stack_mask_bits_, stack_mask_bits_); + return stack_masks_.GetBitMemoryRegion(index); } BitMemoryRegion GetStackMaskOf(const StackMap& stack_map) const { - return GetStackMask(stack_map.GetStackMaskIndex()); + uint32_t index = stack_map.GetStackMaskIndex(); + return (index == StackMap::kNoValue) ? BitMemoryRegion() : GetStackMask(index); } uint32_t GetRegisterMaskOf(const StackMap& stack_map) const { - return register_masks_.Get(stack_map.GetRegisterMaskIndex()); + uint32_t index = stack_map.GetRegisterMaskIndex(); + return (index == StackMap::kNoValue) ? 
0 : RegisterMask(&register_masks_, index).GetMask(); } uint32_t GetNumberOfLocationCatalogEntries() const { @@ -872,11 +884,13 @@ class CodeInfo { DexRegisterMap GetDexRegisterMapOf(StackMap stack_map, size_t number_of_dex_registers) const { if (!stack_map.HasDexRegisterMap()) { - return DexRegisterMap(); + return DexRegisterMap(MemoryRegion(), 0, *this); } const uint32_t offset = stack_map.GetDexRegisterMapOffset(); size_t size = ComputeDexRegisterMapSizeOf(offset, number_of_dex_registers); - return DexRegisterMap(dex_register_maps_.Subregion(offset, size)); + return DexRegisterMap(dex_register_maps_.Subregion(offset, size), + number_of_dex_registers, + *this); } size_t GetDexRegisterMapsSize(uint32_t number_of_dex_registers) const { @@ -894,11 +908,13 @@ class CodeInfo { InlineInfo inline_info, uint32_t number_of_dex_registers) const { if (!inline_info.HasDexRegisterMapAtDepth(depth)) { - return DexRegisterMap(); + return DexRegisterMap(MemoryRegion(), 0, *this); } else { uint32_t offset = inline_info.GetDexRegisterMapOffsetAtDepth(depth); size_t size = ComputeDexRegisterMapSizeOf(offset, number_of_dex_registers); - return DexRegisterMap(dex_register_maps_.Subregion(offset, size)); + return DexRegisterMap(dex_register_maps_.Subregion(offset, size), + number_of_dex_registers, + *this); } } @@ -1012,9 +1028,11 @@ class CodeInfo { // art::DexRegisterMap::GetNumberOfLiveDexRegisters and DexRegisterMap dex_register_map_without_locations( MemoryRegion(dex_register_maps_.Subregion(dex_register_map_offset, - location_mapping_data_offset_in_dex_register_map))); + location_mapping_data_offset_in_dex_register_map)), + number_of_dex_registers, + *this); size_t number_of_live_dex_registers = - dex_register_map_without_locations.GetNumberOfLiveDexRegisters(number_of_dex_registers); + dex_register_map_without_locations.GetNumberOfLiveDexRegisters(); size_t location_mapping_data_size_in_bits = DexRegisterMap::SingleEntrySizeInBits(GetNumberOfLocationCatalogEntries()) *
number_of_live_dex_registers; @@ -1045,8 +1063,8 @@ class CodeInfo { invoke_infos_.Decode(bit_region, &bit_offset); inline_infos_.Decode(bit_region, &bit_offset); register_masks_.Decode(bit_region, &bit_offset); - stack_mask_bits_ = DecodeVarintBits(bit_region, &bit_offset); - stack_masks_ = bit_region.Subregion(bit_offset, non_header_size * kBitsPerByte - bit_offset); + stack_masks_.Decode(bit_region, &bit_offset); + CHECK_EQ(BitsToBytesRoundUp(bit_offset), non_header_size); } size_t size_; @@ -1056,11 +1074,11 @@ class CodeInfo { BitTable<StackMap::Field::kCount> stack_maps_; BitTable<InvokeInfo::Field::kCount> invoke_infos_; BitTable<InlineInfo::Field::kCount> inline_infos_; - BitTable<1> register_masks_; - uint32_t stack_mask_bits_ = 0; - BitMemoryRegion stack_masks_; + BitTable<RegisterMask::Field::kCount> register_masks_; + BitTable<1> stack_masks_; friend class OatDumper; + friend class StackMapStream; }; #undef ELEMENT_BYTE_OFFSET_AFTER diff --git a/runtime/thread.cc b/runtime/thread.cc index f5d2ffbb17..b59606a06b 100644 --- a/runtime/thread.cc +++ b/runtime/thread.cc @@ -2862,27 +2862,18 @@ jobjectArray Thread::CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRu ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); StackHandleScope<6> hs(soa.Self()); - mirror::Class* aste_array_class = class_linker->FindClass( + Handle<mirror::Class> h_aste_array_class = hs.NewHandle(class_linker->FindSystemClass( soa.Self(), - "[Ldalvik/system/AnnotatedStackTraceElement;", - ScopedNullHandle<mirror::ClassLoader>()); - if (aste_array_class == nullptr) { + "[Ldalvik/system/AnnotatedStackTraceElement;")); + if (h_aste_array_class == nullptr) { return nullptr; } - Handle<mirror::Class> h_aste_array_class(hs.NewHandle<mirror::Class>(aste_array_class)); + Handle<mirror::Class> h_aste_class = hs.NewHandle(h_aste_array_class->GetComponentType()); - mirror::Class* o_array_class = class_linker->FindClass(soa.Self(), - "[Ljava/lang/Object;", - 
ScopedNullHandle<mirror::ClassLoader>()); - if (o_array_class == nullptr) { - // This should not fail in a healthy runtime. - soa.Self()->AssertPendingException(); - return nullptr; - } - Handle<mirror::Class> h_o_array_class(hs.NewHandle<mirror::Class>(o_array_class)); + Handle<mirror::Class> h_o_array_class = + hs.NewHandle(GetClassRoot<mirror::ObjectArray<mirror::Object>>(class_linker)); + DCHECK(h_o_array_class != nullptr); // Class roots must be already initialized. - Handle<mirror::Class> h_aste_class(hs.NewHandle<mirror::Class>( - h_aste_array_class->GetComponentType())); // Make sure the AnnotatedStackTraceElement.class is initialized, b/76208924 . class_linker->EnsureInitialized(soa.Self(), @@ -2906,7 +2897,7 @@ jobjectArray Thread::CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRu size_t length = dumper.stack_trace_elements_.size(); ObjPtr<mirror::ObjectArray<mirror::Object>> array = - mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), aste_array_class, length); + mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), h_aste_array_class.Get(), length); if (array == nullptr) { soa.Self()->AssertPendingOOMException(); return nullptr; @@ -3568,9 +3559,8 @@ class ReferenceMapVisitor : public StackVisitor { T vreg_info(m, code_info, map, visitor_); // Visit stack entries that hold pointers. 
- const size_t number_of_bits = code_info.GetNumberOfStackMaskBits(); BitMemoryRegion stack_mask = code_info.GetStackMaskOf(map); - for (size_t i = 0; i < number_of_bits; ++i) { + for (size_t i = 0; i < stack_mask.size_in_bits(); ++i) { if (stack_mask.LoadBit(i)) { StackReference<mirror::Object>* ref_addr = vreg_base + i; mirror::Object* ref = ref_addr->AsMirrorPtr(); @@ -3680,8 +3670,7 @@ class ReferenceMapVisitor : public StackVisitor { REQUIRES_SHARED(Locks::mutator_lock_) { bool found = false; for (size_t dex_reg = 0; dex_reg != number_of_dex_registers; ++dex_reg) { - DexRegisterLocation location = dex_register_map.GetDexRegisterLocation( - dex_reg, number_of_dex_registers, code_info); + DexRegisterLocation location = dex_register_map.GetDexRegisterLocation(dex_reg); if (location.GetKind() == kind && static_cast<size_t>(location.GetValue()) == index) { visitor(ref, dex_reg, stack_visitor); found = true; diff --git a/runtime/vdex_file.cc b/runtime/vdex_file.cc index 838d7f14bc..e2f42c937d 100644 --- a/runtime/vdex_file.cc +++ b/runtime/vdex_file.cc @@ -28,6 +28,7 @@ #include "base/stl_util.h" #include "base/unix_file/fd_file.h" #include "dex/art_dex_file_loader.h" +#include "dex/class_accessor-inl.h" #include "dex/dex_file.h" #include "dex/dex_file_loader.h" #include "dex/hidden_api_access_flags.h" @@ -283,31 +284,26 @@ void VdexFile::UnquickenDexFile(const DexFile& target_dex_file, std::unordered_set<const DexFile::CodeItem*> unquickened_code_item; CompactOffsetTable::Accessor accessor(GetQuickenInfoOffsetTable(source_dex_begin, quickening_info)); - for (uint32_t i = 0; i < target_dex_file.NumClassDefs(); ++i) { - const DexFile::ClassDef& class_def = target_dex_file.GetClassDef(i); - const uint8_t* class_data = target_dex_file.GetClassData(class_def); - if (class_data != nullptr) { - for (ClassDataItemIterator class_it(target_dex_file, class_data); - class_it.HasNext(); - class_it.Next()) { - if (class_it.IsAtMethod()) { - const DexFile::CodeItem* code_item = 
class_it.GetMethodCodeItem(); - if (code_item != nullptr && unquickened_code_item.emplace(code_item).second) { - const uint32_t offset = accessor.GetOffset(class_it.GetMemberIndex()); - // Offset being 0 means not quickened. - if (offset != 0u) { - ArrayRef<const uint8_t> quicken_data = GetQuickeningInfoAt(quickening_info, offset); - optimizer::ArtDecompileDEX( - target_dex_file, - *code_item, - quicken_data, - decompile_return_instruction); - } - } + for (ClassAccessor class_accessor : target_dex_file.GetClasses()) { + for (const ClassAccessor::Method& method : class_accessor.GetMethods()) { + const DexFile::CodeItem* code_item = method.GetCodeItem(); + if (code_item != nullptr && unquickened_code_item.emplace(code_item).second) { + const uint32_t offset = accessor.GetOffset(method.GetIndex()); + // Offset being 0 means not quickened. + if (offset != 0u) { + ArrayRef<const uint8_t> quicken_data = GetQuickeningInfoAt(quickening_info, offset); + optimizer::ArtDecompileDEX( + target_dex_file, + *code_item, + quicken_data, + decompile_return_instruction); } - DexFile::UnHideAccessFlags(class_it); + method.UnHideAccessFlags(); } } + for (const ClassAccessor::Field& field : class_accessor.GetFields()) { + field.UnHideAccessFlags(); + } } } diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc index 287e3d619a..f4967f70be 100644 --- a/runtime/verifier/method_verifier.cc +++ b/runtime/verifier/method_verifier.cc @@ -35,6 +35,7 @@ #include "class_linker.h" #include "class_root.h" #include "compiler_callbacks.h" +#include "dex/class_accessor-inl.h" #include "dex/descriptors_names.h" #include "dex/dex_file-inl.h" #include "dex/dex_file_exception_helpers.h" @@ -144,7 +145,7 @@ static void SafelyMarkAllRegistersAsConflicts(MethodVerifier* verifier, Register } FailureKind MethodVerifier::VerifyClass(Thread* self, - mirror::Class* klass, + ObjPtr<mirror::Class> klass, CompilerCallbacks* callbacks, bool allow_soft_failures, HardFailLogMode 
log_level, @@ -190,11 +191,6 @@ FailureKind MethodVerifier::VerifyClass(Thread* self, error); } -template <bool kDirect> -static bool HasNextMethod(ClassDataItemIterator* it) { - return kDirect ? it->HasNextDirectMethod() : it->HasNextVirtualMethod(); -} - static FailureKind FailureKindMax(FailureKind fk1, FailureKind fk2) { static_assert(FailureKind::kNoFailure < FailureKind::kSoftFailure && FailureKind::kSoftFailure < FailureKind::kHardFailure, @@ -207,45 +203,51 @@ void MethodVerifier::FailureData::Merge(const MethodVerifier::FailureData& fd) { types |= fd.types; } -template <bool kDirect> -MethodVerifier::FailureData MethodVerifier::VerifyMethods(Thread* self, - ClassLinker* linker, - const DexFile* dex_file, - const DexFile::ClassDef& class_def, - ClassDataItemIterator* it, - Handle<mirror::DexCache> dex_cache, - Handle<mirror::ClassLoader> class_loader, - CompilerCallbacks* callbacks, - bool allow_soft_failures, - HardFailLogMode log_level, - bool need_precise_constants, - std::string* error_string) { - DCHECK(it != nullptr); +FailureKind MethodVerifier::VerifyClass(Thread* self, + const DexFile* dex_file, + Handle<mirror::DexCache> dex_cache, + Handle<mirror::ClassLoader> class_loader, + const DexFile::ClassDef& class_def, + CompilerCallbacks* callbacks, + bool allow_soft_failures, + HardFailLogMode log_level, + std::string* error) { + SCOPED_TRACE << "VerifyClass " << PrettyDescriptor(dex_file->GetClassDescriptor(class_def)); + // A class must not be abstract and final. 
+ if ((class_def.access_flags_ & (kAccAbstract | kAccFinal)) == (kAccAbstract | kAccFinal)) { + *error = "Verifier rejected class "; + *error += PrettyDescriptor(dex_file->GetClassDescriptor(class_def)); + *error += ": class is abstract and final."; + return FailureKind::kHardFailure; + } + + ClassAccessor accessor(*dex_file, class_def); + + int64_t previous_method_idx[2] = { -1, -1 }; MethodVerifier::FailureData failure_data; + ClassLinker* const linker = Runtime::Current()->GetClassLinker(); - int64_t previous_method_idx = -1; - while (HasNextMethod<kDirect>(it)) { + for (const ClassAccessor::Method& method : accessor.GetMethods()) { + int64_t* previous_idx = &previous_method_idx[method.IsStaticOrDirect() ? 0u : 1u]; self->AllowThreadSuspension(); - uint32_t method_idx = it->GetMemberIndex(); - if (method_idx == previous_method_idx) { + const uint32_t method_idx = method.GetIndex(); + if (method_idx == *previous_idx) { // smali can create dex files with two encoded_methods sharing the same method_idx // http://code.google.com/p/smali/issues/detail?id=119 - it->Next(); continue; } - previous_method_idx = method_idx; - InvokeType type = it->GetMethodInvokeType(class_def); - ArtMethod* method = linker->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>( + *previous_idx = method_idx; + const InvokeType type = method.GetInvokeType(class_def.access_flags_); + ArtMethod* resolved_method = linker->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>( method_idx, dex_cache, class_loader, /* referrer */ nullptr, type); - if (method == nullptr) { + if (resolved_method == nullptr) { DCHECK(self->IsExceptionPending()); // We couldn't resolve the method, but continue regardless. 
self->ClearException(); } else { - DCHECK(method->GetDeclaringClassUnchecked() != nullptr) << type; + DCHECK(resolved_method->GetDeclaringClassUnchecked() != nullptr) << type; } - StackHandleScope<1> hs(self); std::string hard_failure_msg; MethodVerifier::FailureData result = VerifyMethod(self, method_idx, @@ -253,99 +255,39 @@ MethodVerifier::FailureData MethodVerifier::VerifyMethods(Thread* self, dex_cache, class_loader, class_def, - it->GetMethodCodeItem(), - method, - it->GetMethodAccessFlags(), + method.GetCodeItem(), + resolved_method, + method.GetAccessFlags(), callbacks, allow_soft_failures, log_level, - need_precise_constants, + /*need_precise_constants*/ false, &hard_failure_msg); if (result.kind == FailureKind::kHardFailure) { if (failure_data.kind == FailureKind::kHardFailure) { // If we logged an error before, we need a newline. - *error_string += "\n"; + *error += "\n"; } else { // If we didn't log a hard failure before, print the header of the message. - *error_string += "Verifier rejected class "; - *error_string += PrettyDescriptor(dex_file->GetClassDescriptor(class_def)); - *error_string += ":"; + *error += "Verifier rejected class "; + *error += PrettyDescriptor(dex_file->GetClassDescriptor(class_def)); + *error += ":"; } - *error_string += " "; - *error_string += hard_failure_msg; + *error += " "; + *error += hard_failure_msg; } failure_data.Merge(result); - it->Next(); } - return failure_data; -} - -FailureKind MethodVerifier::VerifyClass(Thread* self, - const DexFile* dex_file, - Handle<mirror::DexCache> dex_cache, - Handle<mirror::ClassLoader> class_loader, - const DexFile::ClassDef& class_def, - CompilerCallbacks* callbacks, - bool allow_soft_failures, - HardFailLogMode log_level, - std::string* error) { - SCOPED_TRACE << "VerifyClass " << PrettyDescriptor(dex_file->GetClassDescriptor(class_def)); - - // A class must not be abstract and final. 
- if ((class_def.access_flags_ & (kAccAbstract | kAccFinal)) == (kAccAbstract | kAccFinal)) { - *error = "Verifier rejected class "; - *error += PrettyDescriptor(dex_file->GetClassDescriptor(class_def)); - *error += ": class is abstract and final."; - return FailureKind::kHardFailure; - } - - const uint8_t* class_data = dex_file->GetClassData(class_def); - if (class_data == nullptr) { - // empty class, probably a marker interface - return FailureKind::kNoFailure; - } - ClassDataItemIterator it(*dex_file, class_data); - it.SkipAllFields(); - ClassLinker* linker = Runtime::Current()->GetClassLinker(); - // Direct methods. - MethodVerifier::FailureData data1 = VerifyMethods<true>(self, - linker, - dex_file, - class_def, - &it, - dex_cache, - class_loader, - callbacks, - allow_soft_failures, - log_level, - false /* need precise constants */, - error); - // Virtual methods. - MethodVerifier::FailureData data2 = VerifyMethods<false>(self, - linker, - dex_file, - class_def, - &it, - dex_cache, - class_loader, - callbacks, - allow_soft_failures, - log_level, - false /* need precise constants */, - error); - - data1.Merge(data2); - - if (data1.kind == FailureKind::kNoFailure) { + if (failure_data.kind == FailureKind::kNoFailure) { return FailureKind::kNoFailure; } else { - if ((data1.types & VERIFY_ERROR_LOCKING) != 0) { + if ((failure_data.types & VERIFY_ERROR_LOCKING) != 0) { // Print a warning about expected slow-down. Use a string temporary to print one contiguous // warning. 
std::string tmp = StringPrintf("Class %s failed lock verification and will run slower.", - PrettyDescriptor(dex_file->GetClassDescriptor(class_def)).c_str()); + PrettyDescriptor(accessor.GetDescriptor()).c_str()); if (!gPrintedDxMonitorText) { tmp = tmp + "\nCommon causes for lock verification issues are non-optimized dex code\n" "and incorrect proguard optimizations."; @@ -353,7 +295,7 @@ FailureKind MethodVerifier::VerifyClass(Thread* self, } LOG(WARNING) << tmp; } - return data1.kind; + return failure_data.kind; } } @@ -1924,15 +1866,11 @@ bool MethodVerifier::CodeFlowVerifyMethod() { static uint32_t GetFirstFinalInstanceFieldIndex(const DexFile& dex_file, dex::TypeIndex type_idx) { const DexFile::ClassDef* class_def = dex_file.FindClassDef(type_idx); DCHECK(class_def != nullptr); - const uint8_t* class_data = dex_file.GetClassData(*class_def); - DCHECK(class_data != nullptr); - ClassDataItemIterator it(dex_file, class_data); - it.SkipStaticFields(); - while (it.HasNextInstanceField()) { - if ((it.GetFieldAccessFlags() & kAccFinal) != 0) { - return it.GetMemberIndex(); + ClassAccessor accessor(dex_file, *class_def); + for (const ClassAccessor::Field& field : accessor.GetInstanceFields()) { + if (field.IsFinal()) { + return field.GetIndex(); } - it.Next(); } return dex::kDexNoIndex; } @@ -3366,7 +3304,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { } break; } - auto* klass = declaring_class.GetClass(); + ObjPtr<mirror::Class> klass = declaring_class.GetClass(); for (uint32_t i = 0, num_fields = klass->NumInstanceFields(); i < num_fields; ++i) { if (klass->GetInstanceField(i)->IsFinal()) { Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-void-no-barrier not expected for " @@ -3667,10 +3605,10 @@ const RegType& MethodVerifier::ResolveClass(dex::TypeIndex class_idx) { UninstantiableError(descriptor); precise = false; } - result = reg_types_.FindClass(klass.Ptr(), precise); + result = reg_types_.FindClass(klass, precise); if (result == 
nullptr) { const char* descriptor = dex_file_->StringByTypeIdx(class_idx); - result = reg_types_.InsertClass(descriptor, klass.Ptr(), precise); + result = reg_types_.InsertClass(descriptor, klass, precise); } } else { const char* descriptor = dex_file_->StringByTypeIdx(class_idx); @@ -4943,7 +4881,7 @@ const RegType& MethodVerifier::GetDeclaringClass() { const char* descriptor = dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(method_id.class_idx_)); if (method_being_verified_ != nullptr) { - mirror::Class* klass = method_being_verified_->GetDeclaringClass(); + ObjPtr<mirror::Class> klass = method_being_verified_->GetDeclaringClass(); declaring_class_ = &FromClass(descriptor, klass, klass->CannotBeAssignedFromOtherTypes()); } else { declaring_class_ = &reg_types_.FromDescriptor(GetClassLoader(), descriptor, false); @@ -5045,7 +4983,7 @@ void MethodVerifier::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) } const RegType& MethodVerifier::FromClass(const char* descriptor, - mirror::Class* klass, + ObjPtr<mirror::Class> klass, bool precise) { DCHECK(klass != nullptr); if (precise && !klass->IsInstantiable() && !klass->IsPrimitive()) { diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h index 531d3dabfa..9890af9d95 100644 --- a/runtime/verifier/method_verifier.h +++ b/runtime/verifier/method_verifier.h @@ -96,7 +96,7 @@ class MethodVerifier { public: // Verify a class. Returns "kNoFailure" on success. static FailureKind VerifyClass(Thread* self, - mirror::Class* klass, + ObjPtr<mirror::Class> klass, CompilerCallbacks* callbacks, bool allow_soft_failures, HardFailLogMode log_level, @@ -275,23 +275,6 @@ class MethodVerifier { void Merge(const FailureData& src); }; - // Verify all direct or virtual methods of a class. The method assumes that the iterator is - // positioned correctly, and the iterator will be updated.
- template <bool kDirect> - static FailureData VerifyMethods(Thread* self, - ClassLinker* linker, - const DexFile* dex_file, - const DexFile::ClassDef& class_def, - ClassDataItemIterator* it, - Handle<mirror::DexCache> dex_cache, - Handle<mirror::ClassLoader> class_loader, - CompilerCallbacks* callbacks, - bool allow_soft_failures, - HardFailLogMode log_level, - bool need_precise_constants, - std::string* error_string) - REQUIRES_SHARED(Locks::mutator_lock_); - /* * Perform verification on a single method. * @@ -691,7 +674,7 @@ class MethodVerifier { // non-precise reference will be returned. // Note: we reuse NO_CLASS as this will throw an exception at runtime, when the failing class is // actually touched. - const RegType& FromClass(const char* descriptor, mirror::Class* klass, bool precise) + const RegType& FromClass(const char* descriptor, ObjPtr<mirror::Class> klass, bool precise) REQUIRES_SHARED(Locks::mutator_lock_); ALWAYS_INLINE bool FailOrAbort(bool condition, const char* error_msg, uint32_t work_insn_idx); diff --git a/runtime/verifier/method_verifier_test.cc b/runtime/verifier/method_verifier_test.cc index db3f093905..d1be9fa6f8 100644 --- a/runtime/verifier/method_verifier_test.cc +++ b/runtime/verifier/method_verifier_test.cc @@ -37,7 +37,7 @@ class MethodVerifierTest : public CommonRuntimeTest { REQUIRES_SHARED(Locks::mutator_lock_) { ASSERT_TRUE(descriptor != nullptr); Thread* self = Thread::Current(); - mirror::Class* klass = class_linker_->FindSystemClass(self, descriptor.c_str()); + ObjPtr<mirror::Class> klass = class_linker_->FindSystemClass(self, descriptor.c_str()); // Verify the class std::string error_msg; diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc index e7864a28a0..73e516c7bf 100644 --- a/runtime/verifier/reg_type.cc +++ b/runtime/verifier/reg_type.cc @@ -54,17 +54,19 @@ const DoubleHiType* DoubleHiType::instance_ = nullptr; const IntegerType* IntegerType::instance_ = nullptr; const NullType* 
NullType::instance_ = nullptr; -PrimitiveType::PrimitiveType(mirror::Class* klass, const StringPiece& descriptor, uint16_t cache_id) +PrimitiveType::PrimitiveType(ObjPtr<mirror::Class> klass, + const StringPiece& descriptor, + uint16_t cache_id) : RegType(klass, descriptor, cache_id) { CHECK(klass != nullptr); CHECK(!descriptor.empty()); } -Cat1Type::Cat1Type(mirror::Class* klass, const StringPiece& descriptor, uint16_t cache_id) +Cat1Type::Cat1Type(ObjPtr<mirror::Class> klass, const StringPiece& descriptor, uint16_t cache_id) : PrimitiveType(klass, descriptor, cache_id) { } -Cat2Type::Cat2Type(mirror::Class* klass, const StringPiece& descriptor, uint16_t cache_id) +Cat2Type::Cat2Type(ObjPtr<mirror::Class> klass, const StringPiece& descriptor, uint16_t cache_id) : PrimitiveType(klass, descriptor, cache_id) { } @@ -129,7 +131,7 @@ std::string IntegerType::Dump() const { return "Integer"; } -const DoubleHiType* DoubleHiType::CreateInstance(mirror::Class* klass, +const DoubleHiType* DoubleHiType::CreateInstance(ObjPtr<mirror::Class> klass, const StringPiece& descriptor, uint16_t cache_id) { CHECK(instance_ == nullptr); @@ -144,7 +146,7 @@ void DoubleHiType::Destroy() { } } -const DoubleLoType* DoubleLoType::CreateInstance(mirror::Class* klass, +const DoubleLoType* DoubleLoType::CreateInstance(ObjPtr<mirror::Class> klass, const StringPiece& descriptor, uint16_t cache_id) { CHECK(instance_ == nullptr); @@ -159,14 +161,16 @@ void DoubleLoType::Destroy() { } } -const LongLoType* LongLoType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor, +const LongLoType* LongLoType::CreateInstance(ObjPtr<mirror::Class> klass, + const StringPiece& descriptor, uint16_t cache_id) { CHECK(instance_ == nullptr); instance_ = new LongLoType(klass, descriptor, cache_id); return instance_; } -const LongHiType* LongHiType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor, +const LongHiType* LongHiType::CreateInstance(ObjPtr<mirror::Class> klass, + const 
StringPiece& descriptor, uint16_t cache_id) { CHECK(instance_ == nullptr); instance_ = new LongHiType(klass, descriptor, cache_id); @@ -187,7 +191,8 @@ void LongLoType::Destroy() { } } -const FloatType* FloatType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor, +const FloatType* FloatType::CreateInstance(ObjPtr<mirror::Class> klass, + const StringPiece& descriptor, uint16_t cache_id) { CHECK(instance_ == nullptr); instance_ = new FloatType(klass, descriptor, cache_id); @@ -201,7 +206,8 @@ void FloatType::Destroy() { } } -const CharType* CharType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor, +const CharType* CharType::CreateInstance(ObjPtr<mirror::Class> klass, + const StringPiece& descriptor, uint16_t cache_id) { CHECK(instance_ == nullptr); instance_ = new CharType(klass, descriptor, cache_id); @@ -215,7 +221,8 @@ void CharType::Destroy() { } } -const ShortType* ShortType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor, +const ShortType* ShortType::CreateInstance(ObjPtr<mirror::Class> klass, + const StringPiece& descriptor, uint16_t cache_id) { CHECK(instance_ == nullptr); instance_ = new ShortType(klass, descriptor, cache_id); @@ -229,7 +236,8 @@ void ShortType::Destroy() { } } -const ByteType* ByteType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor, +const ByteType* ByteType::CreateInstance(ObjPtr<mirror::Class> klass, + const StringPiece& descriptor, uint16_t cache_id) { CHECK(instance_ == nullptr); instance_ = new ByteType(klass, descriptor, cache_id); @@ -243,7 +251,8 @@ void ByteType::Destroy() { } } -const IntegerType* IntegerType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor, +const IntegerType* IntegerType::CreateInstance(ObjPtr<mirror::Class> klass, + const StringPiece& descriptor, uint16_t cache_id) { CHECK(instance_ == nullptr); instance_ = new IntegerType(klass, descriptor, cache_id); @@ -257,7 +266,7 @@ void IntegerType::Destroy() { } } -const 
ConflictType* ConflictType::CreateInstance(mirror::Class* klass, +const ConflictType* ConflictType::CreateInstance(ObjPtr<mirror::Class> klass, const StringPiece& descriptor, uint16_t cache_id) { CHECK(instance_ == nullptr); @@ -272,8 +281,9 @@ void ConflictType::Destroy() { } } -const BooleanType* BooleanType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor, - uint16_t cache_id) { +const BooleanType* BooleanType::CreateInstance(ObjPtr<mirror::Class> klass, + const StringPiece& descriptor, + uint16_t cache_id) { CHECK(BooleanType::instance_ == nullptr); instance_ = new BooleanType(klass, descriptor, cache_id); return BooleanType::instance_; @@ -290,7 +300,7 @@ std::string UndefinedType::Dump() const REQUIRES_SHARED(Locks::mutator_lock_) { return "Undefined"; } -const UndefinedType* UndefinedType::CreateInstance(mirror::Class* klass, +const UndefinedType* UndefinedType::CreateInstance(ObjPtr<mirror::Class> klass, const StringPiece& descriptor, uint16_t cache_id) { CHECK(instance_ == nullptr); @@ -305,7 +315,8 @@ void UndefinedType::Destroy() { } } -PreciseReferenceType::PreciseReferenceType(mirror::Class* klass, const StringPiece& descriptor, +PreciseReferenceType::PreciseReferenceType(ObjPtr<mirror::Class> klass, + const StringPiece& descriptor, uint16_t cache_id) : RegType(klass, descriptor, cache_id) { // Note: no check for IsInstantiable() here. We may produce this in case an InstantiationError @@ -505,7 +516,7 @@ bool UnresolvedType::IsNonZeroReferenceTypes() const { const RegType& RegType::GetSuperClass(RegTypeCache* cache) const { if (!IsUnresolvedTypes()) { - mirror::Class* super_klass = GetClass()->GetSuperClass(); + ObjPtr<mirror::Class> super_klass = GetClass()->GetSuperClass(); if (super_klass != nullptr) { // A super class of a precise type isn't precise as a precise type indicates the register // holds exactly that type. 
@@ -543,7 +554,7 @@ bool RegType::IsObjectArrayTypes() const REQUIRES_SHARED(Locks::mutator_lock_) { DCHECK(descriptor_[1] == 'L' || descriptor_[1] == '['); return descriptor_[0] == '['; } else if (HasClass()) { - mirror::Class* type = GetClass(); + ObjPtr<mirror::Class> type = GetClass(); return type->IsArrayClass() && !type->GetComponentType()->IsPrimitive(); } else { return false; @@ -569,7 +580,7 @@ bool RegType::IsArrayTypes() const REQUIRES_SHARED(Locks::mutator_lock_) { bool RegType::IsJavaLangObjectArray() const { if (HasClass()) { - mirror::Class* type = GetClass(); + ObjPtr<mirror::Class> type = GetClass(); return type->IsArrayClass() && type->GetComponentType()->IsObjectClass(); } return false; @@ -712,11 +723,10 @@ const RegType& RegType::Merge(const RegType& incoming_type, // mechanics to continue. return reg_types->FromUnresolvedMerge(*this, incoming_type, verifier); } else { // Two reference types, compute Join - mirror::Class* c1 = GetClass(); - mirror::Class* c2 = incoming_type.GetClass(); - DCHECK(c1 != nullptr && !c1->IsPrimitive()); - DCHECK(c2 != nullptr && !c2->IsPrimitive()); - mirror::Class* join_class = ClassJoin(c1, c2); + // Do not cache the classes as ClassJoin() can suspend and invalidate ObjPtr<>s. + DCHECK(GetClass() != nullptr && !GetClass()->IsPrimitive()); + DCHECK(incoming_type.GetClass() != nullptr && !incoming_type.GetClass()->IsPrimitive()); + ObjPtr<mirror::Class> join_class = ClassJoin(GetClass(), incoming_type.GetClass()); if (UNLIKELY(join_class == nullptr)) { // Internal error joining the classes (e.g., OOME). Report an unresolved reference type. // We cannot report an unresolved merge type, as that will attempt to merge the resolved @@ -731,30 +741,37 @@ const RegType& RegType::Merge(const RegType& incoming_type, // (In that case, it is likely a misconfiguration of dex2oat.) 
if (!kIsTargetBuild && Runtime::Current()->IsAotCompiler()) { LOG(FATAL) << "Could not create class join of " - << c1->PrettyClass() + << GetClass()->PrettyClass() << " & " - << c2->PrettyClass(); + << incoming_type.GetClass()->PrettyClass(); UNREACHABLE(); } return reg_types->MakeUnresolvedReference(); } - // Record the dependency that both `c1` and `c2` are assignable to `join_class`. - // The `verifier` is null during unit tests. + // Record the dependency that both `GetClass()` and `incoming_type.GetClass()` + // are assignable to `join_class`. The `verifier` is null during unit tests. if (verifier != nullptr) { - VerifierDeps::MaybeRecordAssignability( - verifier->GetDexFile(), join_class, c1, true /* strict */, true /* is_assignable */); - VerifierDeps::MaybeRecordAssignability( - verifier->GetDexFile(), join_class, c2, true /* strict */, true /* is_assignable */); + VerifierDeps::MaybeRecordAssignability(verifier->GetDexFile(), + join_class, + GetClass(), + /* strict */ true, + /* is_assignable */ true); + VerifierDeps::MaybeRecordAssignability(verifier->GetDexFile(), + join_class, + incoming_type.GetClass(), + /* strict */ true, + /* is_assignable */ true); } - if (c1 == join_class && !IsPreciseReference()) { + if (GetClass() == join_class && !IsPreciseReference()) { return *this; - } else if (c2 == join_class && !incoming_type.IsPreciseReference()) { + } else if (incoming_type.GetClass() == join_class && !incoming_type.IsPreciseReference()) { return incoming_type; } else { std::string temp; - return reg_types->FromClass(join_class->GetDescriptor(&temp), join_class, false); + const char* descriptor = join_class->GetDescriptor(&temp); + return reg_types->FromClass(descriptor, join_class, /* precise */ false); } } } else { @@ -763,7 +780,7 @@ const RegType& RegType::Merge(const RegType& incoming_type, } // See comment in reg_type.h -mirror::Class* RegType::ClassJoin(mirror::Class* s, mirror::Class* t) { +ObjPtr<mirror::Class> 
RegType::ClassJoin(ObjPtr<mirror::Class> s, ObjPtr<mirror::Class> t) { DCHECK(!s->IsPrimitive()) << s->PrettyClass(); DCHECK(!t->IsPrimitive()) << t->PrettyClass(); if (s == t) { @@ -773,12 +790,12 @@ mirror::Class* RegType::ClassJoin(mirror::Class* s, mirror::Class* t) { } else if (t->IsAssignableFrom(s)) { return t; } else if (s->IsArrayClass() && t->IsArrayClass()) { - mirror::Class* s_ct = s->GetComponentType(); - mirror::Class* t_ct = t->GetComponentType(); + ObjPtr<mirror::Class> s_ct = s->GetComponentType(); + ObjPtr<mirror::Class> t_ct = t->GetComponentType(); if (s_ct->IsPrimitive() || t_ct->IsPrimitive()) { // Given the types aren't the same, if either array is of primitive types then the only // common parent is java.lang.Object - mirror::Class* result = s->GetSuperClass(); // short-cut to java.lang.Object + ObjPtr<mirror::Class> result = s->GetSuperClass(); // short-cut to java.lang.Object DCHECK(result->IsObjectClass()); return result; } @@ -788,8 +805,9 @@ mirror::Class* RegType::ClassJoin(mirror::Class* s, mirror::Class* t) { self->AssertPendingException(); return nullptr; } - ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - mirror::Class* array_class = class_linker->FindArrayClass(self, &common_elem); + // Note: The following lookup invalidates existing ObjPtr<>s. 
+ ObjPtr<mirror::Class> array_class = + Runtime::Current()->GetClassLinker()->FindArrayClass(self, &common_elem); if (UNLIKELY(array_class == nullptr)) { self->AssertPendingException(); return nullptr; @@ -971,7 +989,7 @@ bool RegType::CanAssignArray(const RegType& src, return cmp1.CanAssignArray(cmp2, reg_types, class_loader, verifier, soft_error); } -const NullType* NullType::CreateInstance(mirror::Class* klass, +const NullType* NullType::CreateInstance(ObjPtr<mirror::Class> klass, const StringPiece& descriptor, uint16_t cache_id) { CHECK(instance_ == nullptr); diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h index 3e994074a1..29da376091 100644 --- a/runtime/verifier/reg_type.h +++ b/runtime/verifier/reg_type.h @@ -191,7 +191,7 @@ class RegType { !IsUnresolvedSuperClass())); return descriptor_; } - mirror::Class* GetClass() const REQUIRES_SHARED(Locks::mutator_lock_) { + ObjPtr<mirror::Class> GetClass() const REQUIRES_SHARED(Locks::mutator_lock_) { DCHECK(!IsUnresolvedReference()); DCHECK(!klass_.IsNull()) << Dump(); DCHECK(HasClass()); @@ -318,7 +318,7 @@ class RegType { } protected: - RegType(mirror::Class* klass, + RegType(ObjPtr<mirror::Class> klass, const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) : descriptor_(descriptor), @@ -365,7 +365,7 @@ class RegType { * * [1] Java bytecode verification: algorithms and formalizations, Xavier Leroy */ - static mirror::Class* ClassJoin(mirror::Class* s, mirror::Class* t) + static ObjPtr<mirror::Class> ClassJoin(ObjPtr<mirror::Class> s, ObjPtr<mirror::Class> t) REQUIRES_SHARED(Locks::mutator_lock_); static bool AssignableFrom(const RegType& lhs, @@ -388,7 +388,7 @@ class ConflictType FINAL : public RegType { static const ConflictType* GetInstance() PURE; // Create the singleton instance. 
- static const ConflictType* CreateInstance(mirror::Class* klass, + static const ConflictType* CreateInstance(ObjPtr<mirror::Class> klass, const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_); @@ -401,7 +401,8 @@ class ConflictType FINAL : public RegType { } private: - ConflictType(mirror::Class* klass, const StringPiece& descriptor, + ConflictType(ObjPtr<mirror::Class> klass, + const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) : RegType(klass, descriptor, cache_id) { CheckConstructorInvariants(this); @@ -423,7 +424,7 @@ class UndefinedType FINAL : public RegType { static const UndefinedType* GetInstance() PURE; // Create the singleton instance. - static const UndefinedType* CreateInstance(mirror::Class* klass, + static const UndefinedType* CreateInstance(ObjPtr<mirror::Class> klass, const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_); @@ -436,7 +437,8 @@ class UndefinedType FINAL : public RegType { } private: - UndefinedType(mirror::Class* klass, const StringPiece& descriptor, + UndefinedType(ObjPtr<mirror::Class> klass, + const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) : RegType(klass, descriptor, cache_id) { CheckConstructorInvariants(this); @@ -447,7 +449,8 @@ class UndefinedType FINAL : public RegType { class PrimitiveType : public RegType { public: - PrimitiveType(mirror::Class* klass, const StringPiece& descriptor, + PrimitiveType(ObjPtr<mirror::Class> klass, + const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_); bool HasClassVirtual() const OVERRIDE { return true; } @@ -455,7 +458,7 @@ class PrimitiveType : public RegType { class Cat1Type : public PrimitiveType { public: - Cat1Type(mirror::Class* klass, const StringPiece& descriptor, + Cat1Type(ObjPtr<mirror::Class> klass, const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_); }; @@ -463,7 
+466,7 @@ class IntegerType FINAL : public Cat1Type { public: bool IsInteger() const OVERRIDE { return true; } std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_); - static const IntegerType* CreateInstance(mirror::Class* klass, + static const IntegerType* CreateInstance(ObjPtr<mirror::Class> klass, const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_); @@ -475,7 +478,8 @@ class IntegerType FINAL : public Cat1Type { } private: - IntegerType(mirror::Class* klass, const StringPiece& descriptor, + IntegerType(ObjPtr<mirror::Class> klass, + const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) : Cat1Type(klass, descriptor, cache_id) { CheckConstructorInvariants(this); @@ -487,7 +491,7 @@ class BooleanType FINAL : public Cat1Type { public: bool IsBoolean() const OVERRIDE { return true; } std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_); - static const BooleanType* CreateInstance(mirror::Class* klass, + static const BooleanType* CreateInstance(ObjPtr<mirror::Class> klass, const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_); @@ -499,7 +503,8 @@ class BooleanType FINAL : public Cat1Type { } private: - BooleanType(mirror::Class* klass, const StringPiece& descriptor, + BooleanType(ObjPtr<mirror::Class> klass, + const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) : Cat1Type(klass, descriptor, cache_id) { CheckConstructorInvariants(this); @@ -512,7 +517,7 @@ class ByteType FINAL : public Cat1Type { public: bool IsByte() const OVERRIDE { return true; } std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_); - static const ByteType* CreateInstance(mirror::Class* klass, + static const ByteType* CreateInstance(ObjPtr<mirror::Class> klass, const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_); @@ -524,7 +529,8 @@ class ByteType FINAL : public Cat1Type 
{ } private: - ByteType(mirror::Class* klass, const StringPiece& descriptor, + ByteType(ObjPtr<mirror::Class> klass, + const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) : Cat1Type(klass, descriptor, cache_id) { CheckConstructorInvariants(this); @@ -536,7 +542,7 @@ class ShortType FINAL : public Cat1Type { public: bool IsShort() const OVERRIDE { return true; } std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_); - static const ShortType* CreateInstance(mirror::Class* klass, + static const ShortType* CreateInstance(ObjPtr<mirror::Class> klass, const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_); @@ -548,7 +554,7 @@ class ShortType FINAL : public Cat1Type { } private: - ShortType(mirror::Class* klass, const StringPiece& descriptor, + ShortType(ObjPtr<mirror::Class> klass, const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) : Cat1Type(klass, descriptor, cache_id) { CheckConstructorInvariants(this); @@ -560,7 +566,7 @@ class CharType FINAL : public Cat1Type { public: bool IsChar() const OVERRIDE { return true; } std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_); - static const CharType* CreateInstance(mirror::Class* klass, + static const CharType* CreateInstance(ObjPtr<mirror::Class> klass, const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_); @@ -572,7 +578,8 @@ class CharType FINAL : public Cat1Type { } private: - CharType(mirror::Class* klass, const StringPiece& descriptor, + CharType(ObjPtr<mirror::Class> klass, + const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) : Cat1Type(klass, descriptor, cache_id) { CheckConstructorInvariants(this); @@ -584,7 +591,7 @@ class FloatType FINAL : public Cat1Type { public: bool IsFloat() const OVERRIDE { return true; } std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_); - static const 
FloatType* CreateInstance(mirror::Class* klass, + static const FloatType* CreateInstance(ObjPtr<mirror::Class> klass, const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_); @@ -596,7 +603,8 @@ class FloatType FINAL : public Cat1Type { } private: - FloatType(mirror::Class* klass, const StringPiece& descriptor, + FloatType(ObjPtr<mirror::Class> klass, + const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) : Cat1Type(klass, descriptor, cache_id) { CheckConstructorInvariants(this); @@ -606,7 +614,8 @@ class FloatType FINAL : public Cat1Type { class Cat2Type : public PrimitiveType { public: - Cat2Type(mirror::Class* klass, const StringPiece& descriptor, + Cat2Type(ObjPtr<mirror::Class> klass, + const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_); }; @@ -615,7 +624,7 @@ class LongLoType FINAL : public Cat2Type { std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_); bool IsLongLo() const OVERRIDE { return true; } bool IsLong() const OVERRIDE { return true; } - static const LongLoType* CreateInstance(mirror::Class* klass, + static const LongLoType* CreateInstance(ObjPtr<mirror::Class> klass, const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_); @@ -627,7 +636,8 @@ class LongLoType FINAL : public Cat2Type { } private: - LongLoType(mirror::Class* klass, const StringPiece& descriptor, + LongLoType(ObjPtr<mirror::Class> klass, + const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) : Cat2Type(klass, descriptor, cache_id) { CheckConstructorInvariants(this); @@ -639,7 +649,7 @@ class LongHiType FINAL : public Cat2Type { public: std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_); bool IsLongHi() const OVERRIDE { return true; } - static const LongHiType* CreateInstance(mirror::Class* klass, + static const LongHiType* CreateInstance(ObjPtr<mirror::Class> klass, const 
StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_); @@ -651,7 +661,8 @@ class LongHiType FINAL : public Cat2Type { } private: - LongHiType(mirror::Class* klass, const StringPiece& descriptor, + LongHiType(ObjPtr<mirror::Class> klass, + const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) : Cat2Type(klass, descriptor, cache_id) { CheckConstructorInvariants(this); @@ -664,7 +675,7 @@ class DoubleLoType FINAL : public Cat2Type { std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_); bool IsDoubleLo() const OVERRIDE { return true; } bool IsDouble() const OVERRIDE { return true; } - static const DoubleLoType* CreateInstance(mirror::Class* klass, + static const DoubleLoType* CreateInstance(ObjPtr<mirror::Class> klass, const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_); @@ -676,7 +687,8 @@ class DoubleLoType FINAL : public Cat2Type { } private: - DoubleLoType(mirror::Class* klass, const StringPiece& descriptor, + DoubleLoType(ObjPtr<mirror::Class> klass, + const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) : Cat2Type(klass, descriptor, cache_id) { CheckConstructorInvariants(this); @@ -688,9 +700,9 @@ class DoubleHiType FINAL : public Cat2Type { public: std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_); virtual bool IsDoubleHi() const OVERRIDE { return true; } - static const DoubleHiType* CreateInstance(mirror::Class* klass, - const StringPiece& descriptor, - uint16_t cache_id) + static const DoubleHiType* CreateInstance(ObjPtr<mirror::Class> klass, + const StringPiece& descriptor, + uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_); static const DoubleHiType* GetInstance() PURE; static void Destroy(); @@ -700,7 +712,8 @@ class DoubleHiType FINAL : public Cat2Type { } private: - DoubleHiType(mirror::Class* klass, const StringPiece& descriptor, + DoubleHiType(ObjPtr<mirror::Class> klass, + 
const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) : Cat2Type(klass, descriptor, cache_id) { CheckConstructorInvariants(this); @@ -872,7 +885,7 @@ class NullType FINAL : public RegType { static const NullType* GetInstance() PURE; // Create the singleton instance. - static const NullType* CreateInstance(mirror::Class* klass, + static const NullType* CreateInstance(ObjPtr<mirror::Class> klass, const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_); @@ -892,7 +905,7 @@ class NullType FINAL : public RegType { } private: - NullType(mirror::Class* klass, const StringPiece& descriptor, uint16_t cache_id) + NullType(ObjPtr<mirror::Class> klass, const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) : RegType(klass, descriptor, cache_id) { CheckConstructorInvariants(this); @@ -906,8 +919,10 @@ class NullType FINAL : public RegType { // instructions and must be passed to a constructor. class UninitializedType : public RegType { public: - UninitializedType(mirror::Class* klass, const StringPiece& descriptor, - uint32_t allocation_pc, uint16_t cache_id) + UninitializedType(ObjPtr<mirror::Class> klass, + const StringPiece& descriptor, + uint32_t allocation_pc, + uint16_t cache_id) : RegType(klass, descriptor, cache_id), allocation_pc_(allocation_pc) {} bool IsUninitializedTypes() const OVERRIDE; @@ -929,9 +944,10 @@ class UninitializedType : public RegType { // Similar to ReferenceType but not yet having been passed to a constructor. 
class UninitializedReferenceType FINAL : public UninitializedType { public: - UninitializedReferenceType(mirror::Class* klass, + UninitializedReferenceType(ObjPtr<mirror::Class> klass, const StringPiece& descriptor, - uint32_t allocation_pc, uint16_t cache_id) + uint32_t allocation_pc, + uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) : UninitializedType(klass, descriptor, allocation_pc, cache_id) { CheckConstructorInvariants(this); @@ -969,7 +985,7 @@ class UnresolvedUninitializedRefType FINAL : public UninitializedType { // of a constructor. class UninitializedThisReferenceType FINAL : public UninitializedType { public: - UninitializedThisReferenceType(mirror::Class* klass, + UninitializedThisReferenceType(ObjPtr<mirror::Class> klass, const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) @@ -1010,7 +1026,8 @@ class UnresolvedUninitializedThisRefType FINAL : public UninitializedType { // sub-class. class ReferenceType FINAL : public RegType { public: - ReferenceType(mirror::Class* klass, const StringPiece& descriptor, + ReferenceType(ObjPtr<mirror::Class> klass, + const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) : RegType(klass, descriptor, cache_id) { CheckConstructorInvariants(this); @@ -1034,7 +1051,8 @@ class ReferenceType FINAL : public RegType { // type. 
class PreciseReferenceType FINAL : public RegType { public: - PreciseReferenceType(mirror::Class* klass, const StringPiece& descriptor, + PreciseReferenceType(ObjPtr<mirror::Class> klass, + const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_); diff --git a/runtime/verifier/reg_type_cache-inl.h b/runtime/verifier/reg_type_cache-inl.h index 0469a3b394..9f87adfa31 100644 --- a/runtime/verifier/reg_type_cache-inl.h +++ b/runtime/verifier/reg_type_cache-inl.h @@ -125,7 +125,7 @@ inline const ImpreciseConstType& RegTypeCache::PosShortConstant() { inline const PreciseReferenceType& RegTypeCache::JavaLangClass() { const RegType* result = &FromClass("Ljava/lang/Class;", - GetClassRoot<mirror::Class>().Ptr(), + GetClassRoot<mirror::Class>(), /* precise */ true); DCHECK(result->IsPreciseReference()); return *down_cast<const PreciseReferenceType*>(result); @@ -134,7 +134,7 @@ inline const PreciseReferenceType& RegTypeCache::JavaLangClass() { inline const PreciseReferenceType& RegTypeCache::JavaLangString() { // String is final and therefore always precise. 
const RegType* result = &FromClass("Ljava/lang/String;", - GetClassRoot<mirror::String>().Ptr(), + GetClassRoot<mirror::String>(), /* precise */ true); DCHECK(result->IsPreciseReference()); return *down_cast<const PreciseReferenceType*>(result); @@ -142,7 +142,7 @@ inline const PreciseReferenceType& RegTypeCache::JavaLangString() { inline const PreciseReferenceType& RegTypeCache::JavaLangInvokeMethodHandle() { const RegType* result = &FromClass("Ljava/lang/invoke/MethodHandle;", - GetClassRoot<mirror::MethodHandle>().Ptr(), + GetClassRoot<mirror::MethodHandle>(), /* precise */ true); DCHECK(result->IsPreciseReference()); return *down_cast<const PreciseReferenceType*>(result); @@ -150,7 +150,7 @@ inline const PreciseReferenceType& RegTypeCache::JavaLangInvokeMethodHandle() { inline const PreciseReferenceType& RegTypeCache::JavaLangInvokeMethodType() { const RegType* result = &FromClass("Ljava/lang/invoke/MethodType;", - GetClassRoot<mirror::MethodType>().Ptr(), + GetClassRoot<mirror::MethodType>(), /* precise */ true); DCHECK(result->IsPreciseReference()); return *down_cast<const PreciseReferenceType*>(result); @@ -158,7 +158,7 @@ inline const PreciseReferenceType& RegTypeCache::JavaLangInvokeMethodType() { inline const RegType& RegTypeCache::JavaLangThrowable(bool precise) { const RegType* result = &FromClass("Ljava/lang/Throwable;", - GetClassRoot<mirror::Throwable>().Ptr(), + GetClassRoot<mirror::Throwable>(), precise); if (precise) { DCHECK(result->IsPreciseReference()); @@ -170,9 +170,7 @@ inline const RegType& RegTypeCache::JavaLangThrowable(bool precise) { } inline const RegType& RegTypeCache::JavaLangObject(bool precise) { - const RegType* result = &FromClass("Ljava/lang/Object;", - GetClassRoot<mirror::Object>().Ptr(), - precise); + const RegType* result = &FromClass("Ljava/lang/Object;", GetClassRoot<mirror::Object>(), precise); if (precise) { DCHECK(result->IsPreciseReference()); return *down_cast<const PreciseReferenceType*>(result); @@ -187,7 +185,7 @@ 
inline RegTypeType& RegTypeCache::AddEntry(RegTypeType* new_entry) { DCHECK(new_entry != nullptr); entries_.push_back(new_entry); if (new_entry->HasClass()) { - mirror::Class* klass = new_entry->GetClass(); + ObjPtr<mirror::Class> klass = new_entry->GetClass(); DCHECK(!klass->IsPrimitive()); klass_entries_.push_back(std::make_pair(GcRoot<mirror::Class>(klass), new_entry)); } diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc index 87fc60bd23..f1f3488a3c 100644 --- a/runtime/verifier/reg_type_cache.cc +++ b/runtime/verifier/reg_type_cache.cc @@ -77,7 +77,7 @@ void RegTypeCache::FillPrimitiveAndSmallConstantTypes() { DCHECK_EQ(entries_.size(), primitive_count_); } -const RegType& RegTypeCache::FromDescriptor(mirror::ClassLoader* loader, +const RegType& RegTypeCache::FromDescriptor(ObjPtr<mirror::ClassLoader> loader, const char* descriptor, bool precise) { DCHECK(RegTypeCache::primitive_initialized_); @@ -149,14 +149,15 @@ bool RegTypeCache::MatchDescriptor(size_t idx, const StringPiece& descriptor, bo return true; } -mirror::Class* RegTypeCache::ResolveClass(const char* descriptor, mirror::ClassLoader* loader) { +ObjPtr<mirror::Class> RegTypeCache::ResolveClass(const char* descriptor, + ObjPtr<mirror::ClassLoader> loader) { // Class was not found, must create new type. 
// Try resolving class ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); Thread* self = Thread::Current(); StackHandleScope<1> hs(self); Handle<mirror::ClassLoader> class_loader(hs.NewHandle(loader)); - mirror::Class* klass = nullptr; + ObjPtr<mirror::Class> klass = nullptr; if (can_load_classes_) { klass = class_linker->FindClass(self, descriptor, class_loader); } else { @@ -175,7 +176,7 @@ StringPiece RegTypeCache::AddString(const StringPiece& string_piece) { return StringPiece(ptr, string_piece.length()); } -const RegType& RegTypeCache::From(mirror::ClassLoader* loader, +const RegType& RegTypeCache::From(ObjPtr<mirror::ClassLoader> loader, const char* descriptor, bool precise) { StringPiece sp_descriptor(descriptor); @@ -188,7 +189,7 @@ const RegType& RegTypeCache::From(mirror::ClassLoader* loader, } // Class not found in the cache, will create a new type for that. // Try resolving class. - mirror::Class* klass = ResolveClass(descriptor, loader); + ObjPtr<mirror::Class> klass = ResolveClass(descriptor, loader); if (klass != nullptr) { // Class resolved, first look for the class in the list of entries // Class was not found, must create new type. @@ -234,7 +235,7 @@ const RegType& RegTypeCache::MakeUnresolvedReference() { return AddEntry(new (&allocator_) UnresolvedReferenceType(AddString("a"), entries_.size())); } -const RegType* RegTypeCache::FindClass(mirror::Class* klass, bool precise) const { +const RegType* RegTypeCache::FindClass(ObjPtr<mirror::Class> klass, bool precise) const { DCHECK(klass != nullptr); if (klass->IsPrimitive()) { // Note: precise isn't used for primitive classes. A char is assignable to an int. 
All @@ -242,7 +243,7 @@ const RegType* RegTypeCache::FindClass(mirror::Class* klass, bool precise) const return &RegTypeFromPrimitiveType(klass->GetPrimitiveType()); } for (auto& pair : klass_entries_) { - mirror::Class* const reg_klass = pair.first.Read(); + ObjPtr<mirror::Class> const reg_klass = pair.first.Read(); if (reg_klass == klass) { const RegType* reg_type = pair.second; if (MatchingPrecisionForClass(reg_type, precise)) { @@ -254,7 +255,7 @@ const RegType* RegTypeCache::FindClass(mirror::Class* klass, bool precise) const } const RegType* RegTypeCache::InsertClass(const StringPiece& descriptor, - mirror::Class* klass, + ObjPtr<mirror::Class> klass, bool precise) { // No reference to the class was found, create new reference. DCHECK(FindClass(klass, precise) == nullptr); @@ -265,7 +266,9 @@ const RegType* RegTypeCache::InsertClass(const StringPiece& descriptor, return &AddEntry(reg_type); } -const RegType& RegTypeCache::FromClass(const char* descriptor, mirror::Class* klass, bool precise) { +const RegType& RegTypeCache::FromClass(const char* descriptor, + ObjPtr<mirror::Class> klass, + bool precise) { DCHECK(klass != nullptr); const RegType* reg_type = FindClass(klass, precise); if (reg_type == nullptr) { @@ -342,7 +345,7 @@ void RegTypeCache::CreatePrimitiveAndSmallConstantTypes() { // code cannot leak to other users. auto create_primitive_type_instance = [&](auto type) REQUIRES_SHARED(Locks::mutator_lock_) { using Type = typename decltype(type)::type; - mirror::Class* klass = nullptr; + ObjPtr<mirror::Class> klass = nullptr; // Try loading the class from linker. 
DCHECK(type.descriptor != nullptr); if (strlen(type.descriptor) > 0) { @@ -500,7 +503,7 @@ const UninitializedType& RegTypeCache::Uninitialized(const RegType& type, uint32 allocation_pc, entries_.size()); } else { - mirror::Class* klass = type.GetClass(); + ObjPtr<mirror::Class> klass = type.GetClass(); for (size_t i = primitive_count_; i < entries_.size(); i++) { const RegType* cur_entry = entries_[i]; if (cur_entry->IsUninitializedReference() && @@ -532,7 +535,7 @@ const RegType& RegTypeCache::FromUninitialized(const RegType& uninit_type) { } entry = new (&allocator_) UnresolvedReferenceType(descriptor, entries_.size()); } else { - mirror::Class* klass = uninit_type.GetClass(); + ObjPtr<mirror::Class> klass = uninit_type.GetClass(); if (uninit_type.IsUninitializedThisReference() && !klass->IsFinal()) { // For uninitialized "this reference" look for reference types that are not precise. for (size_t i = primitive_count_; i < entries_.size(); i++) { @@ -583,7 +586,7 @@ const UninitializedType& RegTypeCache::UninitializedThisArgument(const RegType& } entry = new (&allocator_) UnresolvedUninitializedThisRefType(descriptor, entries_.size()); } else { - mirror::Class* klass = type.GetClass(); + ObjPtr<mirror::Class> klass = type.GetClass(); for (size_t i = primitive_count_; i < entries_.size(); i++) { const RegType* cur_entry = entries_[i]; if (cur_entry->IsUninitializedThisReference() && cur_entry->GetClass() == klass) { @@ -647,7 +650,8 @@ const ConstantType& RegTypeCache::FromCat2ConstHi(int32_t value, bool precise) { return AddEntry(entry); } -const RegType& RegTypeCache::GetComponentType(const RegType& array, mirror::ClassLoader* loader) { +const RegType& RegTypeCache::GetComponentType(const RegType& array, + ObjPtr<mirror::ClassLoader> loader) { if (!array.IsArrayTypes()) { return Conflict(); } else if (array.IsUnresolvedTypes()) { @@ -655,7 +659,7 @@ const RegType& RegTypeCache::GetComponentType(const RegType& array, mirror::Clas const std::string 
descriptor(array.GetDescriptor().as_string()); return FromDescriptor(loader, descriptor.c_str() + 1, false); } else { - mirror::Class* klass = array.GetClass()->GetComponentType(); + ObjPtr<mirror::Class> klass = array.GetClass()->GetComponentType(); std::string temp; const char* descriptor = klass->GetDescriptor(&temp); if (klass->IsErroneous()) { diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h index b32dc115a7..d668222901 100644 --- a/runtime/verifier/reg_type_cache.h +++ b/runtime/verifier/reg_type_cache.h @@ -74,16 +74,18 @@ class RegTypeCache { } static void ShutDown(); const art::verifier::RegType& GetFromId(uint16_t id) const; - const RegType& From(mirror::ClassLoader* loader, const char* descriptor, bool precise) + const RegType& From(ObjPtr<mirror::ClassLoader> loader, const char* descriptor, bool precise) REQUIRES_SHARED(Locks::mutator_lock_); // Find a RegType, returns null if not found. - const RegType* FindClass(mirror::Class* klass, bool precise) const + const RegType* FindClass(ObjPtr<mirror::Class> klass, bool precise) const REQUIRES_SHARED(Locks::mutator_lock_); // Insert a new class with a specified descriptor, must not already be in the cache. - const RegType* InsertClass(const StringPiece& descriptor, mirror::Class* klass, bool precise) + const RegType* InsertClass(const StringPiece& descriptor, + ObjPtr<mirror::Class> klass, + bool precise) REQUIRES_SHARED(Locks::mutator_lock_); // Get or insert a reg type for a description, klass, and precision. 
- const RegType& FromClass(const char* descriptor, mirror::Class* klass, bool precise) + const RegType& FromClass(const char* descriptor, ObjPtr<mirror::Class> klass, bool precise) REQUIRES_SHARED(Locks::mutator_lock_); const ConstantType& FromCat1Const(int32_t value, bool precise) REQUIRES_SHARED(Locks::mutator_lock_); @@ -91,7 +93,9 @@ class RegTypeCache { REQUIRES_SHARED(Locks::mutator_lock_); const ConstantType& FromCat2ConstHi(int32_t value, bool precise) REQUIRES_SHARED(Locks::mutator_lock_); - const RegType& FromDescriptor(mirror::ClassLoader* loader, const char* descriptor, bool precise) + const RegType& FromDescriptor(ObjPtr<mirror::ClassLoader> loader, + const char* descriptor, + bool precise) REQUIRES_SHARED(Locks::mutator_lock_); const RegType& FromUnresolvedMerge(const RegType& left, const RegType& right, @@ -146,7 +150,7 @@ class RegTypeCache { const ImpreciseConstType& IntConstant() REQUIRES_SHARED(Locks::mutator_lock_); const ImpreciseConstType& PosByteConstant() REQUIRES_SHARED(Locks::mutator_lock_); const ImpreciseConstType& PosShortConstant() REQUIRES_SHARED(Locks::mutator_lock_); - const RegType& GetComponentType(const RegType& array, mirror::ClassLoader* loader) + const RegType& GetComponentType(const RegType& array, ObjPtr<mirror::ClassLoader> loader) REQUIRES_SHARED(Locks::mutator_lock_); void Dump(std::ostream& os) REQUIRES_SHARED(Locks::mutator_lock_); const RegType& RegTypeFromPrimitiveType(Primitive::Type) const; @@ -158,7 +162,7 @@ class RegTypeCache { private: void FillPrimitiveAndSmallConstantTypes() REQUIRES_SHARED(Locks::mutator_lock_); - mirror::Class* ResolveClass(const char* descriptor, mirror::ClassLoader* loader) + ObjPtr<mirror::Class> ResolveClass(const char* descriptor, ObjPtr<mirror::ClassLoader> loader) REQUIRES_SHARED(Locks::mutator_lock_); bool MatchDescriptor(size_t idx, const StringPiece& descriptor, bool precise) REQUIRES_SHARED(Locks::mutator_lock_); diff --git a/runtime/verifier/verifier_deps.cc 
b/runtime/verifier/verifier_deps.cc index fe839f7312..500cc37af4 100644 --- a/runtime/verifier/verifier_deps.cc +++ b/runtime/verifier/verifier_deps.cc @@ -77,8 +77,8 @@ const VerifierDeps::DexFileDeps* VerifierDeps::GetDexFileDeps(const DexFile& dex static constexpr uint32_t kAccVdexAccessFlags = kAccPublic | kAccPrivate | kAccProtected | kAccStatic | kAccInterface; -template <typename T> -uint16_t VerifierDeps::GetAccessFlags(T* element) { +template <typename Ptr> +uint16_t VerifierDeps::GetAccessFlags(Ptr element) { static_assert(kAccJavaFlagsMask == 0xFFFF, "Unexpected value of a constant"); if (element == nullptr) { return VerifierDeps::kUnresolvedMarker; @@ -277,7 +277,7 @@ bool VerifierDeps::IsInClassPath(ObjPtr<mirror::Class> klass) const { void VerifierDeps::AddClassResolution(const DexFile& dex_file, dex::TypeIndex type_idx, - mirror::Class* klass) { + ObjPtr<mirror::Class> klass) { DexFileDeps* dex_deps = GetDexFileDeps(dex_file); if (dex_deps == nullptr) { // This invocation is from verification of a dex file which is not being compiled. @@ -336,12 +336,13 @@ void VerifierDeps::AddMethodResolution(const DexFile& dex_file, dex_deps->methods_.insert(method_tuple); } -mirror::Class* VerifierDeps::FindOneClassPathBoundaryForInterface(mirror::Class* destination, - mirror::Class* source) const { +ObjPtr<mirror::Class> VerifierDeps::FindOneClassPathBoundaryForInterface( + ObjPtr<mirror::Class> destination, + ObjPtr<mirror::Class> source) const { DCHECK(destination->IsInterface()); DCHECK(IsInClassPath(destination)); Thread* thread = Thread::Current(); - mirror::Class* current = source; + ObjPtr<mirror::Class> current = source; // Record the classes that are at the boundary between the compiled DEX files and // the classpath. We will check those classes later to find one class that inherits // `destination`. 
@@ -367,7 +368,7 @@ mirror::Class* VerifierDeps::FindOneClassPathBoundaryForInterface(mirror::Class* int32_t iftable_count = source->GetIfTableCount(); ObjPtr<mirror::IfTable> iftable = source->GetIfTable(); for (int32_t i = 0; i < iftable_count; ++i) { - mirror::Class* itf = iftable->GetInterface(i); + ObjPtr<mirror::Class> itf = iftable->GetInterface(i); if (!IsInClassPath(itf)) { for (size_t j = 0; j < itf->NumDirectInterfaces(); ++j) { ObjPtr<mirror::Class> direct = mirror::Class::GetDirectInterface(thread, itf, j); @@ -391,8 +392,8 @@ mirror::Class* VerifierDeps::FindOneClassPathBoundaryForInterface(mirror::Class* } void VerifierDeps::AddAssignability(const DexFile& dex_file, - mirror::Class* destination, - mirror::Class* source, + ObjPtr<mirror::Class> destination, + ObjPtr<mirror::Class> source, bool is_strict, bool is_assignable) { // Test that the method is only called on reference types. @@ -429,8 +430,8 @@ void VerifierDeps::AddAssignability(const DexFile& dex_file, // Both types are arrays. Break down to component types and add recursively. // This helps filter out destinations from compiled DEX files (see below) // and deduplicate entries with the same canonical component type. - mirror::Class* destination_component = destination->GetComponentType(); - mirror::Class* source_component = source->GetComponentType(); + ObjPtr<mirror::Class> destination_component = destination->GetComponentType(); + ObjPtr<mirror::Class> source_component = source->GetComponentType(); // Only perform the optimization if both types are resolved which guarantees // that they linked successfully, as required at the top of this method. 
@@ -511,7 +512,7 @@ void VerifierDeps::MaybeRecordVerificationStatus(const DexFile& dex_file, void VerifierDeps::MaybeRecordClassResolution(const DexFile& dex_file, dex::TypeIndex type_idx, - mirror::Class* klass) { + ObjPtr<mirror::Class> klass) { VerifierDeps* thread_deps = GetThreadLocalVerifierDeps(); if (thread_deps != nullptr) { thread_deps->AddClassResolution(dex_file, type_idx, klass); @@ -537,8 +538,8 @@ void VerifierDeps::MaybeRecordMethodResolution(const DexFile& dex_file, } void VerifierDeps::MaybeRecordAssignability(const DexFile& dex_file, - mirror::Class* destination, - mirror::Class* source, + ObjPtr<mirror::Class> destination, + ObjPtr<mirror::Class> source, bool is_strict, bool is_assignable) { VerifierDeps* thread_deps = GetThreadLocalVerifierDeps(); @@ -858,12 +859,12 @@ bool VerifierDeps::ValidateDependencies(Handle<mirror::ClassLoader> class_loader // TODO: share that helper with other parts of the compiler that have // the same lookup pattern. -static mirror::Class* FindClassAndClearException(ClassLinker* class_linker, - Thread* self, - const char* name, - Handle<mirror::ClassLoader> class_loader) +static ObjPtr<mirror::Class> FindClassAndClearException(ClassLinker* class_linker, + Thread* self, + const char* name, + Handle<mirror::ClassLoader> class_loader) REQUIRES_SHARED(Locks::mutator_lock_) { - mirror::Class* result = class_linker->FindClass(self, name, class_loader); + ObjPtr<mirror::Class> result = class_linker->FindClass(self, name, class_loader); if (result == nullptr) { DCHECK(self->IsExceptionPending()); self->ClearException(); @@ -971,7 +972,7 @@ bool VerifierDeps::VerifyFields(Handle<mirror::ClassLoader> class_loader, std::string expected_decl_klass = entry.IsResolved() ? 
GetStringFromId(dex_file, entry.GetDeclaringClassIndex()) : dex_file.StringByTypeIdx(field_id.class_idx_); - mirror::Class* cls = FindClassAndClearException( + ObjPtr<mirror::Class> cls = FindClassAndClearException( class_linker, self, expected_decl_klass.c_str(), class_loader); if (cls == nullptr) { LOG(INFO) << "VerifierDeps: Could not resolve class " << expected_decl_klass; @@ -1034,7 +1035,7 @@ bool VerifierDeps::VerifyMethods(Handle<mirror::ClassLoader> class_loader, ? GetStringFromId(dex_file, entry.GetDeclaringClassIndex()) : dex_file.StringByTypeIdx(method_id.class_idx_); - mirror::Class* cls = FindClassAndClearException( + ObjPtr<mirror::Class> cls = FindClassAndClearException( class_linker, self, expected_decl_klass.c_str(), class_loader); if (cls == nullptr) { LOG(INFO) << "VerifierDeps: Could not resolve class " << expected_decl_klass; diff --git a/runtime/verifier/verifier_deps.h b/runtime/verifier/verifier_deps.h index 94441da7e2..0146b17020 100644 --- a/runtime/verifier/verifier_deps.h +++ b/runtime/verifier/verifier_deps.h @@ -75,7 +75,7 @@ class VerifierDeps { // If `klass` is null, the class is assumed unresolved. static void MaybeRecordClassResolution(const DexFile& dex_file, dex::TypeIndex type_idx, - mirror::Class* klass) + ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::verifier_deps_lock_); @@ -99,8 +99,8 @@ class VerifierDeps { // to `destination` as defined by RegType::AssignableFrom. `dex_file` is the // owner of the method for which MethodVerifier performed the assignability test. static void MaybeRecordAssignability(const DexFile& dex_file, - mirror::Class* destination, - mirror::Class* source, + ObjPtr<mirror::Class> destination, + ObjPtr<mirror::Class> source, bool is_strict, bool is_assignable) REQUIRES_SHARED(Locks::mutator_lock_) @@ -218,8 +218,8 @@ class VerifierDeps { // Finds the class in the classpath that makes `source` inherit` from `destination`. 
// Returns null if a class defined in the compiled DEX files, and assignable to // `source`, direclty inherits from `destination`. - mirror::Class* FindOneClassPathBoundaryForInterface(mirror::Class* destination, - mirror::Class* source) const + ObjPtr<mirror::Class> FindOneClassPathBoundaryForInterface(ObjPtr<mirror::Class> destination, + ObjPtr<mirror::Class> source) const REQUIRES_SHARED(Locks::mutator_lock_); // Returns the index of `str`. If it is defined in `dex_file_`, this is the dex @@ -234,8 +234,8 @@ class VerifierDeps { // Returns the bytecode access flags of `element` (bottom 16 bits), or // `kUnresolvedMarker` if `element` is null. - template <typename T> - static uint16_t GetAccessFlags(T* element) + template <typename Ptr> + static uint16_t GetAccessFlags(Ptr element) REQUIRES_SHARED(Locks::mutator_lock_); // Returns a string ID of the descriptor of the declaring class of `element`, @@ -256,7 +256,7 @@ class VerifierDeps { void AddClassResolution(const DexFile& dex_file, dex::TypeIndex type_idx, - mirror::Class* klass) + ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::verifier_deps_lock_); @@ -273,8 +273,8 @@ class VerifierDeps { REQUIRES(!Locks::verifier_deps_lock_); void AddAssignability(const DexFile& dex_file, - mirror::Class* destination, - mirror::Class* source, + ObjPtr<mirror::Class> destination, + ObjPtr<mirror::Class> source, bool is_strict, bool is_assignable) REQUIRES_SHARED(Locks::mutator_lock_); diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc index f7cdf3920a..c64e7bbca1 100644 --- a/runtime/well_known_classes.cc +++ b/runtime/well_known_classes.cc @@ -55,8 +55,6 @@ jclass WellKnownClasses::java_lang_ClassLoader; jclass WellKnownClasses::java_lang_ClassNotFoundException; jclass WellKnownClasses::java_lang_Daemons; jclass WellKnownClasses::java_lang_Error; -jclass WellKnownClasses::java_lang_invoke_MethodHandle; -jclass WellKnownClasses::java_lang_invoke_VarHandle; jclass 
WellKnownClasses::java_lang_IllegalAccessError; jclass WellKnownClasses::java_lang_NoClassDefFoundError; jclass WellKnownClasses::java_lang_Object; @@ -74,7 +72,6 @@ jclass WellKnownClasses::java_lang_ThreadGroup; jclass WellKnownClasses::java_lang_Throwable; jclass WellKnownClasses::java_nio_ByteBuffer; jclass WellKnownClasses::java_nio_DirectByteBuffer; -jclass WellKnownClasses::java_util_ArrayList; jclass WellKnownClasses::java_util_Collections; jclass WellKnownClasses::java_util_function_Consumer; jclass WellKnownClasses::libcore_reflect_AnnotationFactory; @@ -90,14 +87,11 @@ jmethodID WellKnownClasses::java_lang_Byte_valueOf; jmethodID WellKnownClasses::java_lang_Character_valueOf; jmethodID WellKnownClasses::java_lang_ClassLoader_loadClass; jmethodID WellKnownClasses::java_lang_ClassNotFoundException_init; -jmethodID WellKnownClasses::java_lang_Daemons_requestHeapTrim; jmethodID WellKnownClasses::java_lang_Daemons_start; jmethodID WellKnownClasses::java_lang_Daemons_stop; jmethodID WellKnownClasses::java_lang_Double_valueOf; jmethodID WellKnownClasses::java_lang_Float_valueOf; jmethodID WellKnownClasses::java_lang_Integer_valueOf; -jmethodID WellKnownClasses::java_lang_invoke_MethodHandle_invoke; -jmethodID WellKnownClasses::java_lang_invoke_MethodHandle_invokeExact; jmethodID WellKnownClasses::java_lang_invoke_MethodHandles_lookup; jmethodID WellKnownClasses::java_lang_invoke_MethodHandles_Lookup_findConstructor; jmethodID WellKnownClasses::java_lang_Long_valueOf; @@ -108,7 +102,6 @@ jmethodID WellKnownClasses::java_lang_reflect_Proxy_invoke; jmethodID WellKnownClasses::java_lang_Runtime_nativeLoad; jmethodID WellKnownClasses::java_lang_Short_valueOf; jmethodID WellKnownClasses::java_lang_String_charAt; -jmethodID WellKnownClasses::java_lang_System_runFinalization = nullptr; jmethodID WellKnownClasses::java_lang_Thread_dispatchUncaughtException; jmethodID WellKnownClasses::java_lang_Thread_init; jmethodID WellKnownClasses::java_lang_Thread_run; @@ -144,7 
+137,6 @@ jfieldID WellKnownClasses::java_lang_Throwable_detailMessage; jfieldID WellKnownClasses::java_lang_Throwable_stackTrace; jfieldID WellKnownClasses::java_lang_Throwable_stackState; jfieldID WellKnownClasses::java_lang_Throwable_suppressedExceptions; -jfieldID WellKnownClasses::java_lang_reflect_Proxy_h; jfieldID WellKnownClasses::java_nio_ByteBuffer_address; jfieldID WellKnownClasses::java_nio_ByteBuffer_hb; jfieldID WellKnownClasses::java_nio_ByteBuffer_isReadOnly; @@ -152,8 +144,6 @@ jfieldID WellKnownClasses::java_nio_ByteBuffer_limit; jfieldID WellKnownClasses::java_nio_ByteBuffer_offset; jfieldID WellKnownClasses::java_nio_DirectByteBuffer_capacity; jfieldID WellKnownClasses::java_nio_DirectByteBuffer_effectiveDirectAddress; -jfieldID WellKnownClasses::java_util_ArrayList_array; -jfieldID WellKnownClasses::java_util_ArrayList_size; jfieldID WellKnownClasses::java_util_Collections_EMPTY_LIST; jfieldID WellKnownClasses::libcore_util_EmptyArray_STACK_TRACE_ELEMENT; jfieldID WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_data; @@ -323,8 +313,6 @@ void WellKnownClasses::Init(JNIEnv* env) { java_lang_OutOfMemoryError = CacheClass(env, "java/lang/OutOfMemoryError"); java_lang_Error = CacheClass(env, "java/lang/Error"); java_lang_IllegalAccessError = CacheClass(env, "java/lang/IllegalAccessError"); - java_lang_invoke_MethodHandle = CacheClass(env, "java/lang/invoke/MethodHandle"); - java_lang_invoke_VarHandle = CacheClass(env, "java/lang/invoke/VarHandle"); java_lang_NoClassDefFoundError = CacheClass(env, "java/lang/NoClassDefFoundError"); java_lang_reflect_Parameter = CacheClass(env, "java/lang/reflect/Parameter"); java_lang_reflect_Parameter__array = CacheClass(env, "[Ljava/lang/reflect/Parameter;"); @@ -339,7 +327,6 @@ void WellKnownClasses::Init(JNIEnv* env) { java_lang_Throwable = CacheClass(env, "java/lang/Throwable"); java_nio_ByteBuffer = CacheClass(env, "java/nio/ByteBuffer"); java_nio_DirectByteBuffer = CacheClass(env, 
"java/nio/DirectByteBuffer"); - java_util_ArrayList = CacheClass(env, "java/util/ArrayList"); java_util_Collections = CacheClass(env, "java/util/Collections"); java_util_function_Consumer = CacheClass(env, "java/util/function/Consumer"); libcore_reflect_AnnotationFactory = CacheClass(env, "libcore/reflect/AnnotationFactory"); @@ -353,11 +340,8 @@ void WellKnownClasses::Init(JNIEnv* env) { java_lang_ClassNotFoundException_init = CacheMethod(env, java_lang_ClassNotFoundException, false, "<init>", "(Ljava/lang/String;Ljava/lang/Throwable;)V"); java_lang_ClassLoader_loadClass = CacheMethod(env, java_lang_ClassLoader, false, "loadClass", "(Ljava/lang/String;)Ljava/lang/Class;"); - java_lang_Daemons_requestHeapTrim = CacheMethod(env, java_lang_Daemons, true, "requestHeapTrim", "()V"); java_lang_Daemons_start = CacheMethod(env, java_lang_Daemons, true, "start", "()V"); java_lang_Daemons_stop = CacheMethod(env, java_lang_Daemons, true, "stop", "()V"); - java_lang_invoke_MethodHandle_invoke = CacheMethod(env, java_lang_invoke_MethodHandle, false, "invoke", "([Ljava/lang/Object;)Ljava/lang/Object;"); - java_lang_invoke_MethodHandle_invokeExact = CacheMethod(env, java_lang_invoke_MethodHandle, false, "invokeExact", "([Ljava/lang/Object;)Ljava/lang/Object;"); java_lang_invoke_MethodHandles_lookup = CacheMethod(env, "java/lang/invoke/MethodHandles", true, "lookup", "()Ljava/lang/invoke/MethodHandles$Lookup;"); java_lang_invoke_MethodHandles_Lookup_findConstructor = CacheMethod(env, "java/lang/invoke/MethodHandles$Lookup", false, "findConstructor", "(Ljava/lang/Class;Ljava/lang/invoke/MethodType;)Ljava/lang/invoke/MethodHandle;"); @@ -408,8 +392,6 @@ void WellKnownClasses::Init(JNIEnv* env) { java_nio_ByteBuffer_offset = CacheField(env, java_nio_ByteBuffer, false, "offset", "I"); java_nio_DirectByteBuffer_capacity = CacheField(env, java_nio_DirectByteBuffer, false, "capacity", "I"); java_nio_DirectByteBuffer_effectiveDirectAddress = CacheField(env, java_nio_DirectByteBuffer, 
false, "address", "J"); - java_util_ArrayList_array = CacheField(env, java_util_ArrayList, false, "elementData", "[Ljava/lang/Object;"); - java_util_ArrayList_size = CacheField(env, java_util_ArrayList, false, "size", "I"); java_util_Collections_EMPTY_LIST = CacheField(env, java_util_Collections, true, "EMPTY_LIST", "Ljava/util/List;"); libcore_util_EmptyArray_STACK_TRACE_ELEMENT = CacheField(env, libcore_util_EmptyArray, true, "STACK_TRACE_ELEMENT", "[Ljava/lang/StackTraceElement;"); org_apache_harmony_dalvik_ddmc_Chunk_data = CacheField(env, org_apache_harmony_dalvik_ddmc_Chunk, false, "data", "[B"); @@ -440,9 +422,6 @@ void WellKnownClasses::LateInit(JNIEnv* env) { CacheMethod(env, java_lang_reflect_Proxy, true, "invoke", "(Ljava/lang/reflect/Proxy;Ljava/lang/reflect/Method;" "[Ljava/lang/Object;)Ljava/lang/Object;"); - java_lang_reflect_Proxy_h = - CacheField(env, java_lang_reflect_Proxy, false, "h", - "Ljava/lang/reflect/InvocationHandler;"); } void WellKnownClasses::Clear() { @@ -464,8 +443,6 @@ void WellKnownClasses::Clear() { java_lang_Daemons = nullptr; java_lang_Error = nullptr; java_lang_IllegalAccessError = nullptr; - java_lang_invoke_MethodHandle = nullptr; - java_lang_invoke_VarHandle = nullptr; java_lang_NoClassDefFoundError = nullptr; java_lang_Object = nullptr; java_lang_OutOfMemoryError = nullptr; @@ -480,7 +457,6 @@ void WellKnownClasses::Clear() { java_lang_Thread = nullptr; java_lang_ThreadGroup = nullptr; java_lang_Throwable = nullptr; - java_util_ArrayList = nullptr; java_util_Collections = nullptr; java_nio_ByteBuffer = nullptr; java_nio_DirectByteBuffer = nullptr; @@ -497,14 +473,11 @@ void WellKnownClasses::Clear() { java_lang_Character_valueOf = nullptr; java_lang_ClassLoader_loadClass = nullptr; java_lang_ClassNotFoundException_init = nullptr; - java_lang_Daemons_requestHeapTrim = nullptr; java_lang_Daemons_start = nullptr; java_lang_Daemons_stop = nullptr; java_lang_Double_valueOf = nullptr; java_lang_Float_valueOf = nullptr; 
java_lang_Integer_valueOf = nullptr; - java_lang_invoke_MethodHandle_invoke = nullptr; - java_lang_invoke_MethodHandle_invokeExact = nullptr; java_lang_invoke_MethodHandles_lookup = nullptr; java_lang_invoke_MethodHandles_Lookup_findConstructor = nullptr; java_lang_Long_valueOf = nullptr; @@ -515,7 +488,6 @@ void WellKnownClasses::Clear() { java_lang_Runtime_nativeLoad = nullptr; java_lang_Short_valueOf = nullptr; java_lang_String_charAt = nullptr; - java_lang_System_runFinalization = nullptr; java_lang_Thread_dispatchUncaughtException = nullptr; java_lang_Thread_init = nullptr; java_lang_Thread_run = nullptr; @@ -533,7 +505,6 @@ void WellKnownClasses::Clear() { dalvik_system_DexPathList_dexElements = nullptr; dalvik_system_DexPathList__Element_dexFile = nullptr; dalvik_system_VMRuntime_nonSdkApiUsageConsumer = nullptr; - java_lang_reflect_Proxy_h = nullptr; java_lang_Thread_daemon = nullptr; java_lang_Thread_group = nullptr; java_lang_Thread_lock = nullptr; @@ -558,8 +529,6 @@ void WellKnownClasses::Clear() { java_nio_ByteBuffer_offset = nullptr; java_nio_DirectByteBuffer_capacity = nullptr; java_nio_DirectByteBuffer_effectiveDirectAddress = nullptr; - java_util_ArrayList_array = nullptr; - java_util_ArrayList_size = nullptr; java_util_Collections_EMPTY_LIST = nullptr; libcore_util_EmptyArray_STACK_TRACE_ELEMENT = nullptr; org_apache_harmony_dalvik_ddmc_Chunk_data = nullptr; diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h index c06e4a71ce..c81062f594 100644 --- a/runtime/well_known_classes.h +++ b/runtime/well_known_classes.h @@ -66,8 +66,6 @@ struct WellKnownClasses { static jclass java_lang_Daemons; static jclass java_lang_Error; static jclass java_lang_IllegalAccessError; - static jclass java_lang_invoke_MethodHandle; - static jclass java_lang_invoke_VarHandle; static jclass java_lang_NoClassDefFoundError; static jclass java_lang_Object; static jclass java_lang_OutOfMemoryError; @@ -82,7 +80,6 @@ struct WellKnownClasses { static jclass 
java_lang_Thread; static jclass java_lang_ThreadGroup; static jclass java_lang_Throwable; - static jclass java_util_ArrayList; static jclass java_util_Collections; static jclass java_util_function_Consumer; static jclass java_nio_ByteBuffer; @@ -100,14 +97,11 @@ struct WellKnownClasses { static jmethodID java_lang_Character_valueOf; static jmethodID java_lang_ClassLoader_loadClass; static jmethodID java_lang_ClassNotFoundException_init; - static jmethodID java_lang_Daemons_requestHeapTrim; static jmethodID java_lang_Daemons_start; static jmethodID java_lang_Daemons_stop; static jmethodID java_lang_Double_valueOf; static jmethodID java_lang_Float_valueOf; static jmethodID java_lang_Integer_valueOf; - static jmethodID java_lang_invoke_MethodHandle_invoke; - static jmethodID java_lang_invoke_MethodHandle_invokeExact; static jmethodID java_lang_invoke_MethodHandles_lookup; static jmethodID java_lang_invoke_MethodHandles_Lookup_findConstructor; static jmethodID java_lang_Long_valueOf; @@ -118,7 +112,6 @@ struct WellKnownClasses { static jmethodID java_lang_Runtime_nativeLoad; static jmethodID java_lang_Short_valueOf; static jmethodID java_lang_String_charAt; - static jmethodID java_lang_System_runFinalization; static jmethodID java_lang_Thread_dispatchUncaughtException; static jmethodID java_lang_Thread_init; static jmethodID java_lang_Thread_run; @@ -137,7 +130,6 @@ struct WellKnownClasses { static jfieldID dalvik_system_DexPathList_dexElements; static jfieldID dalvik_system_DexPathList__Element_dexFile; static jfieldID dalvik_system_VMRuntime_nonSdkApiUsageConsumer; - static jfieldID java_lang_reflect_Proxy_h; static jfieldID java_lang_Thread_daemon; static jfieldID java_lang_Thread_group; static jfieldID java_lang_Thread_lock; @@ -163,8 +155,6 @@ struct WellKnownClasses { static jfieldID java_nio_DirectByteBuffer_capacity; static jfieldID java_nio_DirectByteBuffer_effectiveDirectAddress; - static jfieldID java_util_ArrayList_array; - static jfieldID 
java_util_ArrayList_size; static jfieldID java_util_Collections_EMPTY_LIST; static jfieldID libcore_util_EmptyArray_STACK_TRACE_ELEMENT; static jfieldID org_apache_harmony_dalvik_ddmc_Chunk_data; diff --git a/test/137-cfi/cfi.cc b/test/137-cfi/cfi.cc index 49db0c82b5..7ada47d304 100644 --- a/test/137-cfi/cfi.cc +++ b/test/137-cfi/cfi.cc @@ -56,9 +56,12 @@ static void CauseSegfault() { extern "C" JNIEXPORT jboolean JNICALL Java_Main_sleep(JNIEnv*, jobject, jint, jboolean, jdouble) { // Keep pausing. + struct timespec ts = { .tv_sec = 100, .tv_nsec = 0 }; printf("Going to sleep\n"); for (;;) { - sleep(1); + // Use nanosleep since it gets to the system call quickly and doesn't + // have any points at which an unwind will fail. + nanosleep(&ts, nullptr); } } diff --git a/test/530-checker-lse/build b/test/530-checker-lse/build deleted file mode 100755 index 10ffcc537d..0000000000 --- a/test/530-checker-lse/build +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -# -# Copyright 2017 The Android Open Source Project -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# See b/65168732 -export USE_D8=false - -./default-build "$@" diff --git a/test/530-checker-lse/smali/Main.smali b/test/530-checker-lse/smali/Main.smali new file mode 100644 index 0000000000..267801760f --- /dev/null +++ b/test/530-checker-lse/smali/Main.smali @@ -0,0 +1,260 @@ +# Copyright (C) 2018 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +.class public LMain2; +.super Ljava/lang/Object; +.source "Main.java" + +# direct methods + +## CHECK-START: int Main2.test4(TestClass, boolean) load_store_elimination (before) +## CHECK: InstanceFieldSet +## CHECK: InstanceFieldGet +## CHECK: Return +## CHECK: InstanceFieldSet + +## CHECK-START: int Main2.test4(TestClass, boolean) load_store_elimination (after) +## CHECK: InstanceFieldSet +## CHECK-NOT: NullCheck +## CHECK-NOT: InstanceFieldGet +## CHECK: Return +## CHECK: InstanceFieldSet + +# Set and merge the same value in two branches. 
+ +# Original java source: +# +# static int test4(TestClass obj, boolean b) { +# if (b) { +# obj.i = 1; +# } else { +# obj.i = 1; +# } +# return obj.i; +# } + +.method public static test4(LTestClass;Z)I + .registers 3 + .param p0, "obj" # LTestClass; + .param p1, "b" # Z + + .prologue + const/4 v0, 0x1 + + .line 185 + if-eqz p1, :cond_8 + + .line 186 + iput v0, p0, LTestClass;->i:I + + .line 190 + :goto_5 + iget v0, p0, LTestClass;->i:I + + return v0 + + .line 188 + :cond_8 + iput v0, p0, LTestClass;->i:I + + goto :goto_5 +.end method + +## CHECK-START: int Main2.test5(TestClass, boolean) load_store_elimination (before) +## CHECK: InstanceFieldSet +## CHECK: InstanceFieldGet +## CHECK: Return +## CHECK: InstanceFieldSet + +## CHECK-START: int Main2.test5(TestClass, boolean) load_store_elimination (after) +## CHECK: InstanceFieldSet +## CHECK: InstanceFieldGet +## CHECK: Return +## CHECK: InstanceFieldSet + +# Set and merge different values in two branches. +# Original java source: +# +# static int test5(TestClass obj, boolean b) { +# if (b) { +# obj.i = 1; +# } else { +# obj.i = 2; +# } +# return obj.i; +# } + +.method public static test5(LTestClass;Z)I + .registers 3 + .param p0, "obj" # LTestClass; + .param p1, "b" # Z + + .prologue + .line 207 + if-eqz p1, :cond_8 + + .line 208 + const/4 v0, 0x1 + + iput v0, p0, LTestClass;->i:I + + .line 212 + :goto_5 + iget v0, p0, LTestClass;->i:I + + return v0 + + .line 210 + :cond_8 + const/4 v0, 0x2 + + iput v0, p0, LTestClass;->i:I + + goto :goto_5 +.end method + +## CHECK-START: int Main2.test23(boolean) load_store_elimination (before) +## CHECK: NewInstance +## CHECK: InstanceFieldSet +## CHECK: InstanceFieldGet +## CHECK: InstanceFieldSet +## CHECK: InstanceFieldGet +## CHECK: Return +## CHECK: InstanceFieldGet +## CHECK: InstanceFieldSet + +## CHECK-START: int Main2.test23(boolean) load_store_elimination (after) +## CHECK: NewInstance +## CHECK-NOT: InstanceFieldSet +## CHECK-NOT: InstanceFieldGet +## CHECK: 
InstanceFieldSet +## CHECK: InstanceFieldGet +## CHECK: Return +## CHECK-NOT: InstanceFieldGet +## CHECK: InstanceFieldSet + +# Test store elimination on merging. + +# Original java source: +# +# static int test23(boolean b) { +# TestClass obj = new TestClass(); +# obj.i = 3; // This store can be eliminated since the value flows into each branch. +# if (b) { +# obj.i += 1; // This store cannot be eliminated due to the merge later. +# } else { +# obj.i += 2; // This store cannot be eliminated due to the merge later. +# } +# return obj.i; +# } + +.method public static test23(Z)I + .registers 3 + .param p0, "b" # Z + + .prologue + .line 582 + new-instance v0, LTestClass; + + invoke-direct {v0}, LTestClass;-><init>()V + + .line 583 + .local v0, "obj":LTestClass; + const/4 v1, 0x3 + + iput v1, v0, LTestClass;->i:I + + .line 584 + if-eqz p0, :cond_13 + + .line 585 + iget v1, v0, LTestClass;->i:I + + add-int/lit8 v1, v1, 0x1 + + iput v1, v0, LTestClass;->i:I + + .line 589 + :goto_10 + iget v1, v0, LTestClass;->i:I + + return v1 + + .line 587 + :cond_13 + iget v1, v0, LTestClass;->i:I + + add-int/lit8 v1, v1, 0x2 + + iput v1, v0, LTestClass;->i:I + + goto :goto_10 +.end method + +## CHECK-START: float Main2.test24() load_store_elimination (before) +## CHECK-DAG: <<True:i\d+>> IntConstant 1 +## CHECK-DAG: <<Float8:f\d+>> FloatConstant 8 +## CHECK-DAG: <<Float42:f\d+>> FloatConstant 42 +## CHECK-DAG: <<Obj:l\d+>> NewInstance +## CHECK-DAG: InstanceFieldSet [<<Obj>>,<<True>>] +## CHECK-DAG: InstanceFieldSet [<<Obj>>,<<Float8>>] +## CHECK-DAG: <<GetTest:z\d+>> InstanceFieldGet [<<Obj>>] +## CHECK-DAG: <<GetField:f\d+>> InstanceFieldGet [<<Obj>>] +## CHECK-DAG: <<Select:f\d+>> Select [<<Float42>>,<<GetField>>,<<GetTest>>] +## CHECK-DAG: Return [<<Select>>] + +## CHECK-START: float Main2.test24() load_store_elimination (after) +## CHECK-DAG: <<True:i\d+>> IntConstant 1 +## CHECK-DAG: <<Float8:f\d+>> FloatConstant 8 +## CHECK-DAG: <<Float42:f\d+>> FloatConstant 42 +## CHECK-DAG: 
<<Select:f\d+>> Select [<<Float42>>,<<Float8>>,<<True>>] +## CHECK-DAG: Return [<<Select>>] + +# Original java source: +# +# static float test24() { +# float a = 42.0f; +# TestClass3 obj = new TestClass3(); +# if (obj.test1) { +# a = obj.floatField; +# } +# return a; +# } + +.method public static test24()F + .registers 3 + + .prologue + .line 612 + const/high16 v0, 0x42280000 # 42.0f + + .line 613 + .local v0, "a":F + new-instance v1, LTestClass3; + + invoke-direct {v1}, LTestClass3;-><init>()V + + .line 614 + .local v1, "obj":LTestClass3; + iget-boolean v2, v1, LTestClass3;->test1:Z + + if-eqz v2, :cond_d + + .line 615 + iget v0, v1, LTestClass3;->floatField:F + + .line 617 + :cond_d + return v0 +.end method diff --git a/test/530-checker-lse/src/Main.java b/test/530-checker-lse/src/Main.java index 93c153821b..bd1744cc5f 100644 --- a/test/530-checker-lse/src/Main.java +++ b/test/530-checker-lse/src/Main.java @@ -14,6 +14,8 @@ * limitations under the License. */ +import java.lang.reflect.Method; + class Circle { Circle(double radius) { this.radius = radius; @@ -167,51 +169,6 @@ public class Main { return obj.i + obj1.j + obj2.i + obj2.j; } - /// CHECK-START: int Main.test4(TestClass, boolean) load_store_elimination (before) - /// CHECK: InstanceFieldSet - /// CHECK: InstanceFieldGet - /// CHECK: Return - /// CHECK: InstanceFieldSet - - /// CHECK-START: int Main.test4(TestClass, boolean) load_store_elimination (after) - /// CHECK: InstanceFieldSet - /// CHECK-NOT: NullCheck - /// CHECK-NOT: InstanceFieldGet - /// CHECK: Return - /// CHECK: InstanceFieldSet - - // Set and merge the same value in two branches. 
- static int test4(TestClass obj, boolean b) { - if (b) { - obj.i = 1; - } else { - obj.i = 1; - } - return obj.i; - } - - /// CHECK-START: int Main.test5(TestClass, boolean) load_store_elimination (before) - /// CHECK: InstanceFieldSet - /// CHECK: InstanceFieldGet - /// CHECK: Return - /// CHECK: InstanceFieldSet - - /// CHECK-START: int Main.test5(TestClass, boolean) load_store_elimination (after) - /// CHECK: InstanceFieldSet - /// CHECK: InstanceFieldGet - /// CHECK: Return - /// CHECK: InstanceFieldSet - - // Set and merge different values in two branches. - static int test5(TestClass obj, boolean b) { - if (b) { - obj.i = 1; - } else { - obj.i = 2; - } - return obj.i; - } - /// CHECK-START: int Main.test6(TestClass, TestClass, boolean) load_store_elimination (before) /// CHECK: InstanceFieldSet /// CHECK: InstanceFieldSet @@ -557,66 +514,6 @@ public class Main { return sum; } - /// CHECK-START: int Main.test23(boolean) load_store_elimination (before) - /// CHECK: NewInstance - /// CHECK: InstanceFieldSet - /// CHECK: InstanceFieldGet - /// CHECK: InstanceFieldSet - /// CHECK: InstanceFieldGet - /// CHECK: Return - /// CHECK: InstanceFieldGet - /// CHECK: InstanceFieldSet - - /// CHECK-START: int Main.test23(boolean) load_store_elimination (after) - /// CHECK: NewInstance - /// CHECK-NOT: InstanceFieldSet - /// CHECK-NOT: InstanceFieldGet - /// CHECK: InstanceFieldSet - /// CHECK: InstanceFieldGet - /// CHECK: Return - /// CHECK-NOT: InstanceFieldGet - /// CHECK: InstanceFieldSet - - // Test store elimination on merging. - static int test23(boolean b) { - TestClass obj = new TestClass(); - obj.i = 3; // This store can be eliminated since the value flows into each branch. - if (b) { - obj.i += 1; // This store cannot be eliminated due to the merge later. - } else { - obj.i += 2; // This store cannot be eliminated due to the merge later. 
- } - return obj.i; - } - - /// CHECK-START: float Main.test24() load_store_elimination (before) - /// CHECK-DAG: <<True:i\d+>> IntConstant 1 - /// CHECK-DAG: <<Float8:f\d+>> FloatConstant 8 - /// CHECK-DAG: <<Float42:f\d+>> FloatConstant 42 - /// CHECK-DAG: <<Obj:l\d+>> NewInstance - /// CHECK-DAG: InstanceFieldSet [<<Obj>>,<<True>>] - /// CHECK-DAG: InstanceFieldSet [<<Obj>>,<<Float8>>] - /// CHECK-DAG: <<GetTest:z\d+>> InstanceFieldGet [<<Obj>>] - /// CHECK-DAG: <<GetField:f\d+>> InstanceFieldGet [<<Obj>>] - /// CHECK-DAG: <<Select:f\d+>> Select [<<Float42>>,<<GetField>>,<<GetTest>>] - /// CHECK-DAG: Return [<<Select>>] - - /// CHECK-START: float Main.test24() load_store_elimination (after) - /// CHECK-DAG: <<True:i\d+>> IntConstant 1 - /// CHECK-DAG: <<Float8:f\d+>> FloatConstant 8 - /// CHECK-DAG: <<Float42:f\d+>> FloatConstant 42 - /// CHECK-DAG: <<Select:f\d+>> Select [<<Float42>>,<<Float8>>,<<True>>] - /// CHECK-DAG: Return [<<Select>>] - - static float test24() { - float a = 42.0f; - TestClass3 obj = new TestClass3(); - if (obj.test1) { - a = obj.floatField; - } - return a; - } - /// CHECK-START: void Main.testFinalizable() load_store_elimination (before) /// CHECK: NewInstance /// CHECK: InstanceFieldSet @@ -1275,7 +1172,14 @@ public class Main { } } - public static void main(String[] args) { + public static void main(String[] args) throws Exception { + + Class main2 = Class.forName("Main2"); + Method test4 = main2.getMethod("test4", TestClass.class, boolean.class); + Method test5 = main2.getMethod("test5", TestClass.class, boolean.class); + Method test23 = main2.getMethod("test23", boolean.class); + Method test24 = main2.getMethod("test24"); + assertDoubleEquals(Math.PI * Math.PI * Math.PI, calcCircleArea(Math.PI)); assertIntEquals(test1(new TestClass(), new TestClass()), 3); assertIntEquals(test2(new TestClass()), 1); @@ -1283,10 +1187,10 @@ public class Main { TestClass obj2 = new TestClass(); obj1.next = obj2; assertIntEquals(test3(obj1), 10); - 
assertIntEquals(test4(new TestClass(), true), 1); - assertIntEquals(test4(new TestClass(), false), 1); - assertIntEquals(test5(new TestClass(), true), 1); - assertIntEquals(test5(new TestClass(), false), 2); + assertIntEquals((int)test4.invoke(null, new TestClass(), true), 1); + assertIntEquals((int)test4.invoke(null, new TestClass(), false), 1); + assertIntEquals((int)test5.invoke(null, new TestClass(), true), 1); + assertIntEquals((int)test5.invoke(null, new TestClass(), false), 2); assertIntEquals(test6(new TestClass(), new TestClass(), true), 4); assertIntEquals(test6(new TestClass(), new TestClass(), false), 2); assertIntEquals(test7(new TestClass()), 1); @@ -1312,9 +1216,9 @@ public class Main { assertFloatEquals(test20().i, 0); test21(new TestClass()); assertIntEquals(test22(), 13); - assertIntEquals(test23(true), 4); - assertIntEquals(test23(false), 5); - assertFloatEquals(test24(), 8.0f); + assertIntEquals((int)test23.invoke(null, true), 4); + assertIntEquals((int)test23.invoke(null, false), 5); + assertFloatEquals((float)test24.invoke(null), 8.0f); testFinalizableByForcingGc(); assertIntEquals($noinline$testHSelect(true), 0xdead); int[] array = {2, 5, 9, -1, -3, 10, 8, 4}; diff --git a/tools/dexanalyze/dexanalyze.cc b/tools/dexanalyze/dexanalyze.cc index 083de7066d..38725d428b 100644 --- a/tools/dexanalyze/dexanalyze.cc +++ b/tools/dexanalyze/dexanalyze.cc @@ -88,6 +88,7 @@ class DexAnalyze { bool run_dex_file_verifier_ = true; bool dump_per_input_dex_ = false; bool exp_count_indices_ = false; + bool exp_code_metrics_ = false; bool exp_analyze_strings_ = false; bool run_all_experiments_ = false; std::vector<std::string> filenames_; @@ -102,6 +103,9 @@ class DexAnalyze { if (options->run_all_experiments_ || options->exp_analyze_strings_) { experiments_.emplace_back(new AnalyzeStrings); } + if (options->run_all_experiments_ || options->exp_code_metrics_) { + experiments_.emplace_back(new CodeMetrics); + } } bool ProcessDexFile(const DexFile& dex_file) { 
diff --git a/tools/dexanalyze/dexanalyze_experiments.cc b/tools/dexanalyze/dexanalyze_experiments.cc index 0f20a99f05..7006370c0b 100644 --- a/tools/dexanalyze/dexanalyze_experiments.cc +++ b/tools/dexanalyze/dexanalyze_experiments.cc @@ -32,13 +32,41 @@ namespace art { +static inline bool IsRange(Instruction::Code code) { + return code == Instruction::INVOKE_VIRTUAL_RANGE || + code == Instruction::INVOKE_DIRECT_RANGE || + code == Instruction::INVOKE_SUPER_RANGE || + code == Instruction::INVOKE_STATIC_RANGE || + code == Instruction::INVOKE_INTERFACE_RANGE; +} + +static inline uint16_t NumberOfArgs(const Instruction& inst) { + return IsRange(inst.Opcode()) ? inst.VRegA_3rc() : inst.VRegA_35c(); +} + +static inline uint16_t DexMethodIndex(const Instruction& inst) { + return IsRange(inst.Opcode()) ? inst.VRegB_3rc() : inst.VRegB_35c(); +} + std::string Percent(uint64_t value, uint64_t max) { if (max == 0) { - ++max; + return "0"; } - return android::base::StringPrintf("%" PRId64 "(%.2f%%)", - value, - static_cast<double>(value * 100) / static_cast<double>(max)); + return android::base::StringPrintf( + "%" PRId64 "(%.2f%%)", + value, + static_cast<double>(value * 100) / static_cast<double>(max)); +} + +std::string PercentDivide(uint64_t value, uint64_t max) { + if (max == 0) { + return "0"; + } + return android::base::StringPrintf( + "%" PRId64 "/%" PRId64 "(%.2f%%)", + value, + max, + static_cast<double>(value * 100) / static_cast<double>(max)); } static size_t PrefixLen(const std::string& a, const std::string& b) { @@ -150,38 +178,52 @@ void CountDexIndices::ProcessDexFile(const DexFile& dex_file) { // Invoke cases. case Instruction::INVOKE_VIRTUAL: case Instruction::INVOKE_VIRTUAL_RANGE: { - bool is_range = (inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE); - uint32_t method_idx = is_range ? 
inst->VRegB_3rc() : inst->VRegB_35c(); + uint32_t method_idx = DexMethodIndex(inst.Inst()); if (dex_file.GetMethodId(method_idx).class_idx_ == accessor.GetClassIdx()) { ++same_class_virtual_; - } else { - ++other_class_virtual_; - unique_method_ids.insert(method_idx); } + ++total_virtual_; + unique_method_ids.insert(method_idx); break; } case Instruction::INVOKE_DIRECT: case Instruction::INVOKE_DIRECT_RANGE: { - bool is_range = (inst->Opcode() == Instruction::INVOKE_DIRECT_RANGE); - uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c(); + uint32_t method_idx = DexMethodIndex(inst.Inst()); if (dex_file.GetMethodId(method_idx).class_idx_ == accessor.GetClassIdx()) { ++same_class_direct_; - } else { - ++other_class_direct_; - unique_method_ids.insert(method_idx); } + ++total_direct_; + unique_method_ids.insert(method_idx); break; } case Instruction::INVOKE_STATIC: case Instruction::INVOKE_STATIC_RANGE: { - bool is_range = (inst->Opcode() == Instruction::INVOKE_STATIC_RANGE); - uint32_t method_idx = (is_range) ? 
inst->VRegB_3rc() : inst->VRegB_35c(); + uint32_t method_idx = DexMethodIndex(inst.Inst()); if (dex_file.GetMethodId(method_idx).class_idx_ == accessor.GetClassIdx()) { ++same_class_static_; - } else { - ++other_class_static_; - unique_method_ids.insert(method_idx); } + ++total_static_; + unique_method_ids.insert(method_idx); + break; + } + case Instruction::INVOKE_INTERFACE: + case Instruction::INVOKE_INTERFACE_RANGE: { + uint32_t method_idx = DexMethodIndex(inst.Inst()); + if (dex_file.GetMethodId(method_idx).class_idx_ == accessor.GetClassIdx()) { + ++same_class_interface_; + } + ++total_interface_; + unique_method_ids.insert(method_idx); + break; + } + case Instruction::INVOKE_SUPER: + case Instruction::INVOKE_SUPER_RANGE: { + uint32_t method_idx = DexMethodIndex(inst.Inst()); + if (dex_file.GetMethodId(method_idx).class_idx_ == accessor.GetClassIdx()) { + ++same_class_super_; + } + ++total_super_; + unique_method_ids.insert(method_idx); break; } default: @@ -201,24 +243,75 @@ void CountDexIndices::Dump(std::ostream& os, uint64_t total_size) const { os << "Num field ids: " << num_field_ids_ << "\n"; os << "Num type ids: " << num_type_ids_ << "\n"; os << "Num class defs: " << num_class_defs_ << "\n"; - os << "Same class direct: " << same_class_direct_ << "\n"; - os << "Other class direct: " << other_class_direct_ << "\n"; - os << "Same class virtual: " << same_class_virtual_ << "\n"; - os << "Other class virtual: " << other_class_virtual_ << "\n"; - os << "Same class static: " << same_class_static_ << "\n"; - os << "Other class static: " << other_class_static_ << "\n"; + os << "Direct same class: " << PercentDivide(same_class_direct_, total_direct_) << "\n"; + os << "Virtual same class: " << PercentDivide(same_class_virtual_, total_virtual_) << "\n"; + os << "Static same class: " << PercentDivide(same_class_static_, total_static_) << "\n"; + os << "Interface same class: " << PercentDivide(same_class_interface_, total_interface_) << "\n"; + os << "Super same 
class: " << PercentDivide(same_class_super_, total_super_) << "\n"; os << "Num strings accessed from code: " << num_string_ids_from_code_ << "\n"; os << "Unique(per class) method ids accessed from code: " << total_unique_method_idx_ << "\n"; os << "Unique(per class) string ids accessed from code: " << total_unique_string_ids_ << "\n"; - size_t same_class_total = same_class_direct_ + same_class_virtual_ + same_class_static_; - size_t other_class_total = other_class_direct_ + other_class_virtual_ + other_class_static_; - os << "Same class invoke: " << same_class_total << "\n"; - os << "Other class invoke: " << other_class_total << "\n"; + const size_t same_class_total = + same_class_direct_ + + same_class_virtual_ + + same_class_static_ + + same_class_interface_ + + same_class_super_; + const size_t other_class_total = + total_direct_ + + total_virtual_ + + total_static_ + + total_interface_ + + total_super_; + os << "Same class invokes: " << PercentDivide(same_class_total, other_class_total) << "\n"; os << "Invokes from code: " << (same_class_total + other_class_total) << "\n"; os << "Total Dex code bytes: " << Percent(dex_code_bytes_, total_size) << "\n"; os << "Total unique code items: " << total_unique_code_items_ << "\n"; os << "Total Dex size: " << total_size << "\n"; } -} // namespace art +void CodeMetrics::ProcessDexFile(const DexFile& dex_file) { + for (ClassAccessor accessor : dex_file.GetClasses()) { + for (const ClassAccessor::Method& method : accessor.GetMethods()) { + bool space_for_out_arg = false; + for (const DexInstructionPcPair& inst : method.GetInstructions()) { + switch (inst->Opcode()) { + case Instruction::INVOKE_VIRTUAL: + case Instruction::INVOKE_DIRECT: + case Instruction::INVOKE_SUPER: + case Instruction::INVOKE_INTERFACE: + case Instruction::INVOKE_STATIC: { + const uint32_t args = NumberOfArgs(inst.Inst()); + CHECK_LT(args, kMaxArgCount); + ++arg_counts_[args]; + space_for_out_arg = args < kMaxArgCount - 1; + break; + } + case 
Instruction::MOVE_RESULT: + case Instruction::MOVE_RESULT_OBJECT: { + if (space_for_out_arg) { + move_result_savings_ += inst->SizeInCodeUnits() * 2; + } + break; + } + default: + space_for_out_arg = false; + break; + } + } + } + } +} +void CodeMetrics::Dump(std::ostream& os, uint64_t total_size) const { + const uint64_t total = std::accumulate(arg_counts_, arg_counts_ + kMaxArgCount, 0u); + for (size_t i = 0; i < kMaxArgCount; ++i) { + os << "args=" << i << ": " << Percent(arg_counts_[i], total) << "\n"; + } + os << "Move result savings: " << Percent(move_result_savings_, total_size) << "\n"; + os << "One byte invoke savings: " << Percent(total, total_size) << "\n"; + const uint64_t low_arg_total = std::accumulate(arg_counts_, arg_counts_ + 3, 0u); + os << "Low arg savings: " << Percent(low_arg_total * 2, total_size) << "\n"; +} + +} // namespace art diff --git a/tools/dexanalyze/dexanalyze_experiments.h b/tools/dexanalyze/dexanalyze_experiments.h index c84b082955..7ba2a49372 100644 --- a/tools/dexanalyze/dexanalyze_experiments.h +++ b/tools/dexanalyze/dexanalyze_experiments.h @@ -75,11 +75,28 @@ class CountDexIndices : public Experiment { // Invokes size_t same_class_direct_ = 0; - size_t other_class_direct_ = 0; + size_t total_direct_ = 0; size_t same_class_virtual_ = 0; - size_t other_class_virtual_ = 0; + size_t total_virtual_ = 0; size_t same_class_static_ = 0; - size_t other_class_static_ = 0; + size_t total_static_ = 0; + size_t same_class_interface_ = 0; + size_t total_interface_ = 0; + size_t same_class_super_ = 0; + size_t total_super_ = 0; +}; + +// Measure various code metrics including args per invoke-virtual, fill/spill move paterns. 
+class CodeMetrics : public Experiment { + public: + void ProcessDexFile(const DexFile& dex_file); + + void Dump(std::ostream& os, uint64_t total_size) const; + + private: + static constexpr size_t kMaxArgCount = 6; + uint64_t arg_counts_[kMaxArgCount] = {}; + uint64_t move_result_savings_ = 0u; }; } // namespace art diff --git a/tools/teardown-buildbot-device.sh b/tools/teardown-buildbot-device.sh index df239a28bc..bf14ca4f9f 100755 --- a/tools/teardown-buildbot-device.sh +++ b/tools/teardown-buildbot-device.sh @@ -25,6 +25,27 @@ adb root adb wait-for-device if [[ -n "$ART_TEST_CHROOT" ]]; then + + # remove_filesystem_from_chroot DIR-IN-CHROOT FSTYPE REMOVE-DIR-IN-CHROOT + # ----------------------------------------------------------------------- + # Unmount filesystem with type FSTYPE mounted in directory DIR-IN-CHROOT + # under the chroot directory. + # Remove DIR-IN-CHROOT under the chroot if REMOVE-DIR-IN-CHROOT is + # true. + remove_filesystem_from_chroot() { + local dir_in_chroot=$1 + local fstype=$2 + local remove_dir=$3 + local dir="$ART_TEST_CHROOT/$dir_in_chroot" + adb shell test -d "$dir" \ + && adb shell mount | grep -q "^$fstype on $dir type $fstype " \ + && if adb shell umount "$dir"; then + $remove_dir && adb shell rmdir "$dir" + else + adb shell lsof "$dir" + fi + } + # Tear down the chroot dir. echo -e "${green}Tear down the chroot dir in $ART_TEST_CHROOT${nc}" @@ -32,22 +53,17 @@ if [[ -n "$ART_TEST_CHROOT" ]]; then [[ "x$ART_TEST_CHROOT" = x/* ]] || { echo "$ART_TEST_CHROOT is not an absolute path"; exit 1; } # Remove /dev from chroot. - adb shell mount | grep -q "^tmpfs on $ART_TEST_CHROOT/dev type tmpfs " \ - && adb shell umount "$ART_TEST_CHROOT/dev" \ - && adb shell rmdir "$ART_TEST_CHROOT/dev" + remove_filesystem_from_chroot dev tmpfs true # Remove /sys/kernel/debug from chroot. 
- adb shell mount | grep -q "^debugfs on $ART_TEST_CHROOT/sys/kernel/debug type debugfs " \ - && adb shell umount "$ART_TEST_CHROOT/sys/kernel/debug" + # The /sys/kernel/debug directory under the chroot dir cannot be + # deleted, as it is part of the host device's /sys filesystem. + remove_filesystem_from_chroot sys/kernel/debug debugfs false # Remove /sys from chroot. - adb shell mount | grep -q "^sysfs on $ART_TEST_CHROOT/sys type sysfs " \ - && adb shell umount "$ART_TEST_CHROOT/sys" \ - && adb shell rmdir "$ART_TEST_CHROOT/sys" + remove_filesystem_from_chroot sys sysfs true # Remove /proc from chroot. - adb shell mount | grep -q "^proc on $ART_TEST_CHROOT/proc type proc " \ - && adb shell umount "$ART_TEST_CHROOT/proc" \ - && adb shell rmdir "$ART_TEST_CHROOT/proc" + remove_filesystem_from_chroot proc proc true # Remove /etc from chroot. adb shell rm -f "$ART_TEST_CHROOT/etc" @@ -65,6 +81,6 @@ if [[ -n "$ART_TEST_CHROOT" ]]; then /plat_property_contexts \ /nonplat_property_contexts" for f in $property_context_files; do - adb shell test -f "$f" "&&" rm -f "$ART_TEST_CHROOT$f" + adb shell rm -f "$ART_TEST_CHROOT$f" done fi diff --git a/tools/veridex/Android.bp b/tools/veridex/Android.bp index 5186c43ca2..96d4a094b5 100644 --- a/tools/veridex/Android.bp +++ b/tools/veridex/Android.bp @@ -24,11 +24,16 @@ cc_binary { "veridex.cc", ], cflags: ["-Wall", "-Werror"], - shared_libs: [ + static_libs: [ "libdexfile", "libartbase", "libbase", + "liblog", + "libutils", + "libz", + "libziparchive", ], + stl: "libc++_static", header_libs: [ "art_libartbase_headers", ], diff --git a/tools/veridex/Android.mk b/tools/veridex/Android.mk index 51d924a3c1..83fa0d6397 100644 --- a/tools/veridex/Android.mk +++ b/tools/veridex/Android.mk @@ -16,6 +16,9 @@ LOCAL_PATH := $(call my-dir) +# The veridex tool takes stub dex files as input, so we generate both the system and oahl +# dex stubs. 
+ system_stub_dex := $(TARGET_OUT_COMMON_INTERMEDIATES)/PACKAGING/core_dex_intermediates/classes.dex $(system_stub_dex): PRIVATE_MIN_SDK_VERSION := 1000 $(system_stub_dex): $(call resolve-prebuilt-sdk-jar-path,system_current) | $(ZIP2ZIP) $(DX) @@ -27,9 +30,29 @@ $(oahl_stub_dex): PRIVATE_MIN_SDK_VERSION := 1000 $(oahl_stub_dex): $(call get-prebuilt-sdk-dir,current)/org.apache.http.legacy.jar | $(ZIP2ZIP) $(DX) $(transform-classes-d8.jar-to-dex) +app_compat_lists := \ + $(INTERNAL_PLATFORM_HIDDENAPI_LIGHT_GREYLIST) \ + $(INTERNAL_PLATFORM_HIDDENAPI_DARK_GREYLIST) \ + $(INTERNAL_PLATFORM_HIDDENAPI_BLACKLIST) + +# Phony rule to create all dependencies of the appcompat.sh script. .PHONY: appcompat +appcompat: $(system_stub_dex) $(oahl_stub_dex) $(HOST_OUT_EXECUTABLES)/veridex $(app_compat_lists) + +VERIDEX_FILES_PATH := \ + $(call intermediates-dir-for,PACKAGING,veridex,HOST)/veridex.zip + +VERIDEX_FILES := $(LOCAL_PATH)/appcompat.sh + +$(VERIDEX_FILES_PATH): PRIVATE_VERIDEX_FILES := $(VERIDEX_FILES) +$(VERIDEX_FILES_PATH): PRIVATE_APP_COMPAT_LISTS := $(app_compat_lists) +$(VERIDEX_FILES_PATH) : $(SOONG_ZIP) $(VERIDEX_FILES) $(app_compat_lists) $(HOST_OUT_EXECUTABLES)/veridex + $(hide) $(SOONG_ZIP) -o $@ -C art/tools/veridex -f $(PRIVATE_VERIDEX_FILES) \ + -C $(dir $(lastword $(PRIVATE_APP_COMPAT_LISTS))) $(addprefix -f , $(PRIVATE_APP_COMPAT_LISTS)) \ + -C $(HOST_OUT_EXECUTABLES) -f $(HOST_OUT_EXECUTABLES)/veridex + +# Make the zip file available for prebuilts. 
+$(call dist-for-goals,sdk,$(VERIDEX_FILES_PATH)) -appcompat: $(system_stub_dex) $(oahl_stub_dex) $(HOST_OUT_EXECUTABLES)/veridex \ - ${TARGET_OUT_COMMON_INTERMEDIATES}/PACKAGING/hiddenapi-light-greylist.txt \ - ${TARGET_OUT_COMMON_INTERMEDIATES}/PACKAGING/hiddenapi-dark-greylist.txt \ - ${TARGET_OUT_COMMON_INTERMEDIATES}/PACKAGING/hiddenapi-blacklist.txt +VERIDEX_FILES := +app_compat_lists := diff --git a/tools/veridex/appcompat.sh b/tools/veridex/appcompat.sh index 31a8654b58..c07ab21a4b 100755 --- a/tools/veridex/appcompat.sh +++ b/tools/veridex/appcompat.sh @@ -14,7 +14,28 @@ # See the License for the specific language governing permissions and # limitations under the License. -# We want to be at the root for simplifying the "out" detection +echo "NOTE: appcompat.sh is still under development. It can report" +echo "API uses that do not execute at runtime, and reflection uses" +echo "that do not exist. It can also miss on reflection uses." + +# First check if the script is invoked from a prebuilts location. +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +if [[ -e ${SCRIPT_DIR}/veridex && \ + -e ${SCRIPT_DIR}/hiddenapi-blacklist.txt && \ + -e ${SCRIPT_DIR}/hiddenapi-light-greylist.txt && \ + -e ${SCRIPT_DIR}/hiddenapi-dark-greylist.txt && \ + -e ${SCRIPT_DIR}/org.apache.http.legacy-stubs.dex && \ + -e ${SCRIPT_DIR}/system-stubs.dex ]]; then + exec ${SCRIPT_DIR}/veridex \ + --core-stubs=${SCRIPT_DIR}/system-stubs.dex:${SCRIPT_DIR}/org.apache.http.legacy-stubs.dex \ + --blacklist=${SCRIPT_DIR}/hiddenapi-blacklist.txt \ + --light-greylist=${SCRIPT_DIR}/hiddenapi-light-greylist.txt \ + --dark-greylist=${SCRIPT_DIR}/hiddenapi-dark-greylist.txt \ + $@ +fi + +# Otherwise, we want to be at the root for simplifying the "out" detection # logic. if [ ! -d art ]; then echo "Script needs to be run at the root of the android tree." 
@@ -38,10 +59,6 @@ if [ -z "$ANDROID_HOST_OUT" ] ; then ANDROID_HOST_OUT=${OUT}/host/linux-x86 fi -echo "NOTE: appcompat.sh is still under development. It can report" -echo "API uses that do not execute at runtime, and reflection uses" -echo "that do not exist. It can also miss on reflection uses." - ${ANDROID_HOST_OUT}/bin/veridex \ --core-stubs=${PACKAGING}/core_dex_intermediates/classes.dex:${PACKAGING}/oahl_dex_intermediates/classes.dex \ |