 compiler/exception_test.cc                 |  2 +-
 compiler/optimizing/code_generator.cc      |  2 +-
 compiler/optimizing/optimizing_compiler.cc | 13 ++++++-------
 compiler/optimizing/stack_map_stream.cc    |  8 +-------
 compiler/optimizing/stack_map_stream.h     |  3 +--
 compiler/optimizing/stack_map_test.cc      | 18 +++++++++---------
 runtime/oat.h                              |  4 ++--
 runtime/oat_quick_method_header.h          | 10 ++--------
 runtime/stack_map.cc                       |  1 -
 runtime/stack_map.h                        |  9 ---------
 10 files changed, 23 insertions(+), 47 deletions(-)
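For orientation, a minimal sketch of the StackMapStream encode-side sequence after this change, modeled on the compiler/exception_test.cc hunk below. The allocator setup and the kDexPc/native_pc_offset values are assumed for illustration; only the EndMethod() signature differs from before.

    // Sketch only: mirrors the call sequence exercised by exception_test.cc.
    StackMapStream stack_maps(&allocator, kRuntimeISA);
    stack_maps.BeginMethod(4 * sizeof(void*),  // frame_size_in_bytes
                           0u,                 // core_spill_mask
                           0u,                 // fp_spill_mask
                           0u);                // num_dex_registers
    stack_maps.BeginStackMapEntry(kDexPc, native_pc_offset);
    stack_maps.EndStackMapEntry();
    stack_maps.EndMethod();  // Previously: EndMethod(code_size).
    ScopedArenaVector<uint8_t> stack_map = stack_maps.Encode();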
diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc
index f978cc62f3..d5ceafeac9 100644
--- a/compiler/exception_test.cc
+++ b/compiler/exception_test.cc
@@ -80,7 +80,7 @@ class ExceptionTest : public CommonRuntimeTest {
     stack_maps.BeginMethod(4 * sizeof(void*), 0u, 0u, 0u);
     stack_maps.BeginStackMapEntry(kDexPc, native_pc_offset);
     stack_maps.EndStackMapEntry();
-    stack_maps.EndMethod(code_size);
+    stack_maps.EndMethod();
     ScopedArenaVector<uint8_t> stack_map = stack_maps.Encode();
 
     const size_t stack_maps_size = stack_map.size();
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 122f27b0e0..9e2f5cd508 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -440,7 +440,7 @@ void CodeGenerator::Compile(CodeAllocator* allocator) {
   // Finalize instructions in assember;
   Finalize(allocator);
 
-  GetStackMapStream()->EndMethod(GetAssembler()->CodeSize());
+  GetStackMapStream()->EndMethod();
 }
 
 void CodeGenerator::Finalize(CodeAllocator* allocator) {
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 4f43b71fde..e8f8d32525 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -1171,8 +1171,7 @@ CompiledMethod* OptimizingCompiler::Compile(const dex::CodeItem* code_item,
 }
 
 static ScopedArenaVector<uint8_t> CreateJniStackMap(ScopedArenaAllocator* allocator,
-                                                    const JniCompiledMethod& jni_compiled_method,
-                                                    size_t code_size) {
+                                                    const JniCompiledMethod& jni_compiled_method) {
   // StackMapStream is quite large, so allocate it using the ScopedArenaAllocator
   // to stay clear of the frame size limit.
   std::unique_ptr<StackMapStream> stack_map_stream(
@@ -1182,7 +1181,7 @@ static ScopedArenaVector<uint8_t> CreateJniStackMap(ScopedArenaAllocator* alloca
       jni_compiled_method.GetCoreSpillMask(),
       jni_compiled_method.GetFpSpillMask(),
       /* num_dex_registers= */ 0);
-  stack_map_stream->EndMethod(code_size);
+  stack_map_stream->EndMethod();
   return stack_map_stream->Encode();
 }
 
@@ -1240,8 +1239,8 @@ CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags,
   MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiledNativeStub);
 
   ScopedArenaAllocator stack_map_allocator(&arena_stack);  // Will hold the stack map.
-  ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(
-      &stack_map_allocator, jni_compiled_method, jni_compiled_method.GetCode().size());
+  ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(&stack_map_allocator,
+                                                           jni_compiled_method);
   return CompiledMethod::SwapAllocCompiledMethod(
       GetCompilerDriver()->GetCompiledMethodStorage(),
       jni_compiled_method.GetInstructionSet(),
@@ -1291,8 +1290,8 @@ bool OptimizingCompiler::JitCompile(Thread* self,
     ArenaStack arena_stack(runtime->GetJitArenaPool());
     // StackMapStream is large and it does not fit into this frame, so we need helper method.
     ScopedArenaAllocator stack_map_allocator(&arena_stack);  // Will hold the stack map.
-    ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(
-        &stack_map_allocator, jni_compiled_method, jni_compiled_method.GetCode().size());
+    ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(&stack_map_allocator,
+                                                             jni_compiled_method);
     uint8_t* stack_map_data = nullptr;
     uint8_t* roots_data = nullptr;
     uint32_t data_size = code_cache->ReserveData(self,
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index e87f3c80ad..60ca61c133 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -54,10 +54,9 @@ void StackMapStream::BeginMethod(size_t frame_size_in_bytes,
   num_dex_registers_ = num_dex_registers;
 }
 
-void StackMapStream::EndMethod(size_t code_size) {
+void StackMapStream::EndMethod() {
   DCHECK(in_method_) << "Mismatched Begin/End calls";
   in_method_ = false;
-  packed_code_size_ = StackMap::PackNativePc(code_size, instruction_set_);
 
   // Read the stack masks now. The compiler might have updated them.
   for (size_t i = 0; i < lazy_stack_masks_.size(); i++) {
@@ -67,10 +66,6 @@ void StackMapStream::EndMethod(size_t code_size) {
           stack_masks_.Dedup(stack_mask->GetRawStorage(), stack_mask->GetNumberOfBits());
     }
   }
-
-  for (size_t i = 0; i < stack_maps_.size(); i++) {
-    DCHECK_LE(stack_maps_[i][StackMap::kPackedNativePc], packed_code_size_);
-  }
 }
 
 void StackMapStream::BeginStackMapEntry(uint32_t dex_pc,
@@ -301,7 +296,6 @@ ScopedArenaVector<uint8_t> StackMapStream::Encode() {
 
   ScopedArenaVector<uint8_t> buffer(allocator_->Adapter(kArenaAllocStackMapStream));
   BitMemoryWriter<ScopedArenaVector<uint8_t>> out(&buffer);
-  out.WriteVarint(packed_code_size_);
   out.WriteVarint(packed_frame_size_);
   out.WriteVarint(core_spill_mask_);
   out.WriteVarint(fp_spill_mask_);
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 164e9021b2..01c6bf9e0e 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -62,7 +62,7 @@ class StackMapStream : public DeletableArenaObject<kArenaAllocStackMapStream> {
                    size_t core_spill_mask,
                    size_t fp_spill_mask,
                    uint32_t num_dex_registers);
-  void EndMethod(size_t code_size);
+  void EndMethod();
 
   void BeginStackMapEntry(uint32_t dex_pc,
                           uint32_t native_pc_offset,
@@ -99,7 +99,6 @@ class StackMapStream : public DeletableArenaObject<kArenaAllocStackMapStream> {
 
   ScopedArenaAllocator* allocator_;
   const InstructionSet instruction_set_;
-  uint32_t packed_code_size_ = 0;
   uint32_t packed_frame_size_ = 0;
   uint32_t core_spill_mask_ = 0;
   uint32_t fp_spill_mask_ = 0;
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index cbd844f2cf..d28f09fbba 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -61,7 +61,7 @@ TEST(StackMapTest, Test1) {
   stream.AddDexRegisterEntry(Kind::kConstant, -2);  // Short location.
   stream.EndStackMapEntry();
 
-  stream.EndMethod(64 * kPcAlign);
+  stream.EndMethod();
   ScopedArenaVector<uint8_t> memory = stream.Encode();
 
   CodeInfo code_info(memory.data());
@@ -147,7 +147,7 @@ TEST(StackMapTest, Test2) {
   stream.AddDexRegisterEntry(Kind::kInFpuRegisterHigh, 1);  // Short location.
   stream.EndStackMapEntry();
 
-  stream.EndMethod(256 * kPcAlign);
+  stream.EndMethod();
   ScopedArenaVector<uint8_t> memory = stream.Encode();
 
   CodeInfo code_info(memory.data());
@@ -317,7 +317,7 @@ TEST(StackMapTest, TestDeduplicateInlineInfoDexRegisterMap) {
   stream.EndInlineInfoEntry();
   stream.EndStackMapEntry();
 
-  stream.EndMethod(64 * kPcAlign);
+  stream.EndMethod();
   ScopedArenaVector<uint8_t> memory = stream.Encode();
 
   CodeInfo code_info(memory.data());
@@ -372,7 +372,7 @@ TEST(StackMapTest, TestNonLiveDexRegisters) {
   stream.AddDexRegisterEntry(Kind::kConstant, -2);  // Large location.
   stream.EndStackMapEntry();
 
-  stream.EndMethod(64 * kPcAlign);
+  stream.EndMethod();
   ScopedArenaVector<uint8_t> memory = stream.Encode();
 
   CodeInfo code_info(memory.data());
@@ -431,7 +431,7 @@ TEST(StackMapTest, TestShareDexRegisterMap) {
   stream.AddDexRegisterEntry(Kind::kConstant, -2);  // Large location.
   stream.EndStackMapEntry();
 
-  stream.EndMethod(66 * kPcAlign);
+  stream.EndMethod();
   ScopedArenaVector<uint8_t> memory = stream.Encode();
 
   CodeInfo ci(memory.data());
@@ -479,7 +479,7 @@ TEST(StackMapTest, TestNoDexRegisterMap) {
   stream.AddDexRegisterEntry(Kind::kNone, 0);
   stream.EndStackMapEntry();
 
-  stream.EndMethod(68 * kPcAlign);
+  stream.EndMethod();
   ScopedArenaVector<uint8_t> memory = stream.Encode();
 
   CodeInfo code_info(memory.data());
@@ -578,7 +578,7 @@ TEST(StackMapTest, InlineTest) {
 
   stream.EndStackMapEntry();
 
-  stream.EndMethod(78 * kPcAlign);
+  stream.EndMethod();
   ScopedArenaVector<uint8_t> memory = stream.Encode();
 
   CodeInfo ci(memory.data());
@@ -722,7 +722,7 @@ TEST(StackMapTest, TestDeduplicateStackMask) {
   stream.BeginStackMapEntry(0, 8 * kPcAlign, 0x3, &sp_mask);
   stream.EndStackMapEntry();
 
-  stream.EndMethod(8 * kPcAlign);
+  stream.EndMethod();
   ScopedArenaVector<uint8_t> memory = stream.Encode();
 
   CodeInfo code_info(memory.data());
@@ -746,7 +746,7 @@ TEST(StackMapTest, TestDedupeBitTables) {
   stream.AddDexRegisterEntry(Kind::kConstant, -2);
   stream.EndStackMapEntry();
 
-  stream.EndMethod(64 * kPcAlign);
+  stream.EndMethod();
   ScopedArenaVector<uint8_t> memory = stream.Encode();
 
   std::vector<uint8_t> out;
diff --git a/runtime/oat.h b/runtime/oat.h
index b824729b3d..88238d96ec 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -31,8 +31,8 @@ class InstructionSetFeatures;
 class PACKED(4) OatHeader {
  public:
   static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
-  // Last oat version changed reason: Add code size to CodeInfo.
-  static constexpr uint8_t kOatVersion[] = { '1', '6', '7', '\0' };
+  // Last oat version changed reason: Partial boot image.
+  static constexpr uint8_t kOatVersion[] = { '1', '6', '6', '\0' };
 
   static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
   static constexpr const char* kDebuggableKey = "debuggable";
diff --git a/runtime/oat_quick_method_header.h b/runtime/oat_quick_method_header.h
index 6c123c4eb5..8798c6968c 100644
--- a/runtime/oat_quick_method_header.h
+++ b/runtime/oat_quick_method_header.h
@@ -35,8 +35,6 @@ class PACKED(4) OatQuickMethodHeader {
                      uint32_t code_size)
       : vmap_table_offset_(vmap_table_offset),
         code_size_(code_size) {
-    DCHECK_NE(vmap_table_offset, 0u);
-    DCHECK_NE(code_size, 0u);
   }
 
   static OatQuickMethodHeader* FromCodePointer(const void* code_ptr) {
@@ -60,7 +58,7 @@ class PACKED(4) OatQuickMethodHeader {
   }
 
   bool IsOptimized() const {
-    return (code_size_ & kCodeSizeMask) != 0 && vmap_table_offset_ != 0;
+    return GetCodeSize() != 0 && vmap_table_offset_ != 0;
   }
 
   const uint8_t* GetOptimizedCodeInfoPtr() const {
@@ -78,11 +76,7 @@ class PACKED(4) OatQuickMethodHeader {
   }
 
   uint32_t GetCodeSize() const {
-    DCHECK(IsOptimized());
-    size_t code_size1 = code_size_ & kCodeSizeMask;
-    size_t code_size2 = CodeInfo::DecodeCodeSize(GetOptimizedCodeInfoPtr());
-    DCHECK_EQ(code_size1, code_size2);
-    return code_size2;
+    return code_size_ & kCodeSizeMask;
   }
 
   const uint32_t* GetCodeSizeAddr() const {
diff --git a/runtime/stack_map.cc b/runtime/stack_map.cc
index 5d30b771ae..62dec15c57 100644
--- a/runtime/stack_map.cc
+++ b/runtime/stack_map.cc
@@ -227,7 +227,6 @@ void CodeInfo::Dump(VariableIndentationOutputStream* vios,
                     bool verbose,
                     InstructionSet instruction_set) const {
   vios->Stream() << "CodeInfo BitSize=" << size_in_bits_
-    << " CodeSize:" << StackMap::UnpackNativePc(packed_code_size_, instruction_set)
     << " FrameSize:" << packed_frame_size_ * kStackAlignment
     << " CoreSpillMask:" << std::hex << core_spill_mask_
     << " FpSpillMask:" << std::hex << fp_spill_mask_
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 59da923661..87133cf59c 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -438,15 +438,8 @@ class CodeInfo {
   // Accumulate code info size statistics into the given Stats tree.
   static void CollectSizeStats(const uint8_t* code_info, /*out*/ Stats* parent);
 
-  ALWAYS_INLINE static size_t DecodeCodeSize(const uint8_t* data,
-                                             InstructionSet isa = kRuntimeISA) {
-    uint32_t packed_code_size = BitMemoryReader(data).ReadVarint();
-    return StackMap::UnpackNativePc(packed_code_size, isa);
-  }
-
   ALWAYS_INLINE static QuickMethodFrameInfo DecodeFrameInfo(const uint8_t* data) {
     BitMemoryReader reader(data);
-    reader.ReadVarint();  // Skip code size.
     return QuickMethodFrameInfo(
         reader.ReadVarint() * kStackAlignment,  // Decode packed_frame_size_ and unpack.
         reader.ReadVarint(),  // core_spill_mask_.
@@ -468,7 +461,6 @@ class CodeInfo {
   // Invokes the callback with member pointer of each header field.
   template<typename Callback>
   ALWAYS_INLINE static void ForEachHeaderField(Callback callback) {
-    callback(&CodeInfo::packed_code_size_);
     callback(&CodeInfo::packed_frame_size_);
     callback(&CodeInfo::core_spill_mask_);
     callback(&CodeInfo::fp_spill_mask_);
@@ -494,7 +486,6 @@ class CodeInfo {
     callback(&CodeInfo::dex_register_catalog_);
   }
 
-  uint32_t packed_code_size_ = 0;  // The size of native PC range.
   uint32_t packed_frame_size_ = 0;  // Frame size in kStackAlignment units.
   uint32_t core_spill_mask_ = 0;
   uint32_t fp_spill_mask_ = 0;
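After this change the CodeInfo header no longer begins with a packed code size: DecodeFrameInfo reads the packed frame size as the first varint, and callers take the code size from OatQuickMethodHeader's code_size_ field instead. A standalone sketch of the new header decoding follows; the byte-wise LEB128 reader is a simplification of ART's bit-packed BitMemoryReader, and kStackAlignment = 16 is assumed here.

    #include <cstdint>

    // Simplified stand-in for BitMemoryReader::ReadVarint(); ART's real
    // encoding is bit-packed, not the byte-wise LEB128 shown here.
    static uint32_t ReadVarint(const uint8_t** data) {
      uint32_t value = 0;
      int shift = 0;
      uint8_t byte;
      do {
        byte = *(*data)++;
        value |= static_cast<uint32_t>(byte & 0x7f) << shift;
        shift += 7;
      } while ((byte & 0x80) != 0);
      return value;
    }

    constexpr uint32_t kStackAlignment = 16;  // Assumed stack alignment in bytes.

    struct QuickMethodFrameInfo {
      uint32_t frame_size_in_bytes;
      uint32_t core_spill_mask;
      uint32_t fp_spill_mask;
    };

    // Mirrors CodeInfo::DecodeFrameInfo after this change: with the code-size
    // varint gone, the header starts directly with the packed frame size.
    QuickMethodFrameInfo DecodeFrameInfo(const uint8_t* data) {
      return QuickMethodFrameInfo{
          ReadVarint(&data) * kStackAlignment,  // packed_frame_size_, unpacked.
          ReadVarint(&data),                    // core_spill_mask_.
          ReadVarint(&data),                    // fp_spill_mask_.
      };
    }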