/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_OAT_QUICK_METHOD_HEADER_H_
#define ART_RUNTIME_OAT_QUICK_METHOD_HEADER_H_

#include "arch/instruction_set.h"
#include "base/locks.h"
#include "base/macros.h"
#include "base/utils.h"
#include "quick/quick_method_frame_info.h"
#include "stack_map.h"

namespace art {

class ArtMethod;

// Size in bytes of the should_deoptimize flag on the stack.
// Four bytes suffice regardless of the architecture; the frame size
// calculation aligns the final frame size automatically.
static constexpr size_t kShouldDeoptimizeFlagSize = 4;

// OatQuickMethodHeader precedes the raw code chunk generated by the compiler.
class PACKED(4) OatQuickMethodHeader {
 public:
  explicit OatQuickMethodHeader(uint32_t code_info_offset = 0) {
    SetCodeInfoOffset(code_info_offset);
  }

  // The single method header shared by all methods executing in nterp; it
  // describes the nterp code blob and lets `IsNterpPc` recognize PCs in it.
  static OatQuickMethodHeader* NterpMethodHeader;

  bool IsNterpMethodHeader() const;

  static bool IsNterpPc(uintptr_t pc) {
    return OatQuickMethodHeader::NterpMethodHeader != nullptr &&
           OatQuickMethodHeader::NterpMethodHeader->Contains(pc);
  }

  static OatQuickMethodHeader* FromCodePointer(const void* code_ptr) {
    uintptr_t code = reinterpret_cast<uintptr_t>(code_ptr);
    uintptr_t header = code - OFFSETOF_MEMBER(OatQuickMethodHeader, code_);
    DCHECK(IsAlignedParam(code, GetInstructionSetCodeAlignment(kRuntimeISA)) ||
           IsAlignedParam(header, GetInstructionSetCodeAlignment(kRuntimeISA)))
        << std::hex << code << " " << std::hex << header;
    return reinterpret_cast<OatQuickMethodHeader*>(header);
  }

  static OatQuickMethodHeader* FromEntryPoint(const void* entry_point) {
    return FromCodePointer(EntryPointToCodePointer(entry_point));
  }
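
  // A usage sketch (not itself part of this file): given a quick-code entry
  // point, the header can be recovered and queried. `entry_point` here stands
  // in for any quick entry point, e.g. one read from an ArtMethod:
  //
  //   OatQuickMethodHeader* header = OatQuickMethodHeader::FromEntryPoint(entry_point);
  //   uint32_t code_size = header->GetCodeSize();
  //
  // FromEntryPoint first removes the instruction-set adjustment (the Thumb-2
  // bit on ARM) via EntryPointToCodePointer, then backs up over the header.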

  static size_t InstructionAlignedSize() {
    return RoundUp(sizeof(OatQuickMethodHeader), GetInstructionSetCodeAlignment(kRuntimeISA));
  }

  OatQuickMethodHeader(const OatQuickMethodHeader&) = default;
  OatQuickMethodHeader& operator=(const OatQuickMethodHeader&) = default;

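  // For example, with an entry point at 0x7001 (ARM, Thumb-2 bit set) and a
  // PC of 0x7041 taken from the stack, the offset returned below is 0x40.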
  uintptr_t NativeQuickPcOffset(const uintptr_t pc) const {
    return pc - reinterpret_cast<uintptr_t>(GetEntryPoint());
  }

  ALWAYS_INLINE bool IsOptimized() const {
    uintptr_t code = reinterpret_cast<uintptr_t>(code_);
    DCHECK_NE(data_, 0u) << std::hex << code;  // Probably zero padding after native code.
    DCHECK_NE(data_, kInvalidData) << std::hex << code;  // Probably a stub or trampoline.
    return (data_ & kIsCodeInfoMask) != 0;
  }

  ALWAYS_INLINE const uint8_t* GetOptimizedCodeInfoPtr() const {
    uint32_t offset = GetCodeInfoOffset();
    DCHECK_NE(offset, 0u);
    return code_ - offset;
  }

  ALWAYS_INLINE uint8_t* GetOptimizedCodeInfoPtr() {
    uint32_t offset = GetCodeInfoOffset();
    DCHECK_NE(offset, 0u);
    return code_ - offset;
  }

  ALWAYS_INLINE const uint8_t* GetCode() const {
    return code_;
  }

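  // For example, `data_` == 0x00000180 (kIsCodeInfoMask clear) directly
  // encodes a code size of 0x180 bytes, while an optimized method stores its
  // code size in the CodeInfo located `GetCodeInfoOffset()` bytes before
  // `code_`.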
  ALWAYS_INLINE uint32_t GetCodeSize() const {
    return LIKELY(IsOptimized())
        ? CodeInfo::DecodeCodeSize(GetOptimizedCodeInfoPtr())
        : (data_ & kCodeSizeMask);
  }

  ALWAYS_INLINE uint32_t GetCodeInfoOffset() const {
    DCHECK(IsOptimized());
    return data_ & kCodeInfoMask;
  }

  void SetCodeInfoOffset(uint32_t offset) {
    data_ = kIsCodeInfoMask | offset;
    // The offset must fit in the low 30 bits; this check detects truncation.
    DCHECK_EQ(GetCodeInfoOffset(), offset);
  }

  bool Contains(uintptr_t pc) const {
    // We should not call `Contains` on a stub or trampoline.
    DCHECK_NE(data_, kInvalidData) << std::hex << reinterpret_cast<uintptr_t>(code_);
    // Remove hwasan tag to make comparison below valid. The PC from the stack does not have it.
    uintptr_t code_start = reinterpret_cast<uintptr_t>(HWASanUntag(code_));
    static_assert(kRuntimeISA != InstructionSet::kThumb2, "kThumb2 cannot be a runtime ISA");
    if (kRuntimeISA == InstructionSet::kArm) {
      // On Thumb-2, the PC carries the mode bit and is thus offset by one
      // relative to `code_`.
      code_start++;
    }
    // The upper bound is inclusive so that a return address pointing just past
    // the last instruction is still attributed to this method.
    return code_start <= pc && pc <= (code_start + GetCodeSize());
  }

  const uint8_t* GetEntryPoint() const {
    // When the runtime architecture is ARM, `kRuntimeISA` is set to `kArm`
    // (not `kThumb2`), *but* we always generate code for the Thumb-2
    // instruction set anyway. Thumb-2 requires the low bit of the entry point
    // to be set, i.e. the entry point is at offset 1 from the code start.
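    // For example, if `code_` is at 0x7000, the entry point is 0x7001 on ARM
    // and 0x7000 on all other architectures.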
    static_assert(kRuntimeISA != InstructionSet::kThumb2, "kThumb2 cannot be a runtime ISA");
    return (kRuntimeISA == InstructionSet::kArm)
        ? reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(code_) | 1)
        : code_;
  }

  template <bool kCheckFrameSize = true>
  uint32_t GetFrameSizeInBytes() const {
    uint32_t result = GetFrameInfo().FrameSizeInBytes();
    if (kCheckFrameSize) {
      DCHECK_ALIGNED(result, kStackAlignment);
    }
    return result;
  }

  QuickMethodFrameInfo GetFrameInfo() const {
    DCHECK(IsOptimized());
    return CodeInfo::DecodeFrameInfo(GetOptimizedCodeInfoPtr());
  }

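  // A worked example with illustrative numbers: for a 96-byte frame with four
  // core registers spilled (4 * 8 = 32 bytes on a 64-bit architecture) and no
  // FPU spills, the flag lives at offset 96 - 32 - 0 - 4 = 60 from the stack
  // pointer, just below the register spill area at the top of the frame.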
  size_t GetShouldDeoptimizeFlagOffset() const {
    DCHECK(IsOptimized());
    QuickMethodFrameInfo frame_info = GetFrameInfo();
    size_t frame_size = frame_info.FrameSizeInBytes();
    size_t core_spill_size =
        POPCOUNT(frame_info.CoreSpillMask()) * GetBytesPerGprSpillLocation(kRuntimeISA);
    size_t fpu_spill_size =
        POPCOUNT(frame_info.FpSpillMask()) * GetBytesPerFprSpillLocation(kRuntimeISA);
    return frame_size - core_spill_size - fpu_spill_size - kShouldDeoptimizeFlagSize;
  }

  // For non-catch handlers. Only used in test code.
  uintptr_t ToNativeQuickPc(ArtMethod* method,
                            const uint32_t dex_pc,
                            bool abort_on_failure = true) const;

  // For catch handlers.
  uintptr_t ToNativeQuickPcForCatchHandlers(ArtMethod* method,
                                            ArrayRef<const uint32_t> dex_pc_list,
                                            /* out */ uint32_t* stack_map_row,
                                            bool abort_on_failure = true) const;

  uint32_t ToDexPc(ArtMethod** frame,
                   const uintptr_t pc,
                   bool abort_on_failure = true) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  void SetHasShouldDeoptimizeFlag() {
    DCHECK(!HasShouldDeoptimizeFlag());
    data_ |= kShouldDeoptimizeMask;
  }

  bool HasShouldDeoptimizeFlag() const {
    return (data_ & kShouldDeoptimizeMask) != 0;
  }

 private:
  static constexpr uint32_t kShouldDeoptimizeMask = 0x80000000;
  static constexpr uint32_t kIsCodeInfoMask = 0x40000000;
  static constexpr uint32_t kCodeInfoMask = 0x3FFFFFFF;  // If kIsCodeInfoMask is set.
  static constexpr uint32_t kCodeSizeMask = 0x3FFFFFFF;  // If kIsCodeInfoMask is clear.
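  // Together these masks define the layout of `data_`:
  //   bit  31     the should-deoptimize flag
  //   bit  30     set: bits 29..0 hold a code info offset; clear: a code size
  //   bits 29..0  code info offset or code size, as selected by bit 30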

  // To avoid confusing a stub with compiled Java code, each stub is prefixed
  // with a 0xFFFFFFFF marker.
  static constexpr uint32_t kInvalidData = 0xFFFFFFFF;

  uint32_t data_ = 0u;  // Combination of fields using the above masks.
  uint8_t code_[0];     // The actual method code.
};

}  // namespace art

#endif  // ART_RUNTIME_OAT_QUICK_METHOD_HEADER_H_