author     2017-06-13 14:11:11 -0700
committer  2017-06-19 16:21:58 -0700
commit     36a296ff674e0aea16b29db965f84f81866b1375 (patch)
tree       9d940463852089016a0d0932368923d04da2c3fe
parent     77c7d1093281bec76d3055541fe41145cdc5f807 (diff)
ART: Refactor stack.h
Factor out LockCountData, ShadowFrame and JavaFrameRootInfo and leave
stack.h for the StackVisitor. Move single-use function to user to remove
DexFile dependency.
Test: m test-art-host
Change-Id: I53d2880917bdf6782856fd2b16f38a0293f3aefc
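The dependency cleanup follows the usual header-split pattern: code that only needs ShadowFrame or LockCountData includes a small dedicated header, while stack.h keeps a forward declaration and no longer drags dex_file.h into every includer (in the diff below, method_handles.h swaps `#include "stack.h"` for `interpreter/shadow_frame.h`, and stack.h retains only `class ShadowFrame;`). A minimal sketch of the pattern, using hypothetical trimmed headers rather than the real ART declarations:

```cpp
// interpreter/shadow_frame.h (hypothetical, heavily trimmed sketch):
// the factored-out type lives in its own small header.
#include <cstdint>

class ShadowFrame {
 public:
  explicit ShadowFrame(uint32_t num_vregs) : num_vregs_(num_vregs) {}
  uint32_t NumberOfVRegs() const { return num_vregs_; }

 private:
  uint32_t num_vregs_;
};

// stack.h (hypothetical sketch): a forward declaration is enough for
// pointer members, so stack.h no longer needs the ShadowFrame header
// or, transitively, dex_file.h.
class ShadowFrame;

class StackVisitor {
 public:
  ShadowFrame* GetCurrentShadowFrame() const { return cur_shadow_frame_; }

 private:
  ShadowFrame* cur_shadow_frame_ = nullptr;
};
```

Only translation units that actually manipulate shadow-frame contents pay for the full definition; everything else compiles against the forward declaration.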
-rw-r--r--  oatdump/oatdump.cc                      |  63
-rw-r--r--  runtime/Android.bp                      |   3
-rw-r--r--  runtime/interpreter/lock_count_data.cc  | 111
-rw-r--r--  runtime/interpreter/lock_count_data.h   |  74
-rw-r--r--  runtime/interpreter/shadow_frame.cc     |  46
-rw-r--r--  runtime/interpreter/shadow_frame.h      | 431
-rw-r--r--  runtime/java_frame_root_info.cc         |  30
-rw-r--r--  runtime/java_frame_root_info.h          |  52
-rw-r--r--  runtime/managed_stack-inl.h             |   2
-rw-r--r--  runtime/method_handles.h                |   4
-rw-r--r--  runtime/openjdkjvmti/ti_heap.cc         |   1
-rw-r--r--  runtime/stack.cc                        | 169
-rw-r--r--  runtime/stack.h                         | 574
-rw-r--r--  runtime/thread.cc                       |   2
14 files changed, 865 insertions, 697 deletions
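The largest single move is GetVRegOffsetFromQuickCode, which leaves StackVisitor and becomes a static helper in oatdump, its only user (first hunk below). As a rough illustration of the sp-relative offset arithmetic it implements (dex locals below the callee-save spills, the callee's ArtMethod* at sp, outs just above it, ins in the caller's frame), here is a standalone sketch; the frame size, spill size, and pointer size used in main() are assumed example values, not ART constants:

```cpp
#include <cstdio>

// Standalone sketch of the vreg-offset arithmetic moved into oatdump below.
// Vregs are 4 bytes each; all concrete numbers here are assumed examples.
int VRegOffset(int reg,
               int registers_size, int ins_size, int outs_size,
               int frame_size, int spill_size, int pointer_size) {
  const int num_regs = registers_size - ins_size;  // locals (non-ins) count
  const int temp_threshold = registers_size;       // first compiler-temp index
  const int max_num_special_temps = 1;             // the ArtMethod* slot
  if (reg == temp_threshold) {
    return 0;                                      // Method* sits at sp
  } else if (reg >= temp_threshold + max_num_special_temps) {
    // Non-special compiler temps live just above the outs.
    int temps_start = outs_size * 4 + pointer_size;
    return temps_start + (reg - temp_threshold - max_num_special_temps) * 4;
  } else if (reg < num_regs) {
    // Dex locals sit below the callee-save spill area.
    int locals_start = frame_size - spill_size - num_regs * 4;
    return locals_start + reg * 4;
  }
  // Ins live in the caller's frame, above this frame and the caller's Method*.
  return frame_size + (reg - num_regs) * 4 + pointer_size;
}

int main() {
  // Example: 5 dex registers, 2 of them ins, 2 outs, a 64-byte frame,
  // spill area = 2 core spills * 8 bytes + 4-byte filler = 20, 8-byte pointers.
  for (int reg = 0; reg < 5; ++reg) {
    printf("v%d -> [sp + #%d]\n", reg, VRegOffset(reg, 5, 2, 2, 64, 20, 8));
  }
  return 0;
}
```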
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc index b3b67e0d2e..d8bafc011a 100644 --- a/oatdump/oatdump.cc +++ b/oatdump/oatdump.cc @@ -1395,6 +1395,54 @@ class OatDumper { method_info); } + static int GetOutVROffset(uint16_t out_num, InstructionSet isa) { + // According to stack model, the first out is above the Method referernce. + return static_cast<size_t>(InstructionSetPointerSize(isa)) + out_num * sizeof(uint32_t); + } + + static uint32_t GetVRegOffsetFromQuickCode(const DexFile::CodeItem* code_item, + uint32_t core_spills, + uint32_t fp_spills, + size_t frame_size, + int reg, + InstructionSet isa) { + PointerSize pointer_size = InstructionSetPointerSize(isa); + if (kIsDebugBuild) { + auto* runtime = Runtime::Current(); + if (runtime != nullptr) { + CHECK_EQ(runtime->GetClassLinker()->GetImagePointerSize(), pointer_size); + } + } + DCHECK_ALIGNED(frame_size, kStackAlignment); + DCHECK_NE(reg, -1); + int spill_size = POPCOUNT(core_spills) * GetBytesPerGprSpillLocation(isa) + + POPCOUNT(fp_spills) * GetBytesPerFprSpillLocation(isa) + + sizeof(uint32_t); // Filler. + int num_regs = code_item->registers_size_ - code_item->ins_size_; + int temp_threshold = code_item->registers_size_; + const int max_num_special_temps = 1; + if (reg == temp_threshold) { + // The current method pointer corresponds to special location on stack. + return 0; + } else if (reg >= temp_threshold + max_num_special_temps) { + /* + * Special temporaries may have custom locations and the logic above deals with that. + * However, non-special temporaries are placed relative to the outs. + */ + int temps_start = code_item->outs_size_ * sizeof(uint32_t) + + static_cast<size_t>(pointer_size) /* art method */; + int relative_offset = (reg - (temp_threshold + max_num_special_temps)) * sizeof(uint32_t); + return temps_start + relative_offset; + } else if (reg < num_regs) { + int locals_start = frame_size - spill_size - num_regs * sizeof(uint32_t); + return locals_start + (reg * sizeof(uint32_t)); + } else { + // Handle ins. 
+ return frame_size + ((reg - num_regs) * sizeof(uint32_t)) + + static_cast<size_t>(pointer_size) /* art method */; + } + } + void DumpVregLocations(std::ostream& os, const OatFile::OatMethod& oat_method, const DexFile::CodeItem* code_item) { if (code_item != nullptr) { @@ -1414,13 +1462,12 @@ class OatDumper { os << "\n\tlocals:"; } - uint32_t offset = StackVisitor::GetVRegOffsetFromQuickCode( - code_item, - oat_method.GetCoreSpillMask(), - oat_method.GetFpSpillMask(), - oat_method.GetFrameSizeInBytes(), - reg, - GetInstructionSet()); + uint32_t offset = GetVRegOffsetFromQuickCode(code_item, + oat_method.GetCoreSpillMask(), + oat_method.GetFpSpillMask(), + oat_method.GetFrameSizeInBytes(), + reg, + GetInstructionSet()); os << " v" << reg << "[sp + #" << offset << "]"; } @@ -1429,7 +1476,7 @@ class OatDumper { os << "\n\touts:"; } - uint32_t offset = StackVisitor::GetOutVROffset(out_reg, GetInstructionSet()); + uint32_t offset = GetOutVROffset(out_reg, GetInstructionSet()); os << " v" << out_reg << "[sp + #" << offset << "]"; } diff --git a/runtime/Android.bp b/runtime/Android.bp index 26e52e012e..20f95c0c74 100644 --- a/runtime/Android.bp +++ b/runtime/Android.bp @@ -105,7 +105,10 @@ cc_defaults { "interpreter/interpreter_common.cc", "interpreter/interpreter_intrinsics.cc", "interpreter/interpreter_switch_impl.cc", + "interpreter/lock_count_data.cc", + "interpreter/shadow_frame.cc", "interpreter/unstarted_runtime.cc", + "java_frame_root_info.cc", "java_vm_ext.cc", "jdwp/jdwp_event.cc", "jdwp/jdwp_expand_buf.cc", diff --git a/runtime/interpreter/lock_count_data.cc b/runtime/interpreter/lock_count_data.cc new file mode 100644 index 0000000000..64b59cd390 --- /dev/null +++ b/runtime/interpreter/lock_count_data.cc @@ -0,0 +1,111 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "lock_count_data.h" + +#include <algorithm> +#include <string> + +#include "android-base/logging.h" +#include "mirror/object-inl.h" +#include "thread.h" + +namespace art { + +void LockCountData::AddMonitor(Thread* self, mirror::Object* obj) { + if (obj == nullptr) { + return; + } + + // If there's an error during enter, we won't have locked the monitor. So check there's no + // exception. + if (self->IsExceptionPending()) { + return; + } + + if (monitors_ == nullptr) { + monitors_.reset(new std::vector<mirror::Object*>()); + } + monitors_->push_back(obj); +} + +void LockCountData::RemoveMonitorOrThrow(Thread* self, const mirror::Object* obj) { + if (obj == nullptr) { + return; + } + bool found_object = false; + if (monitors_ != nullptr) { + // We need to remove one pointer to ref, as duplicates are used for counting recursive locks. + // We arbitrarily choose the first one. + auto it = std::find(monitors_->begin(), monitors_->end(), obj); + if (it != monitors_->end()) { + monitors_->erase(it); + found_object = true; + } + } + if (!found_object) { + // The object wasn't found. Time for an IllegalMonitorStateException. 
+ // The order here isn't fully clear. Assume that any other pending exception is swallowed. + // TODO: Maybe make already pending exception a suppressed exception. + self->ClearException(); + self->ThrowNewExceptionF("Ljava/lang/IllegalMonitorStateException;", + "did not lock monitor on object of type '%s' before unlocking", + const_cast<mirror::Object*>(obj)->PrettyTypeOf().c_str()); + } +} + +// Helper to unlock a monitor. Must be NO_THREAD_SAFETY_ANALYSIS, as we can't statically show +// that the object was locked. +void MonitorExitHelper(Thread* self, mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS { + DCHECK(self != nullptr); + DCHECK(obj != nullptr); + obj->MonitorExit(self); +} + +bool LockCountData::CheckAllMonitorsReleasedOrThrow(Thread* self) { + DCHECK(self != nullptr); + if (monitors_ != nullptr) { + if (!monitors_->empty()) { + // There may be an exception pending, if the method is terminating abruptly. Clear it. + // TODO: Should we add this as a suppressed exception? + self->ClearException(); + + // OK, there are monitors that are still locked. To enforce structured locking (and avoid + // deadlocks) we unlock all of them before we raise the IllegalMonitorState exception. + for (mirror::Object* obj : *monitors_) { + MonitorExitHelper(self, obj); + // If this raised an exception, ignore. TODO: Should we add this as suppressed + // exceptions? + if (self->IsExceptionPending()) { + self->ClearException(); + } + } + // Raise an exception, just give the first object as the sample. + mirror::Object* first = (*monitors_)[0]; + self->ThrowNewExceptionF("Ljava/lang/IllegalMonitorStateException;", + "did not unlock monitor on object of type '%s'", + mirror::Object::PrettyTypeOf(first).c_str()); + + // To make sure this path is not triggered again, clean out the monitors. + monitors_->clear(); + + return false; + } + } + return true; +} + +} // namespace art diff --git a/runtime/interpreter/lock_count_data.h b/runtime/interpreter/lock_count_data.h new file mode 100644 index 0000000000..64874a5db7 --- /dev/null +++ b/runtime/interpreter/lock_count_data.h @@ -0,0 +1,74 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_INTERPRETER_LOCK_COUNT_DATA_H_ +#define ART_RUNTIME_INTERPRETER_LOCK_COUNT_DATA_H_ + +#include <memory> +#include <vector> + +#include "base/mutex.h" + +namespace art { + +namespace mirror { + class Object; +} // namespace mirror + +class Thread; + +// Counting locks by storing object pointers into a vector. Duplicate entries mark recursive locks. +// The vector will be visited with the ShadowFrame during GC (so all the locked-on objects are +// thread roots). +// Note: implementation is split so that the call sites may be optimized to no-ops in case no +// lock counting is necessary. The actual implementation is in the cc file to avoid +// dependencies. +class LockCountData { + public: + // Add the given object to the list of monitors, that is, objects that have been locked. 
This + // will not throw (but be skipped if there is an exception pending on entry). + void AddMonitor(Thread* self, mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_); + + // Try to remove the given object from the monitor list, indicating an unlock operation. + // This will throw an IllegalMonitorStateException (clearing any already pending exception), in + // case that there wasn't a lock recorded for the object. + void RemoveMonitorOrThrow(Thread* self, + const mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_); + + // Check whether all acquired monitors have been released. This will potentially throw an + // IllegalMonitorStateException, clearing any already pending exception. Returns true if the + // check shows that everything is OK wrt/ lock counting, false otherwise. + bool CheckAllMonitorsReleasedOrThrow(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_); + + template <typename T, typename... Args> + void VisitMonitors(T visitor, Args&&... args) REQUIRES_SHARED(Locks::mutator_lock_) { + if (monitors_ != nullptr) { + // Visitors may change the Object*. Be careful with the foreach loop. + for (mirror::Object*& obj : *monitors_) { + visitor(/* inout */ &obj, std::forward<Args>(args)...); + } + } + } + + private: + // Stores references to the locked-on objects. As noted, this should be visited during thread + // marking. + std::unique_ptr<std::vector<mirror::Object*>> monitors_; +}; + +} // namespace art + +#endif // ART_RUNTIME_INTERPRETER_LOCK_COUNT_DATA_H_ diff --git a/runtime/interpreter/shadow_frame.cc b/runtime/interpreter/shadow_frame.cc new file mode 100644 index 0000000000..ab154cf767 --- /dev/null +++ b/runtime/interpreter/shadow_frame.cc @@ -0,0 +1,46 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "shadow_frame.h" + +#include "art_method-inl.h" + +namespace art { + +mirror::Object* ShadowFrame::GetThisObject() const { + ArtMethod* m = GetMethod(); + if (m->IsStatic()) { + return nullptr; + } else if (m->IsNative()) { + return GetVRegReference(0); + } else { + const DexFile::CodeItem* code_item = m->GetCodeItem(); + CHECK(code_item != nullptr) << ArtMethod::PrettyMethod(m); + uint16_t reg = code_item->registers_size_ - code_item->ins_size_; + return GetVRegReference(reg); + } +} + +mirror::Object* ShadowFrame::GetThisObject(uint16_t num_ins) const { + ArtMethod* m = GetMethod(); + if (m->IsStatic()) { + return nullptr; + } else { + return GetVRegReference(NumberOfVRegs() - num_ins); + } +} + +} // namespace art diff --git a/runtime/interpreter/shadow_frame.h b/runtime/interpreter/shadow_frame.h new file mode 100644 index 0000000000..69b2382cbc --- /dev/null +++ b/runtime/interpreter/shadow_frame.h @@ -0,0 +1,431 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_INTERPRETER_SHADOW_FRAME_H_ +#define ART_RUNTIME_INTERPRETER_SHADOW_FRAME_H_ + +#include <cstring> +#include <stdint.h> +#include <string> + +#include "base/macros.h" +#include "base/mutex.h" +#include "dex_file.h" +#include "lock_count_data.h" +#include "read_barrier.h" +#include "stack_reference.h" +#include "verify_object.h" + +namespace art { + +namespace mirror { + class Object; +} // namespace mirror + +class ArtMethod; +class ShadowFrame; +class Thread; +union JValue; + +// Forward declaration. Just calls the destructor. +struct ShadowFrameDeleter; +using ShadowFrameAllocaUniquePtr = std::unique_ptr<ShadowFrame, ShadowFrameDeleter>; + +// ShadowFrame has 2 possible layouts: +// - interpreter - separate VRegs and reference arrays. References are in the reference array. +// - JNI - just VRegs, but where every VReg holds a reference. +class ShadowFrame { + public: + // Compute size of ShadowFrame in bytes assuming it has a reference array. + static size_t ComputeSize(uint32_t num_vregs) { + return sizeof(ShadowFrame) + (sizeof(uint32_t) * num_vregs) + + (sizeof(StackReference<mirror::Object>) * num_vregs); + } + + // Create ShadowFrame in heap for deoptimization. + static ShadowFrame* CreateDeoptimizedFrame(uint32_t num_vregs, ShadowFrame* link, + ArtMethod* method, uint32_t dex_pc) { + uint8_t* memory = new uint8_t[ComputeSize(num_vregs)]; + return CreateShadowFrameImpl(num_vregs, link, method, dex_pc, memory); + } + + // Delete a ShadowFrame allocated on the heap for deoptimization. + static void DeleteDeoptimizedFrame(ShadowFrame* sf) { + sf->~ShadowFrame(); // Explicitly destruct. + uint8_t* memory = reinterpret_cast<uint8_t*>(sf); + delete[] memory; + } + + // Create a shadow frame in a fresh alloca. This needs to be in the context of the caller. + // Inlining doesn't work, the compiler will still undo the alloca. So this needs to be a macro. +#define CREATE_SHADOW_FRAME(num_vregs, link, method, dex_pc) ({ \ + size_t frame_size = ShadowFrame::ComputeSize(num_vregs); \ + void* alloca_mem = alloca(frame_size); \ + ShadowFrameAllocaUniquePtr( \ + ShadowFrame::CreateShadowFrameImpl((num_vregs), (link), (method), (dex_pc), \ + (alloca_mem))); \ + }) + + ~ShadowFrame() {} + + // TODO(iam): Clean references array up since they're always there, + // we don't need to do conditionals. + bool HasReferenceArray() const { + return true; + } + + uint32_t NumberOfVRegs() const { + return number_of_vregs_; + } + + uint32_t GetDexPC() const { + return (dex_pc_ptr_ == nullptr) ? 
dex_pc_ : dex_pc_ptr_ - code_item_->insns_; + } + + int16_t GetCachedHotnessCountdown() const { + return cached_hotness_countdown_; + } + + void SetCachedHotnessCountdown(int16_t cached_hotness_countdown) { + cached_hotness_countdown_ = cached_hotness_countdown; + } + + int16_t GetHotnessCountdown() const { + return hotness_countdown_; + } + + void SetHotnessCountdown(int16_t hotness_countdown) { + hotness_countdown_ = hotness_countdown; + } + + void SetDexPC(uint32_t dex_pc) { + dex_pc_ = dex_pc; + dex_pc_ptr_ = nullptr; + } + + ShadowFrame* GetLink() const { + return link_; + } + + void SetLink(ShadowFrame* frame) { + DCHECK_NE(this, frame); + link_ = frame; + } + + int32_t GetVReg(size_t i) const { + DCHECK_LT(i, NumberOfVRegs()); + const uint32_t* vreg = &vregs_[i]; + return *reinterpret_cast<const int32_t*>(vreg); + } + + // Shorts are extended to Ints in VRegs. Interpreter intrinsics needs them as shorts. + int16_t GetVRegShort(size_t i) const { + return static_cast<int16_t>(GetVReg(i)); + } + + uint32_t* GetVRegAddr(size_t i) { + return &vregs_[i]; + } + + uint32_t* GetShadowRefAddr(size_t i) { + DCHECK(HasReferenceArray()); + DCHECK_LT(i, NumberOfVRegs()); + return &vregs_[i + NumberOfVRegs()]; + } + + void SetCodeItem(const DexFile::CodeItem* code_item) { + code_item_ = code_item; + } + + const DexFile::CodeItem* GetCodeItem() const { + return code_item_; + } + + float GetVRegFloat(size_t i) const { + DCHECK_LT(i, NumberOfVRegs()); + // NOTE: Strict-aliasing? + const uint32_t* vreg = &vregs_[i]; + return *reinterpret_cast<const float*>(vreg); + } + + int64_t GetVRegLong(size_t i) const { + DCHECK_LT(i, NumberOfVRegs()); + const uint32_t* vreg = &vregs_[i]; + typedef const int64_t unaligned_int64 __attribute__ ((aligned (4))); + return *reinterpret_cast<unaligned_int64*>(vreg); + } + + double GetVRegDouble(size_t i) const { + DCHECK_LT(i, NumberOfVRegs()); + const uint32_t* vreg = &vregs_[i]; + typedef const double unaligned_double __attribute__ ((aligned (4))); + return *reinterpret_cast<unaligned_double*>(vreg); + } + + // Look up the reference given its virtual register number. + // If this returns non-null then this does not mean the vreg is currently a reference + // on non-moving collectors. Check that the raw reg with GetVReg is equal to this if not certain. + template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + mirror::Object* GetVRegReference(size_t i) const REQUIRES_SHARED(Locks::mutator_lock_) { + DCHECK_LT(i, NumberOfVRegs()); + mirror::Object* ref; + if (HasReferenceArray()) { + ref = References()[i].AsMirrorPtr(); + } else { + const uint32_t* vreg_ptr = &vregs_[i]; + ref = reinterpret_cast<const StackReference<mirror::Object>*>(vreg_ptr)->AsMirrorPtr(); + } + if (kUseReadBarrier) { + ReadBarrier::AssertToSpaceInvariant(ref); + } + if (kVerifyFlags & kVerifyReads) { + VerifyObject(ref); + } + return ref; + } + + // Get view of vregs as range of consecutive arguments starting at i. + uint32_t* GetVRegArgs(size_t i) { + return &vregs_[i]; + } + + void SetVReg(size_t i, int32_t val) { + DCHECK_LT(i, NumberOfVRegs()); + uint32_t* vreg = &vregs_[i]; + *reinterpret_cast<int32_t*>(vreg) = val; + // This is needed for moving collectors since these can update the vreg references if they + // happen to agree with references in the reference array. 
+ if (kMovingCollector && HasReferenceArray()) { + References()[i].Clear(); + } + } + + void SetVRegFloat(size_t i, float val) { + DCHECK_LT(i, NumberOfVRegs()); + uint32_t* vreg = &vregs_[i]; + *reinterpret_cast<float*>(vreg) = val; + // This is needed for moving collectors since these can update the vreg references if they + // happen to agree with references in the reference array. + if (kMovingCollector && HasReferenceArray()) { + References()[i].Clear(); + } + } + + void SetVRegLong(size_t i, int64_t val) { + DCHECK_LT(i, NumberOfVRegs()); + uint32_t* vreg = &vregs_[i]; + typedef int64_t unaligned_int64 __attribute__ ((aligned (4))); + *reinterpret_cast<unaligned_int64*>(vreg) = val; + // This is needed for moving collectors since these can update the vreg references if they + // happen to agree with references in the reference array. + if (kMovingCollector && HasReferenceArray()) { + References()[i].Clear(); + References()[i + 1].Clear(); + } + } + + void SetVRegDouble(size_t i, double val) { + DCHECK_LT(i, NumberOfVRegs()); + uint32_t* vreg = &vregs_[i]; + typedef double unaligned_double __attribute__ ((aligned (4))); + *reinterpret_cast<unaligned_double*>(vreg) = val; + // This is needed for moving collectors since these can update the vreg references if they + // happen to agree with references in the reference array. + if (kMovingCollector && HasReferenceArray()) { + References()[i].Clear(); + References()[i + 1].Clear(); + } + } + + template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + void SetVRegReference(size_t i, mirror::Object* val) REQUIRES_SHARED(Locks::mutator_lock_) { + DCHECK_LT(i, NumberOfVRegs()); + if (kVerifyFlags & kVerifyWrites) { + VerifyObject(val); + } + if (kUseReadBarrier) { + ReadBarrier::AssertToSpaceInvariant(val); + } + uint32_t* vreg = &vregs_[i]; + reinterpret_cast<StackReference<mirror::Object>*>(vreg)->Assign(val); + if (HasReferenceArray()) { + References()[i].Assign(val); + } + } + + void SetMethod(ArtMethod* method) REQUIRES(Locks::mutator_lock_) { + DCHECK(method != nullptr); + DCHECK(method_ != nullptr); + method_ = method; + } + + ArtMethod* GetMethod() const REQUIRES_SHARED(Locks::mutator_lock_) { + DCHECK(method_ != nullptr); + return method_; + } + + mirror::Object* GetThisObject() const REQUIRES_SHARED(Locks::mutator_lock_); + + mirror::Object* GetThisObject(uint16_t num_ins) const REQUIRES_SHARED(Locks::mutator_lock_); + + bool Contains(StackReference<mirror::Object>* shadow_frame_entry_obj) const { + if (HasReferenceArray()) { + return ((&References()[0] <= shadow_frame_entry_obj) && + (shadow_frame_entry_obj <= (&References()[NumberOfVRegs() - 1]))); + } else { + uint32_t* shadow_frame_entry = reinterpret_cast<uint32_t*>(shadow_frame_entry_obj); + return ((&vregs_[0] <= shadow_frame_entry) && + (shadow_frame_entry <= (&vregs_[NumberOfVRegs() - 1]))); + } + } + + LockCountData& GetLockCountData() { + return lock_count_data_; + } + + static size_t LockCountDataOffset() { + return OFFSETOF_MEMBER(ShadowFrame, lock_count_data_); + } + + static size_t LinkOffset() { + return OFFSETOF_MEMBER(ShadowFrame, link_); + } + + static size_t MethodOffset() { + return OFFSETOF_MEMBER(ShadowFrame, method_); + } + + static size_t DexPCOffset() { + return OFFSETOF_MEMBER(ShadowFrame, dex_pc_); + } + + static size_t NumberOfVRegsOffset() { + return OFFSETOF_MEMBER(ShadowFrame, number_of_vregs_); + } + + static size_t VRegsOffset() { + return OFFSETOF_MEMBER(ShadowFrame, vregs_); + } + + static size_t ResultRegisterOffset() { + return 
OFFSETOF_MEMBER(ShadowFrame, result_register_); + } + + static size_t DexPCPtrOffset() { + return OFFSETOF_MEMBER(ShadowFrame, dex_pc_ptr_); + } + + static size_t CodeItemOffset() { + return OFFSETOF_MEMBER(ShadowFrame, code_item_); + } + + static size_t CachedHotnessCountdownOffset() { + return OFFSETOF_MEMBER(ShadowFrame, cached_hotness_countdown_); + } + + static size_t HotnessCountdownOffset() { + return OFFSETOF_MEMBER(ShadowFrame, hotness_countdown_); + } + + // Create ShadowFrame for interpreter using provided memory. + static ShadowFrame* CreateShadowFrameImpl(uint32_t num_vregs, + ShadowFrame* link, + ArtMethod* method, + uint32_t dex_pc, + void* memory) { + return new (memory) ShadowFrame(num_vregs, link, method, dex_pc, true); + } + + const uint16_t* GetDexPCPtr() { + return dex_pc_ptr_; + } + + void SetDexPCPtr(uint16_t* dex_pc_ptr) { + dex_pc_ptr_ = dex_pc_ptr; + } + + JValue* GetResultRegister() { + return result_register_; + } + + private: + ShadowFrame(uint32_t num_vregs, ShadowFrame* link, ArtMethod* method, + uint32_t dex_pc, bool has_reference_array) + : link_(link), + method_(method), + result_register_(nullptr), + dex_pc_ptr_(nullptr), + code_item_(nullptr), + number_of_vregs_(num_vregs), + dex_pc_(dex_pc), + cached_hotness_countdown_(0), + hotness_countdown_(0) { + // TODO(iam): Remove this parameter, it's an an artifact of portable removal + DCHECK(has_reference_array); + if (has_reference_array) { + memset(vregs_, 0, num_vregs * (sizeof(uint32_t) + sizeof(StackReference<mirror::Object>))); + } else { + memset(vregs_, 0, num_vregs * sizeof(uint32_t)); + } + } + + const StackReference<mirror::Object>* References() const { + DCHECK(HasReferenceArray()); + const uint32_t* vreg_end = &vregs_[NumberOfVRegs()]; + return reinterpret_cast<const StackReference<mirror::Object>*>(vreg_end); + } + + StackReference<mirror::Object>* References() { + return const_cast<StackReference<mirror::Object>*>( + const_cast<const ShadowFrame*>(this)->References()); + } + + // Link to previous shadow frame or null. + ShadowFrame* link_; + ArtMethod* method_; + JValue* result_register_; + const uint16_t* dex_pc_ptr_; + const DexFile::CodeItem* code_item_; + LockCountData lock_count_data_; // This may contain GC roots when lock counting is active. + const uint32_t number_of_vregs_; + uint32_t dex_pc_; + int16_t cached_hotness_countdown_; + int16_t hotness_countdown_; + + // This is a two-part array: + // - [0..number_of_vregs) holds the raw virtual registers, and each element here is always 4 + // bytes. + // - [number_of_vregs..number_of_vregs*2) holds only reference registers. Each element here is + // ptr-sized. + // In other words when a primitive is stored in vX, the second (reference) part of the array will + // be null. When a reference is stored in vX, the second (reference) part of the array will be a + // copy of vX. 
+ uint32_t vregs_[0]; + + DISALLOW_IMPLICIT_CONSTRUCTORS(ShadowFrame); +}; + +struct ShadowFrameDeleter { + inline void operator()(ShadowFrame* frame) { + if (frame != nullptr) { + frame->~ShadowFrame(); + } + } +}; + +} // namespace art + +#endif // ART_RUNTIME_INTERPRETER_SHADOW_FRAME_H_ diff --git a/runtime/java_frame_root_info.cc b/runtime/java_frame_root_info.cc new file mode 100644 index 0000000000..dd3be5d415 --- /dev/null +++ b/runtime/java_frame_root_info.cc @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "java_frame_root_info.h" + +#include "stack.h" + +namespace art { + +void JavaFrameRootInfo::Describe(std::ostream& os) const { + const StackVisitor* visitor = stack_visitor_; + CHECK(visitor != nullptr); + os << "Type=" << GetType() << " thread_id=" << GetThreadId() << " location=" << + visitor->DescribeLocation() << " vreg=" << vreg_; +} + +} // namespace art diff --git a/runtime/java_frame_root_info.h b/runtime/java_frame_root_info.h new file mode 100644 index 0000000000..25ac6e2a31 --- /dev/null +++ b/runtime/java_frame_root_info.h @@ -0,0 +1,52 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_RUNTIME_JAVA_FRAME_ROOT_INFO_H_ +#define ART_RUNTIME_JAVA_FRAME_ROOT_INFO_H_ + +#include <iosfwd> + +#include "base/macros.h" +#include "base/mutex.h" +#include "gc_root.h" + +namespace art { + +class StackVisitor; + +class JavaFrameRootInfo FINAL : public RootInfo { + public: + JavaFrameRootInfo(uint32_t thread_id, const StackVisitor* stack_visitor, size_t vreg) + : RootInfo(kRootJavaFrame, thread_id), stack_visitor_(stack_visitor), vreg_(vreg) { + } + void Describe(std::ostream& os) const OVERRIDE + REQUIRES_SHARED(Locks::mutator_lock_); + + size_t GetVReg() const { + return vreg_; + } + const StackVisitor* GetVisitor() const { + return stack_visitor_; + } + + private: + const StackVisitor* const stack_visitor_; + const size_t vreg_; +}; + +} // namespace art + +#endif // ART_RUNTIME_JAVA_FRAME_ROOT_INFO_H_ diff --git a/runtime/managed_stack-inl.h b/runtime/managed_stack-inl.h index f3f31cf8e8..bdf8100cc0 100644 --- a/runtime/managed_stack-inl.h +++ b/runtime/managed_stack-inl.h @@ -23,7 +23,7 @@ #include <stdint.h> #include <string> -#include "stack.h" +#include "interpreter/shadow_frame.h" namespace art { diff --git a/runtime/method_handles.h b/runtime/method_handles.h index e02e62052c..55680f09e7 100644 --- a/runtime/method_handles.h +++ b/runtime/method_handles.h @@ -21,9 +21,9 @@ #include "dex_instruction.h" #include "handle.h" +#include "interpreter/shadow_frame.h" #include "jvalue.h" #include "mirror/class.h" -#include "stack.h" namespace art { @@ -32,8 +32,6 @@ namespace mirror { class MethodType; } // namespace mirror -class ShadowFrame; - // Returns true if there is a possible conversion from |from| to |to| // for a MethodHandle parameter. bool IsParameterTypeConvertible(ObjPtr<mirror::Class> from, diff --git a/runtime/openjdkjvmti/ti_heap.cc b/runtime/openjdkjvmti/ti_heap.cc index 319b1c2a9c..b3bc6764c9 100644 --- a/runtime/openjdkjvmti/ti_heap.cc +++ b/runtime/openjdkjvmti/ti_heap.cc @@ -23,6 +23,7 @@ #include "class_linker.h" #include "gc/heap.h" #include "gc_root-inl.h" +#include "java_frame_root_info.h" #include "jni_env_ext.h" #include "jni_internal.h" #include "jvmti_weak_table-inl.h" diff --git a/runtime/stack.cc b/runtime/stack.cc index eec0460015..19df0d26a1 100644 --- a/runtime/stack.cc +++ b/runtime/stack.cc @@ -27,6 +27,7 @@ #include "entrypoints/runtime_asm_entrypoints.h" #include "gc/space/image_space.h" #include "gc/space/space-inl.h" +#include "interpreter/shadow_frame.h" #include "jit/jit.h" #include "jit/jit_code_cache.h" #include "linear_alloc.h" @@ -39,7 +40,6 @@ #include "runtime.h" #include "thread.h" #include "thread_list.h" -#include "verify_object.h" namespace art { @@ -47,29 +47,6 @@ using android::base::StringPrintf; static constexpr bool kDebugStackWalk = false; -mirror::Object* ShadowFrame::GetThisObject() const { - ArtMethod* m = GetMethod(); - if (m->IsStatic()) { - return nullptr; - } else if (m->IsNative()) { - return GetVRegReference(0); - } else { - const DexFile::CodeItem* code_item = m->GetCodeItem(); - CHECK(code_item != nullptr) << ArtMethod::PrettyMethod(m); - uint16_t reg = code_item->registers_size_ - code_item->ins_size_; - return GetVRegReference(reg); - } -} - -mirror::Object* ShadowFrame::GetThisObject(uint16_t num_ins) const { - ArtMethod* m = GetMethod(); - if (m->IsStatic()) { - return nullptr; - } else { - return GetVRegReference(NumberOfVRegs() - num_ins); - } -} - StackVisitor::StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind, @@ -97,9 +74,10 @@ StackVisitor::StackVisitor(Thread* 
thread, } } -InlineInfo StackVisitor::GetCurrentInlineInfo() const { - const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader(); - uint32_t native_pc_offset = method_header->NativeQuickPcOffset(cur_quick_frame_pc_); +static InlineInfo GetCurrentInlineInfo(const OatQuickMethodHeader* method_header, + uintptr_t cur_quick_frame_pc) + REQUIRES_SHARED(Locks::mutator_lock_) { + uint32_t native_pc_offset = method_header->NativeQuickPcOffset(cur_quick_frame_pc); CodeInfo code_info = method_header->GetOptimizedCodeInfo(); CodeInfoEncoding encoding = code_info.ExtractEncoding(); StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding); @@ -113,7 +91,8 @@ ArtMethod* StackVisitor::GetMethod() const { } else if (cur_quick_frame_ != nullptr) { if (IsInInlinedFrame()) { size_t depth_in_stack_map = current_inlining_depth_ - 1; - InlineInfo inline_info = GetCurrentInlineInfo(); + InlineInfo inline_info = GetCurrentInlineInfo(GetCurrentOatQuickMethodHeader(), + cur_quick_frame_pc_); const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader(); CodeInfoEncoding encoding = method_header->GetOptimizedCodeInfo().ExtractEncoding(); MethodInfo method_info = method_header->GetOptimizedMethodInfo(); @@ -138,8 +117,8 @@ uint32_t StackVisitor::GetDexPc(bool abort_on_failure) const { size_t depth_in_stack_map = current_inlining_depth_ - 1; const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader(); CodeInfoEncoding encoding = method_header->GetOptimizedCodeInfo().ExtractEncoding(); - return GetCurrentInlineInfo().GetDexPcAtDepth(encoding.inline_info.encoding, - depth_in_stack_map); + return GetCurrentInlineInfo(GetCurrentOatQuickMethodHeader(), cur_quick_frame_pc_). + GetDexPcAtDepth(encoding.inline_info.encoding, depth_in_stack_map); } else if (cur_oat_quick_method_header_ == nullptr) { return DexFile::kDexNoIndex; } else { @@ -924,134 +903,4 @@ void StackVisitor::WalkStack(bool include_transitions) { template void StackVisitor::WalkStack<StackVisitor::CountTransitions::kYes>(bool); template void StackVisitor::WalkStack<StackVisitor::CountTransitions::kNo>(bool); -void JavaFrameRootInfo::Describe(std::ostream& os) const { - const StackVisitor* visitor = stack_visitor_; - CHECK(visitor != nullptr); - os << "Type=" << GetType() << " thread_id=" << GetThreadId() << " location=" << - visitor->DescribeLocation() << " vreg=" << vreg_; -} - -int StackVisitor::GetVRegOffsetFromQuickCode(const DexFile::CodeItem* code_item, - uint32_t core_spills, uint32_t fp_spills, - size_t frame_size, int reg, InstructionSet isa) { - PointerSize pointer_size = InstructionSetPointerSize(isa); - if (kIsDebugBuild) { - auto* runtime = Runtime::Current(); - if (runtime != nullptr) { - CHECK_EQ(runtime->GetClassLinker()->GetImagePointerSize(), pointer_size); - } - } - DCHECK_ALIGNED(frame_size, kStackAlignment); - DCHECK_NE(reg, -1); - int spill_size = POPCOUNT(core_spills) * GetBytesPerGprSpillLocation(isa) - + POPCOUNT(fp_spills) * GetBytesPerFprSpillLocation(isa) - + sizeof(uint32_t); // Filler. - int num_regs = code_item->registers_size_ - code_item->ins_size_; - int temp_threshold = code_item->registers_size_; - const int max_num_special_temps = 1; - if (reg == temp_threshold) { - // The current method pointer corresponds to special location on stack. - return 0; - } else if (reg >= temp_threshold + max_num_special_temps) { - /* - * Special temporaries may have custom locations and the logic above deals with that. 
- * However, non-special temporaries are placed relative to the outs. - */ - int temps_start = code_item->outs_size_ * sizeof(uint32_t) - + static_cast<size_t>(pointer_size) /* art method */; - int relative_offset = (reg - (temp_threshold + max_num_special_temps)) * sizeof(uint32_t); - return temps_start + relative_offset; - } else if (reg < num_regs) { - int locals_start = frame_size - spill_size - num_regs * sizeof(uint32_t); - return locals_start + (reg * sizeof(uint32_t)); - } else { - // Handle ins. - return frame_size + ((reg - num_regs) * sizeof(uint32_t)) - + static_cast<size_t>(pointer_size) /* art method */; - } -} - -void LockCountData::AddMonitor(Thread* self, mirror::Object* obj) { - if (obj == nullptr) { - return; - } - - // If there's an error during enter, we won't have locked the monitor. So check there's no - // exception. - if (self->IsExceptionPending()) { - return; - } - - if (monitors_ == nullptr) { - monitors_.reset(new std::vector<mirror::Object*>()); - } - monitors_->push_back(obj); -} - -void LockCountData::RemoveMonitorOrThrow(Thread* self, const mirror::Object* obj) { - if (obj == nullptr) { - return; - } - bool found_object = false; - if (monitors_ != nullptr) { - // We need to remove one pointer to ref, as duplicates are used for counting recursive locks. - // We arbitrarily choose the first one. - auto it = std::find(monitors_->begin(), monitors_->end(), obj); - if (it != monitors_->end()) { - monitors_->erase(it); - found_object = true; - } - } - if (!found_object) { - // The object wasn't found. Time for an IllegalMonitorStateException. - // The order here isn't fully clear. Assume that any other pending exception is swallowed. - // TODO: Maybe make already pending exception a suppressed exception. - self->ClearException(); - self->ThrowNewExceptionF("Ljava/lang/IllegalMonitorStateException;", - "did not lock monitor on object of type '%s' before unlocking", - const_cast<mirror::Object*>(obj)->PrettyTypeOf().c_str()); - } -} - -// Helper to unlock a monitor. Must be NO_THREAD_SAFETY_ANALYSIS, as we can't statically show -// that the object was locked. -void MonitorExitHelper(Thread* self, mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS { - DCHECK(self != nullptr); - DCHECK(obj != nullptr); - obj->MonitorExit(self); -} - -bool LockCountData::CheckAllMonitorsReleasedOrThrow(Thread* self) { - DCHECK(self != nullptr); - if (monitors_ != nullptr) { - if (!monitors_->empty()) { - // There may be an exception pending, if the method is terminating abruptly. Clear it. - // TODO: Should we add this as a suppressed exception? - self->ClearException(); - - // OK, there are monitors that are still locked. To enforce structured locking (and avoid - // deadlocks) we unlock all of them before we raise the IllegalMonitorState exception. - for (mirror::Object* obj : *monitors_) { - MonitorExitHelper(self, obj); - // If this raised an exception, ignore. TODO: Should we add this as suppressed - // exceptions? - if (self->IsExceptionPending()) { - self->ClearException(); - } - } - // Raise an exception, just give the first object as the sample. - mirror::Object* first = (*monitors_)[0]; - self->ThrowNewExceptionF("Ljava/lang/IllegalMonitorStateException;", - "did not unlock monitor on object of type '%s'", - mirror::Object::PrettyTypeOf(first).c_str()); - - // To make sure this path is not triggered again, clean out the monitors. 
- monitors_->clear(); - - return false; - } - } - return true; -} - } // namespace art diff --git a/runtime/stack.h b/runtime/stack.h index fd86f5d2b1..4ef9487724 100644 --- a/runtime/stack.h +++ b/runtime/stack.h @@ -20,15 +20,9 @@ #include <stdint.h> #include <string> -#include "arch/instruction_set.h" #include "base/macros.h" #include "base/mutex.h" -#include "dex_file.h" -#include "gc_root.h" #include "quick/quick_method_frame_info.h" -#include "read_barrier.h" -#include "stack_reference.h" -#include "verify_object.h" namespace art { @@ -39,11 +33,8 @@ namespace mirror { class ArtMethod; class Context; class HandleScope; -class InlineInfo; class OatQuickMethodHeader; -class ScopedObjectAccess; class ShadowFrame; -class StackVisitor; class Thread; union JValue; @@ -62,455 +53,60 @@ enum VRegKind { }; std::ostream& operator<<(std::ostream& os, const VRegKind& rhs); -// Forward declaration. Just calls the destructor. -struct ShadowFrameDeleter; -using ShadowFrameAllocaUniquePtr = std::unique_ptr<ShadowFrame, ShadowFrameDeleter>; - // Size in bytes of the should_deoptimize flag on stack. // We just need 4 bytes for our purpose regardless of the architecture. Frame size // calculation will automatically do alignment for the final frame size. static constexpr size_t kShouldDeoptimizeFlagSize = 4; -// Counting locks by storing object pointers into a vector. Duplicate entries mark recursive locks. -// The vector will be visited with the ShadowFrame during GC (so all the locked-on objects are -// thread roots). -// Note: implementation is split so that the call sites may be optimized to no-ops in case no -// lock counting is necessary. The actual implementation is in the cc file to avoid -// dependencies. -class LockCountData { - public: - // Add the given object to the list of monitors, that is, objects that have been locked. This - // will not throw (but be skipped if there is an exception pending on entry). - void AddMonitor(Thread* self, mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_); - - // Try to remove the given object from the monitor list, indicating an unlock operation. - // This will throw an IllegalMonitorStateException (clearing any already pending exception), in - // case that there wasn't a lock recorded for the object. - void RemoveMonitorOrThrow(Thread* self, - const mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_); - - // Check whether all acquired monitors have been released. This will potentially throw an - // IllegalMonitorStateException, clearing any already pending exception. Returns true if the - // check shows that everything is OK wrt/ lock counting, false otherwise. - bool CheckAllMonitorsReleasedOrThrow(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_); - - template <typename T, typename... Args> - void VisitMonitors(T visitor, Args&&... args) REQUIRES_SHARED(Locks::mutator_lock_) { - if (monitors_ != nullptr) { - // Visitors may change the Object*. Be careful with the foreach loop. - for (mirror::Object*& obj : *monitors_) { - visitor(/* inout */ &obj, std::forward<Args>(args)...); - } - } - } - - private: - // Stores references to the locked-on objects. As noted, this should be visited during thread - // marking. - std::unique_ptr<std::vector<mirror::Object*>> monitors_; -}; - -// ShadowFrame has 2 possible layouts: -// - interpreter - separate VRegs and reference arrays. References are in the reference array. -// - JNI - just VRegs, but where every VReg holds a reference. 
-class ShadowFrame { - public: - // Compute size of ShadowFrame in bytes assuming it has a reference array. - static size_t ComputeSize(uint32_t num_vregs) { - return sizeof(ShadowFrame) + (sizeof(uint32_t) * num_vregs) + - (sizeof(StackReference<mirror::Object>) * num_vregs); - } - - // Create ShadowFrame in heap for deoptimization. - static ShadowFrame* CreateDeoptimizedFrame(uint32_t num_vregs, ShadowFrame* link, - ArtMethod* method, uint32_t dex_pc) { - uint8_t* memory = new uint8_t[ComputeSize(num_vregs)]; - return CreateShadowFrameImpl(num_vregs, link, method, dex_pc, memory); - } - - // Delete a ShadowFrame allocated on the heap for deoptimization. - static void DeleteDeoptimizedFrame(ShadowFrame* sf) { - sf->~ShadowFrame(); // Explicitly destruct. - uint8_t* memory = reinterpret_cast<uint8_t*>(sf); - delete[] memory; - } - - // Create a shadow frame in a fresh alloca. This needs to be in the context of the caller. - // Inlining doesn't work, the compiler will still undo the alloca. So this needs to be a macro. -#define CREATE_SHADOW_FRAME(num_vregs, link, method, dex_pc) ({ \ - size_t frame_size = ShadowFrame::ComputeSize(num_vregs); \ - void* alloca_mem = alloca(frame_size); \ - ShadowFrameAllocaUniquePtr( \ - ShadowFrame::CreateShadowFrameImpl((num_vregs), (link), (method), (dex_pc), \ - (alloca_mem))); \ - }) - - ~ShadowFrame() {} - - // TODO(iam): Clean references array up since they're always there, - // we don't need to do conditionals. - bool HasReferenceArray() const { - return true; - } - - uint32_t NumberOfVRegs() const { - return number_of_vregs_; - } - - uint32_t GetDexPC() const { - return (dex_pc_ptr_ == nullptr) ? dex_pc_ : dex_pc_ptr_ - code_item_->insns_; - } - - int16_t GetCachedHotnessCountdown() const { - return cached_hotness_countdown_; - } - - void SetCachedHotnessCountdown(int16_t cached_hotness_countdown) { - cached_hotness_countdown_ = cached_hotness_countdown; - } - - int16_t GetHotnessCountdown() const { - return hotness_countdown_; - } - - void SetHotnessCountdown(int16_t hotness_countdown) { - hotness_countdown_ = hotness_countdown; - } - - void SetDexPC(uint32_t dex_pc) { - dex_pc_ = dex_pc; - dex_pc_ptr_ = nullptr; - } - - ShadowFrame* GetLink() const { - return link_; - } - - void SetLink(ShadowFrame* frame) { - DCHECK_NE(this, frame); - link_ = frame; - } - - int32_t GetVReg(size_t i) const { - DCHECK_LT(i, NumberOfVRegs()); - const uint32_t* vreg = &vregs_[i]; - return *reinterpret_cast<const int32_t*>(vreg); - } - - // Shorts are extended to Ints in VRegs. Interpreter intrinsics needs them as shorts. - int16_t GetVRegShort(size_t i) const { - return static_cast<int16_t>(GetVReg(i)); - } - - uint32_t* GetVRegAddr(size_t i) { - return &vregs_[i]; - } - - uint32_t* GetShadowRefAddr(size_t i) { - DCHECK(HasReferenceArray()); - DCHECK_LT(i, NumberOfVRegs()); - return &vregs_[i + NumberOfVRegs()]; - } - - void SetCodeItem(const DexFile::CodeItem* code_item) { - code_item_ = code_item; - } - - const DexFile::CodeItem* GetCodeItem() const { - return code_item_; - } - - float GetVRegFloat(size_t i) const { - DCHECK_LT(i, NumberOfVRegs()); - // NOTE: Strict-aliasing? 
- const uint32_t* vreg = &vregs_[i]; - return *reinterpret_cast<const float*>(vreg); - } - - int64_t GetVRegLong(size_t i) const { - DCHECK_LT(i, NumberOfVRegs()); - const uint32_t* vreg = &vregs_[i]; - typedef const int64_t unaligned_int64 __attribute__ ((aligned (4))); - return *reinterpret_cast<unaligned_int64*>(vreg); - } - - double GetVRegDouble(size_t i) const { - DCHECK_LT(i, NumberOfVRegs()); - const uint32_t* vreg = &vregs_[i]; - typedef const double unaligned_double __attribute__ ((aligned (4))); - return *reinterpret_cast<unaligned_double*>(vreg); - } - - // Look up the reference given its virtual register number. - // If this returns non-null then this does not mean the vreg is currently a reference - // on non-moving collectors. Check that the raw reg with GetVReg is equal to this if not certain. - template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - mirror::Object* GetVRegReference(size_t i) const REQUIRES_SHARED(Locks::mutator_lock_) { - DCHECK_LT(i, NumberOfVRegs()); - mirror::Object* ref; - if (HasReferenceArray()) { - ref = References()[i].AsMirrorPtr(); - } else { - const uint32_t* vreg_ptr = &vregs_[i]; - ref = reinterpret_cast<const StackReference<mirror::Object>*>(vreg_ptr)->AsMirrorPtr(); - } - if (kUseReadBarrier) { - ReadBarrier::AssertToSpaceInvariant(ref); - } - if (kVerifyFlags & kVerifyReads) { - VerifyObject(ref); - } - return ref; - } - - // Get view of vregs as range of consecutive arguments starting at i. - uint32_t* GetVRegArgs(size_t i) { - return &vregs_[i]; - } - - void SetVReg(size_t i, int32_t val) { - DCHECK_LT(i, NumberOfVRegs()); - uint32_t* vreg = &vregs_[i]; - *reinterpret_cast<int32_t*>(vreg) = val; - // This is needed for moving collectors since these can update the vreg references if they - // happen to agree with references in the reference array. - if (kMovingCollector && HasReferenceArray()) { - References()[i].Clear(); - } - } - - void SetVRegFloat(size_t i, float val) { - DCHECK_LT(i, NumberOfVRegs()); - uint32_t* vreg = &vregs_[i]; - *reinterpret_cast<float*>(vreg) = val; - // This is needed for moving collectors since these can update the vreg references if they - // happen to agree with references in the reference array. - if (kMovingCollector && HasReferenceArray()) { - References()[i].Clear(); - } - } - - void SetVRegLong(size_t i, int64_t val) { - DCHECK_LT(i, NumberOfVRegs()); - uint32_t* vreg = &vregs_[i]; - typedef int64_t unaligned_int64 __attribute__ ((aligned (4))); - *reinterpret_cast<unaligned_int64*>(vreg) = val; - // This is needed for moving collectors since these can update the vreg references if they - // happen to agree with references in the reference array. - if (kMovingCollector && HasReferenceArray()) { - References()[i].Clear(); - References()[i + 1].Clear(); - } - } - - void SetVRegDouble(size_t i, double val) { - DCHECK_LT(i, NumberOfVRegs()); - uint32_t* vreg = &vregs_[i]; - typedef double unaligned_double __attribute__ ((aligned (4))); - *reinterpret_cast<unaligned_double*>(vreg) = val; - // This is needed for moving collectors since these can update the vreg references if they - // happen to agree with references in the reference array. 
- if (kMovingCollector && HasReferenceArray()) { - References()[i].Clear(); - References()[i + 1].Clear(); - } - } - - template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - void SetVRegReference(size_t i, mirror::Object* val) REQUIRES_SHARED(Locks::mutator_lock_) { - DCHECK_LT(i, NumberOfVRegs()); - if (kVerifyFlags & kVerifyWrites) { - VerifyObject(val); - } - if (kUseReadBarrier) { - ReadBarrier::AssertToSpaceInvariant(val); - } - uint32_t* vreg = &vregs_[i]; - reinterpret_cast<StackReference<mirror::Object>*>(vreg)->Assign(val); - if (HasReferenceArray()) { - References()[i].Assign(val); - } - } - - void SetMethod(ArtMethod* method) REQUIRES(Locks::mutator_lock_) { - DCHECK(method != nullptr); - DCHECK(method_ != nullptr); - method_ = method; - } - - ArtMethod* GetMethod() const REQUIRES_SHARED(Locks::mutator_lock_) { - DCHECK(method_ != nullptr); - return method_; - } - - mirror::Object* GetThisObject() const REQUIRES_SHARED(Locks::mutator_lock_); - - mirror::Object* GetThisObject(uint16_t num_ins) const REQUIRES_SHARED(Locks::mutator_lock_); - - bool Contains(StackReference<mirror::Object>* shadow_frame_entry_obj) const { - if (HasReferenceArray()) { - return ((&References()[0] <= shadow_frame_entry_obj) && - (shadow_frame_entry_obj <= (&References()[NumberOfVRegs() - 1]))); - } else { - uint32_t* shadow_frame_entry = reinterpret_cast<uint32_t*>(shadow_frame_entry_obj); - return ((&vregs_[0] <= shadow_frame_entry) && - (shadow_frame_entry <= (&vregs_[NumberOfVRegs() - 1]))); - } - } - - LockCountData& GetLockCountData() { - return lock_count_data_; - } - - static size_t LockCountDataOffset() { - return OFFSETOF_MEMBER(ShadowFrame, lock_count_data_); - } - - static size_t LinkOffset() { - return OFFSETOF_MEMBER(ShadowFrame, link_); - } - - static size_t MethodOffset() { - return OFFSETOF_MEMBER(ShadowFrame, method_); - } - - static size_t DexPCOffset() { - return OFFSETOF_MEMBER(ShadowFrame, dex_pc_); - } - - static size_t NumberOfVRegsOffset() { - return OFFSETOF_MEMBER(ShadowFrame, number_of_vregs_); - } - - static size_t VRegsOffset() { - return OFFSETOF_MEMBER(ShadowFrame, vregs_); - } - - static size_t ResultRegisterOffset() { - return OFFSETOF_MEMBER(ShadowFrame, result_register_); - } - - static size_t DexPCPtrOffset() { - return OFFSETOF_MEMBER(ShadowFrame, dex_pc_ptr_); - } - - static size_t CodeItemOffset() { - return OFFSETOF_MEMBER(ShadowFrame, code_item_); - } - - static size_t CachedHotnessCountdownOffset() { - return OFFSETOF_MEMBER(ShadowFrame, cached_hotness_countdown_); - } - - static size_t HotnessCountdownOffset() { - return OFFSETOF_MEMBER(ShadowFrame, hotness_countdown_); - } - - // Create ShadowFrame for interpreter using provided memory. 
- static ShadowFrame* CreateShadowFrameImpl(uint32_t num_vregs, - ShadowFrame* link, - ArtMethod* method, - uint32_t dex_pc, - void* memory) { - return new (memory) ShadowFrame(num_vregs, link, method, dex_pc, true); - } - - const uint16_t* GetDexPCPtr() { - return dex_pc_ptr_; - } - - void SetDexPCPtr(uint16_t* dex_pc_ptr) { - dex_pc_ptr_ = dex_pc_ptr; - } - - JValue* GetResultRegister() { - return result_register_; - } - - private: - ShadowFrame(uint32_t num_vregs, ShadowFrame* link, ArtMethod* method, - uint32_t dex_pc, bool has_reference_array) - : link_(link), - method_(method), - result_register_(nullptr), - dex_pc_ptr_(nullptr), - code_item_(nullptr), - number_of_vregs_(num_vregs), - dex_pc_(dex_pc), - cached_hotness_countdown_(0), - hotness_countdown_(0) { - // TODO(iam): Remove this parameter, it's an an artifact of portable removal - DCHECK(has_reference_array); - if (has_reference_array) { - memset(vregs_, 0, num_vregs * (sizeof(uint32_t) + sizeof(StackReference<mirror::Object>))); - } else { - memset(vregs_, 0, num_vregs * sizeof(uint32_t)); - } - } - - const StackReference<mirror::Object>* References() const { - DCHECK(HasReferenceArray()); - const uint32_t* vreg_end = &vregs_[NumberOfVRegs()]; - return reinterpret_cast<const StackReference<mirror::Object>*>(vreg_end); - } - - StackReference<mirror::Object>* References() { - return const_cast<StackReference<mirror::Object>*>( - const_cast<const ShadowFrame*>(this)->References()); - } - - // Link to previous shadow frame or null. - ShadowFrame* link_; - ArtMethod* method_; - JValue* result_register_; - const uint16_t* dex_pc_ptr_; - const DexFile::CodeItem* code_item_; - LockCountData lock_count_data_; // This may contain GC roots when lock counting is active. - const uint32_t number_of_vregs_; - uint32_t dex_pc_; - int16_t cached_hotness_countdown_; - int16_t hotness_countdown_; - - // This is a two-part array: - // - [0..number_of_vregs) holds the raw virtual registers, and each element here is always 4 - // bytes. - // - [number_of_vregs..number_of_vregs*2) holds only reference registers. Each element here is - // ptr-sized. - // In other words when a primitive is stored in vX, the second (reference) part of the array will - // be null. When a reference is stored in vX, the second (reference) part of the array will be a - // copy of vX. - uint32_t vregs_[0]; - - DISALLOW_IMPLICIT_CONSTRUCTORS(ShadowFrame); -}; - -struct ShadowFrameDeleter { - inline void operator()(ShadowFrame* frame) { - if (frame != nullptr) { - frame->~ShadowFrame(); - } - } -}; - -class JavaFrameRootInfo FINAL : public RootInfo { - public: - JavaFrameRootInfo(uint32_t thread_id, const StackVisitor* stack_visitor, size_t vreg) - : RootInfo(kRootJavaFrame, thread_id), stack_visitor_(stack_visitor), vreg_(vreg) { - } - void Describe(std::ostream& os) const OVERRIDE - REQUIRES_SHARED(Locks::mutator_lock_); - - size_t GetVReg() const { - return vreg_; - } - const StackVisitor* GetVisitor() const { - return stack_visitor_; - } - - private: - const StackVisitor* const stack_visitor_; - const size_t vreg_; -}; +/* + * Our current stack layout. + * The Dalvik registers come first, followed by the + * Method*, followed by other special temporaries if any, followed by + * regular compiler temporary. As of now we only have the Method* as + * as a special compiler temporary. + * A compiler temporary can be thought of as a virtual register that + * does not exist in the dex but holds intermediate values to help + * optimizations and code generation. 
A special compiler temporary is + * one whose location in frame is well known while non-special ones + * do not have a requirement on location in frame as long as code + * generator itself knows how to access them. + * + * TODO: Update this documentation? + * + * +-------------------------------+ + * | IN[ins-1] | {Note: resides in caller's frame} + * | . | + * | IN[0] | + * | caller's ArtMethod | ... ArtMethod* + * +===============================+ {Note: start of callee's frame} + * | core callee-save spill | {variable sized} + * +-------------------------------+ + * | fp callee-save spill | + * +-------------------------------+ + * | filler word | {For compatibility, if V[locals-1] used as wide + * +-------------------------------+ + * | V[locals-1] | + * | V[locals-2] | + * | . | + * | . | ... (reg == 2) + * | V[1] | ... (reg == 1) + * | V[0] | ... (reg == 0) <---- "locals_start" + * +-------------------------------+ + * | stack alignment padding | {0 to (kStackAlignWords-1) of padding} + * +-------------------------------+ + * | Compiler temp region | ... (reg >= max_num_special_temps) + * | . | + * | . | + * | V[max_num_special_temps + 1] | + * | V[max_num_special_temps + 0] | + * +-------------------------------+ + * | OUT[outs-1] | + * | OUT[outs-2] | + * | . | + * | OUT[0] | + * | ArtMethod* | ... (reg == num_total_code_regs == special_temp_value) <<== sp, 16-byte aligned + * +===============================+ + */ class StackVisitor { public: @@ -619,80 +215,10 @@ class StackVisitor { uintptr_t* GetGPRAddress(uint32_t reg) const; - // This is a fast-path for getting/setting values in a quick frame. - uint32_t* GetVRegAddrFromQuickCode(ArtMethod** cur_quick_frame, - const DexFile::CodeItem* code_item, - uint32_t core_spills, uint32_t fp_spills, size_t frame_size, - uint16_t vreg) const { - int offset = GetVRegOffsetFromQuickCode( - code_item, core_spills, fp_spills, frame_size, vreg, kRuntimeISA); - DCHECK_EQ(cur_quick_frame, GetCurrentQuickFrame()); - uint8_t* vreg_addr = reinterpret_cast<uint8_t*>(cur_quick_frame) + offset; - return reinterpret_cast<uint32_t*>(vreg_addr); - } - uintptr_t GetReturnPc() const REQUIRES_SHARED(Locks::mutator_lock_); void SetReturnPc(uintptr_t new_ret_pc) REQUIRES_SHARED(Locks::mutator_lock_); - /* - * Return sp-relative offset for a Dalvik virtual register, compiler - * spill or Method* in bytes using Method*. - * Note that (reg == -1) denotes an invalid Dalvik register. For the - * positive values, the Dalvik registers come first, followed by the - * Method*, followed by other special temporaries if any, followed by - * regular compiler temporary. As of now we only have the Method* as - * as a special compiler temporary. - * A compiler temporary can be thought of as a virtual register that - * does not exist in the dex but holds intermediate values to help - * optimizations and code generation. A special compiler temporary is - * one whose location in frame is well known while non-special ones - * do not have a requirement on location in frame as long as code - * generator itself knows how to access them. - * - * +-------------------------------+ - * | IN[ins-1] | {Note: resides in caller's frame} - * | . | - * | IN[0] | - * | caller's ArtMethod | ... 
ArtMethod* - * +===============================+ {Note: start of callee's frame} - * | core callee-save spill | {variable sized} - * +-------------------------------+ - * | fp callee-save spill | - * +-------------------------------+ - * | filler word | {For compatibility, if V[locals-1] used as wide - * +-------------------------------+ - * | V[locals-1] | - * | V[locals-2] | - * | . | - * | . | ... (reg == 2) - * | V[1] | ... (reg == 1) - * | V[0] | ... (reg == 0) <---- "locals_start" - * +-------------------------------+ - * | stack alignment padding | {0 to (kStackAlignWords-1) of padding} - * +-------------------------------+ - * | Compiler temp region | ... (reg >= max_num_special_temps) - * | . | - * | . | - * | V[max_num_special_temps + 1] | - * | V[max_num_special_temps + 0] | - * +-------------------------------+ - * | OUT[outs-1] | - * | OUT[outs-2] | - * | . | - * | OUT[0] | - * | ArtMethod* | ... (reg == num_total_code_regs == special_temp_value) <<== sp, 16-byte aligned - * +===============================+ - */ - static int GetVRegOffsetFromQuickCode(const DexFile::CodeItem* code_item, - uint32_t core_spills, uint32_t fp_spills, - size_t frame_size, int reg, InstructionSet isa); - - static int GetOutVROffset(uint16_t out_num, InstructionSet isa) { - // According to stack model, the first out is above the Method referernce. - return static_cast<size_t>(InstructionSetPointerSize(isa)) + out_num * sizeof(uint32_t); - } - bool IsInInlinedFrame() const { return current_inlining_depth_ != 0; } @@ -774,8 +300,6 @@ class StackVisitor { void SanityCheckFrame() const REQUIRES_SHARED(Locks::mutator_lock_); - InlineInfo GetCurrentInlineInfo() const REQUIRES_SHARED(Locks::mutator_lock_); - Thread* const thread_; const StackWalkKind walk_kind_; ShadowFrame* cur_shadow_frame_; diff --git a/runtime/thread.cc b/runtime/thread.cc index 789f571253..4ddf217ca1 100644 --- a/runtime/thread.cc +++ b/runtime/thread.cc @@ -58,6 +58,8 @@ #include "gc_root.h" #include "handle_scope-inl.h" #include "indirect_reference_table-inl.h" +#include "interpreter/shadow_frame.h" +#include "java_frame_root_info.h" #include "java_vm_ext.h" #include "jni_internal.h" #include "mirror/class_loader.h" |
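The new interpreter/lock_count_data.h above documents the counting scheme: every locked object is pushed onto a lazily allocated vector, duplicate entries stand for recursive locks, each unlock removes one matching entry, and a non-empty vector at method exit signals a structured-locking violation. A self-contained sketch of that bookkeeping follows, with placeholder types instead of ART's Thread and mirror::Object; the real code additionally throws IllegalMonitorStateException and force-unlocks any leaked monitors.

```cpp
#include <algorithm>
#include <cstdio>
#include <memory>
#include <vector>

// Placeholder for a locked-on object; not ART's mirror::Object.
struct Obj { const char* name; };

class LockCounter {
 public:
  void AddMonitor(Obj* obj) {
    if (obj == nullptr) return;
    if (monitors_ == nullptr) monitors_.reset(new std::vector<Obj*>());
    monitors_->push_back(obj);  // duplicates represent recursive locks
  }

  bool RemoveMonitor(Obj* obj) {
    if (obj == nullptr || monitors_ == nullptr) return false;
    auto it = std::find(monitors_->begin(), monitors_->end(), obj);
    if (it == monitors_->end()) {
      return false;  // ART throws IllegalMonitorStateException here
    }
    monitors_->erase(it);  // remove exactly one entry; recursion is counted
    return true;
  }

  bool AllMonitorsReleased() const {
    return monitors_ == nullptr || monitors_->empty();
  }

 private:
  // Lazily allocated so the common "no locks held" case stays cheap.
  std::unique_ptr<std::vector<Obj*>> monitors_;
};

int main() {
  Obj o{"java.lang.Object"};
  LockCounter lc;
  lc.AddMonitor(&o);
  lc.AddMonitor(&o);  // recursive lock: two entries for the same object
  lc.RemoveMonitor(&o);
  printf("balanced so far: %d\n", lc.AllMonitorsReleased());   // 0: one lock left
  lc.RemoveMonitor(&o);
  printf("balanced at exit: %d\n", lc.AllMonitorsReleased());  // 1: all released
  return 0;
}
```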