diff options
Diffstat (limited to 'compiler/utils')
 compiler/utils/arm64/assembler_arm64.cc   |  11 +-
 compiler/utils/array_ref.h (new)          | 173 ++++++++++++++++++
 compiler/utils/x86/assembler_x86.cc       |   9 +-
 compiler/utils/x86_64/assembler_x86_64.cc |  13 +-
4 files changed, 193 insertions, 13 deletions
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc index 27188b2331..009b227209 100644 --- a/compiler/utils/arm64/assembler_arm64.cc +++ b/compiler/utils/arm64/assembler_arm64.cc @@ -530,7 +530,7 @@ void Arm64Assembler::Call(FrameOffset base, Offset offs, ManagedRegister m_scrat Arm64ManagedRegister scratch = m_scratch.AsArm64(); CHECK(scratch.IsCoreRegister()) << scratch; // Call *(*(SP + base) + offset) - LoadFromOffset(scratch.AsCoreRegister(), SP, base.Int32Value()); + LoadWFromOffset(kLoadWord, scratch.AsOverlappingCoreRegisterLow(), SP, base.Int32Value()); LoadFromOffset(scratch.AsCoreRegister(), scratch.AsCoreRegister(), offs.Int32Value()); ___ Blr(reg_x(scratch.AsCoreRegister())); } @@ -656,16 +656,17 @@ void Arm64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg, // trashed by native code. ___ Mov(reg_x(ETR), reg_x(TR)); - // Increate frame to required size - must be at least space to push Method*. + // Increase frame to required size - must be at least space to push StackReference<Method>. CHECK_GT(frame_size, kCalleeSavedRegsSize * kFramePointerSize); size_t adjust = frame_size - (kCalleeSavedRegsSize * kFramePointerSize); IncreaseFrameSize(adjust); - // Write Method*. - StoreToOffset(X0, SP, 0); + // Write StackReference<Method>. 
+ DCHECK_EQ(4U, sizeof(StackReference<mirror::ArtMethod>)); + StoreWToOffset(StoreOperandType::kStoreWord, W0, SP, 0); // Write out entry spills - int32_t offset = frame_size + kFramePointerSize; + int32_t offset = frame_size + sizeof(StackReference<mirror::ArtMethod>); for (size_t i = 0; i < entry_spills.size(); ++i) { Arm64ManagedRegister reg = entry_spills.at(i).AsArm64(); if (reg.IsNoRegister()) { diff --git a/compiler/utils/array_ref.h b/compiler/utils/array_ref.h new file mode 100644 index 0000000000..2d70b7dd31 --- /dev/null +++ b/compiler/utils/array_ref.h @@ -0,0 +1,173 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_UTILS_ARRAY_REF_H_ +#define ART_COMPILER_UTILS_ARRAY_REF_H_ + +#include <type_traits> +#include <vector> + +#include "base/logging.h" + +namespace art { + +/** + * @brief A container that references an array. + * + * @details The template class ArrayRef provides a container that references + * an external array. This external array must remain alive while the ArrayRef + * object is in use. The external array may be a std::vector<>-backed storage + * or any other contiguous chunk of memory but that memory must remain valid, + * i.e. the std::vector<> must not be resized for example. + * + * Except for copy/assign and insert/erase/capacity functions, the interface + * is essentially the same as std::vector<>. 
Since we don't want to throw + * exceptions, at() is also excluded. + */ +template <typename T> +class ArrayRef { + private: + struct tag { }; + + public: + typedef T value_type; + typedef T& reference; + typedef const T& const_reference; + typedef T* pointer; + typedef const T* const_pointer; + typedef T* iterator; + typedef const T* const_iterator; + typedef std::reverse_iterator<iterator> reverse_iterator; + typedef std::reverse_iterator<const_iterator> const_reverse_iterator; + typedef ptrdiff_t difference_type; + typedef size_t size_type; + + // Constructors. + + constexpr ArrayRef() + : array_(nullptr), size_(0u) { + } + + template <size_t size> + constexpr ArrayRef(T (&array)[size]) + : array_(array), size_(size) { + } + + template <typename U, size_t size> + constexpr ArrayRef(U (&array)[size], + typename std::enable_if<std::is_same<T, const U>::value, tag>::type t = tag()) + : array_(array), size_(size) { + } + + constexpr ArrayRef(T* array, size_t size) + : array_(array), size_(size) { + } + + template <typename U> + constexpr ArrayRef(U* array, size_t size, + typename std::enable_if<std::is_same<T, const U>::value, tag>::type t = tag()) + : array_(array), size_(size) { + } + + explicit ArrayRef(std::vector<T>& v) + : array_(v.data()), size_(v.size()) { + } + + template <typename U> + ArrayRef(const std::vector<U>& v, + typename std::enable_if<std::is_same<T, const U>::value, tag>::tag t = tag()) + : array_(v.data()), size_(v.size()) { + } + + // Assignment operators. + + ArrayRef& operator=(const ArrayRef& other) { + array_ = other.array_; + size_ = other.size_; + return *this; + } + + template <typename U> + typename std::enable_if<std::is_same<T, const U>::value, ArrayRef>::type& + operator=(const ArrayRef<U>& other) { + return *this = ArrayRef(other); + } + + // Destructor. + ~ArrayRef() = default; + + // Iterators. 
+ iterator begin() { return array_; } + const_iterator begin() const { return array_; } + const_iterator cbegin() const { return array_; } + iterator end() { return array_ + size_; } + const_iterator end() const { return array_ + size_; } + const_iterator cend() const { return array_ + size_; } + reverse_iterator rbegin() { return reverse_iterator(end()); } + const_reverse_iterator rbegin() const { return const_reverse_iterator(end()); } + const_reverse_iterator crbegin() const { return const_reverse_iterator(cend()); } + reverse_iterator rend() { return reverse_iterator(begin()); } + const_reverse_iterator rend() const { return const_reverse_iterator(begin()); } + const_reverse_iterator crend() const { return const_reverse_iterator(cbegin()); } + + // Size. + size_type size() const { return size_; } + bool empty() const { return size() == 0u; } + + // Element access. NOTE: Not providing at(). + + reference operator[](size_type n) { + DCHECK_LT(n, size_); + return array_[n]; + } + + const_reference operator[](size_type n) const { + DCHECK_LT(n, size_); + return array_[n]; + } + + reference front() { + DCHECK_NE(size_, 0u); + return array_[0]; + } + + const_reference front() const { + DCHECK_NE(size_, 0u); + return array_[0]; + } + + reference back() { + DCHECK_NE(size_, 0u); + return array_[size_ - 1u]; + } + + const_reference back() const { + DCHECK_NE(size_, 0u); + return array_[size_ - 1u]; + } + + value_type* data() { return array_; } + const value_type* data() const { return array_; } + + private: + T* array_; + size_t size_; +}; + +} // namespace art + + +#endif // ART_COMPILER_UTILS_ARRAY_REF_H_ diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc index 0791c63f90..56c6536fe5 100644 --- a/compiler/utils/x86/assembler_x86.cc +++ b/compiler/utils/x86/assembler_x86.cc @@ -1411,10 +1411,12 @@ void X86Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg, } // return address then method on stack addl(ESP, 
Immediate(-frame_size + (spill_regs.size() * kFramePointerSize) + - kFramePointerSize /*method*/ + kFramePointerSize /*return address*/)); + sizeof(StackReference<mirror::ArtMethod>) /*method*/ + + kFramePointerSize /*return address*/)); pushl(method_reg.AsX86().AsCpuRegister()); for (size_t i = 0; i < entry_spills.size(); ++i) { - movl(Address(ESP, frame_size + kFramePointerSize + (i * kFramePointerSize)), + movl(Address(ESP, frame_size + sizeof(StackReference<mirror::ArtMethod>) + + (i * kFramePointerSize)), entry_spills.at(i).AsX86().AsCpuRegister()); } } @@ -1422,7 +1424,8 @@ void X86Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg, void X86Assembler::RemoveFrame(size_t frame_size, const std::vector<ManagedRegister>& spill_regs) { CHECK_ALIGNED(frame_size, kStackAlignment); - addl(ESP, Immediate(frame_size - (spill_regs.size() * kFramePointerSize) - kFramePointerSize)); + addl(ESP, Immediate(frame_size - (spill_regs.size() * kFramePointerSize) - + sizeof(StackReference<mirror::ArtMethod>))); for (size_t i = 0; i < spill_regs.size(); ++i) { popl(spill_regs.at(i).AsX86().AsCpuRegister()); } diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc index 0ede8755e3..a14551c3b7 100644 --- a/compiler/utils/x86_64/assembler_x86_64.cc +++ b/compiler/utils/x86_64/assembler_x86_64.cc @@ -59,7 +59,6 @@ void X86_64Assembler::call(Label* label) { EmitLabel(label, kSize); } - void X86_64Assembler::pushq(CpuRegister reg) { AssemblerBuffer::EnsureCapacity ensured(&buffer_); EmitOptionalRex32(reg); @@ -1652,8 +1651,12 @@ void X86_64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg, } // return address then method on stack addq(CpuRegister(RSP), Immediate(-frame_size + (spill_regs.size() * kFramePointerSize) + - kFramePointerSize /*method*/ + kFramePointerSize /*return address*/)); - pushq(method_reg.AsX86_64().AsCpuRegister()); + sizeof(StackReference<mirror::ArtMethod>) /*method*/ + + 
kFramePointerSize /*return address*/)); + + DCHECK_EQ(4U, sizeof(StackReference<mirror::ArtMethod>)); + subq(CpuRegister(RSP), Immediate(4)); + movl(Address(CpuRegister(RSP), 0), method_reg.AsX86_64().AsCpuRegister()); for (size_t i = 0; i < entry_spills.size(); ++i) { ManagedRegisterSpill spill = entry_spills.at(i); @@ -1732,7 +1735,7 @@ void X86_64Assembler::Store(FrameOffset offs, ManagedRegister msrc, size_t size) void X86_64Assembler::StoreRef(FrameOffset dest, ManagedRegister msrc) { X86_64ManagedRegister src = msrc.AsX86_64(); CHECK(src.IsCpuRegister()); - movq(Address(CpuRegister(RSP), dest), src.AsCpuRegister()); + movl(Address(CpuRegister(RSP), dest), src.AsCpuRegister()); } void X86_64Assembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) { @@ -2070,7 +2073,7 @@ void X86_64Assembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister void X86_64Assembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) { CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister(); - movq(scratch, Address(CpuRegister(RSP), base)); + movl(scratch, Address(CpuRegister(RSP), base)); call(Address(scratch, offset)); } |