Andreas Gampe | 57b3429 | 2015-01-14 15:45:59 -0800 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (C) 2014 The Android Open Source Project |
| 3 | * |
| 4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | * you may not use this file except in compliance with the License. |
| 6 | * You may obtain a copy of the License at |
| 7 | * |
| 8 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | * |
| 10 | * Unless required by applicable law or agreed to in writing, software |
| 11 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | * See the License for the specific language governing permissions and |
| 14 | * limitations under the License. |
| 15 | */ |
| 16 | |
| 17 | #ifndef ART_COMPILER_UTILS_MIPS64_ASSEMBLER_MIPS64_H_ |
| 18 | #define ART_COMPILER_UTILS_MIPS64_ASSEMBLER_MIPS64_H_ |
| 19 | |
| 20 | #include <vector> |
| 21 | |
| 22 | #include "base/macros.h" |
| 23 | #include "constants_mips64.h" |
| 24 | #include "globals.h" |
| 25 | #include "managed_register_mips64.h" |
| 26 | #include "utils/assembler.h" |
| 27 | #include "offsets.h" |
Andreas Gampe | 57b3429 | 2015-01-14 15:45:59 -0800 | [diff] [blame] | 28 | |
| 29 | namespace art { |
| 30 | namespace mips64 { |
| 31 | |
// Width/signedness variants for memory loads; consumed by LoadFromOffset()
// and LoadFpuFromOffset() to pick the concrete load instruction.
enum LoadOperandType {
  kLoadSignedByte,        // 8-bit, sign-extended (lb).
  kLoadUnsignedByte,      // 8-bit, zero-extended (lbu).
  kLoadSignedHalfword,    // 16-bit, sign-extended (lh).
  kLoadUnsignedHalfword,  // 16-bit, zero-extended (lhu).
  kLoadWord,              // 32-bit, sign-extended (lw).
  kLoadUnsignedWord,      // 32-bit, zero-extended (lwu).
  kLoadDoubleword         // 64-bit (ld).
};
| 41 | |
// Width variants for memory stores; consumed by StoreToOffset() and
// StoreFpuToOffset(). Stores need no signedness distinction.
enum StoreOperandType {
  kStoreByte,        // 8-bit (sb).
  kStoreHalfword,    // 16-bit (sh).
  kStoreWord,        // 32-bit (sw).
  kStoreDoubleword   // 64-bit (sd).
};
| 48 | |
// Assembler for the MIPS64 instruction set. The low-level Emit* methods and
// the named mnemonic methods (Add, Lw, Beq, ...) append encoded instructions
// to the instruction stream; the "Overridden common assembler high-level
// functionality" section implements the architecture-neutral Assembler
// interface used by the JNI compiler (frames, spills, handle scopes, calls).
class Mips64Assembler FINAL : public Assembler {
 public:
  Mips64Assembler() {}
  virtual ~Mips64Assembler() {}

  // Emit Machine Instructions.
  // Register-operand mnemonics follow hardware naming: rd = destination,
  // rs/rt = sources. Immediate forms take the raw 16-bit instruction field.
  void Add(GpuRegister rd, GpuRegister rs, GpuRegister rt);
  void Addi(GpuRegister rt, GpuRegister rs, uint16_t imm16);
  void Addu(GpuRegister rd, GpuRegister rs, GpuRegister rt);
  void Addiu(GpuRegister rt, GpuRegister rs, uint16_t imm16);
  void Daddiu(GpuRegister rt, GpuRegister rs, uint16_t imm16);  // 64-bit add immediate.
  void Sub(GpuRegister rd, GpuRegister rs, GpuRegister rt);
  void Subu(GpuRegister rd, GpuRegister rs, GpuRegister rt);
  void Mult(GpuRegister rs, GpuRegister rt);
  void Multu(GpuRegister rs, GpuRegister rt);
  void Div(GpuRegister rs, GpuRegister rt);
  void Divu(GpuRegister rs, GpuRegister rt);

  void And(GpuRegister rd, GpuRegister rs, GpuRegister rt);
  void Andi(GpuRegister rt, GpuRegister rs, uint16_t imm16);
  void Or(GpuRegister rd, GpuRegister rs, GpuRegister rt);
  void Ori(GpuRegister rt, GpuRegister rs, uint16_t imm16);
  void Xor(GpuRegister rd, GpuRegister rs, GpuRegister rt);
  void Xori(GpuRegister rt, GpuRegister rs, uint16_t imm16);
  void Nor(GpuRegister rd, GpuRegister rs, GpuRegister rt);

  // Shifts: shamt is the 5-bit shift amount for the immediate forms; the
  // *v forms take the amount from a register.
  void Sll(GpuRegister rd, GpuRegister rs, int shamt);
  void Srl(GpuRegister rd, GpuRegister rs, int shamt);
  void Sra(GpuRegister rd, GpuRegister rs, int shamt);
  void Sllv(GpuRegister rd, GpuRegister rs, GpuRegister rt);
  void Srlv(GpuRegister rd, GpuRegister rs, GpuRegister rt);
  void Srav(GpuRegister rd, GpuRegister rs, GpuRegister rt);

  // Loads: rt <- mem[rs + imm16].
  void Lb(GpuRegister rt, GpuRegister rs, uint16_t imm16);
  void Lh(GpuRegister rt, GpuRegister rs, uint16_t imm16);
  void Lw(GpuRegister rt, GpuRegister rs, uint16_t imm16);
  void Ld(GpuRegister rt, GpuRegister rs, uint16_t imm16);
  void Lbu(GpuRegister rt, GpuRegister rs, uint16_t imm16);
  void Lhu(GpuRegister rt, GpuRegister rs, uint16_t imm16);
  void Lwu(GpuRegister rt, GpuRegister rs, uint16_t imm16);
  void Lui(GpuRegister rt, uint16_t imm16);
  void Mfhi(GpuRegister rd);
  void Mflo(GpuRegister rd);

  // Stores: mem[rs + imm16] <- rt.
  void Sb(GpuRegister rt, GpuRegister rs, uint16_t imm16);
  void Sh(GpuRegister rt, GpuRegister rs, uint16_t imm16);
  void Sw(GpuRegister rt, GpuRegister rs, uint16_t imm16);
  void Sd(GpuRegister rt, GpuRegister rs, uint16_t imm16);

  // Set-on-less-than comparisons.
  void Slt(GpuRegister rd, GpuRegister rs, GpuRegister rt);
  void Sltu(GpuRegister rd, GpuRegister rs, GpuRegister rt);
  void Slti(GpuRegister rt, GpuRegister rs, uint16_t imm16);
  void Sltiu(GpuRegister rt, GpuRegister rs, uint16_t imm16);

  // Branches and jumps.
  // NOTE(review): imm16 here looks like the raw encoded branch offset field,
  // not a byte offset — confirm against EncodeBranchOffset in the .cc.
  void Beq(GpuRegister rt, GpuRegister rs, uint16_t imm16);
  void Bne(GpuRegister rt, GpuRegister rs, uint16_t imm16);
  void J(uint32_t address);
  void Jal(uint32_t address);
  void Jr(GpuRegister rs);
  void Jalr(GpuRegister rs);

  // Floating-point arithmetic: S = single precision, D = double precision.
  void AddS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
  void SubS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
  void MulS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
  void DivS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
  void AddD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
  void SubD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
  void MulD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
  void DivD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
  void MovS(FpuRegister fd, FpuRegister fs);
  void MovD(FpuRegister fd, FpuRegister fs);

  // GPR <-> FPR moves and floating-point loads/stores.
  void Mfc1(GpuRegister rt, FpuRegister fs);
  void Mtc1(FpuRegister ft, GpuRegister rs);
  void Lwc1(FpuRegister ft, GpuRegister rs, uint16_t imm16);
  void Ldc1(FpuRegister ft, GpuRegister rs, uint16_t imm16);
  void Swc1(FpuRegister ft, GpuRegister rs, uint16_t imm16);
  void Sdc1(FpuRegister ft, GpuRegister rs, uint16_t imm16);

  // Pseudo-instructions / convenience emitters.
  void Break();
  void Nop();
  void Move(GpuRegister rt, GpuRegister rs);
  void Clear(GpuRegister rt);   // Set rt to zero.
  void Not(GpuRegister rt, GpuRegister rs);
  void Mul(GpuRegister rd, GpuRegister rs, GpuRegister rt);
  void Div(GpuRegister rd, GpuRegister rs, GpuRegister rt);
  void Rem(GpuRegister rd, GpuRegister rs, GpuRegister rt);

  // NOTE(review): despite the "64" suffix both of these take int32_t values —
  // presumably limited to 32-bit immediates materialized into 64-bit
  // registers; confirm against the .cc before widening.
  void AddConstant64(GpuRegister rt, GpuRegister rs, int32_t value);
  void LoadImmediate64(GpuRegister rt, int32_t value);

  // Composite load/store helpers that expand to one or more instructions
  // based on the operand type and offset size.
  void EmitLoad(ManagedRegister m_dst, GpuRegister src_register, int32_t src_offset, size_t size);
  void LoadFromOffset(LoadOperandType type, GpuRegister reg, GpuRegister base, int32_t offset);
  void LoadFpuFromOffset(LoadOperandType type, FpuRegister reg, GpuRegister base, int32_t offset);
  void StoreToOffset(StoreOperandType type, GpuRegister reg, GpuRegister base, int32_t offset);
  void StoreFpuToOffset(StoreOperandType type, FpuRegister reg, GpuRegister base, int32_t offset);

  // Emit data (e.g. encoded instruction or immediate) to the instruction stream.
  void Emit(int32_t value);
  void EmitBranch(GpuRegister rt, GpuRegister rs, Label* label, bool equal);
  void EmitJump(Label* label, bool link);
  void Bind(Label* label, bool is_jump);

  //
  // Overridden common assembler high-level functionality
  //

  // Emit code that will create an activation on the stack
  void BuildFrame(size_t frame_size, ManagedRegister method_reg,
                  const std::vector<ManagedRegister>& callee_save_regs,
                  const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;

  // Emit code that will remove an activation from the stack
  void RemoveFrame(size_t frame_size,
                   const std::vector<ManagedRegister>& callee_save_regs) OVERRIDE;

  void IncreaseFrameSize(size_t adjust) OVERRIDE;
  void DecreaseFrameSize(size_t adjust) OVERRIDE;

  // Store routines
  void Store(FrameOffset offs, ManagedRegister msrc, size_t size) OVERRIDE;
  void StoreRef(FrameOffset dest, ManagedRegister msrc) OVERRIDE;
  void StoreRawPtr(FrameOffset dest, ManagedRegister msrc) OVERRIDE;

  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister mscratch) OVERRIDE;

  // ThreadOffset<8>: offsets into the Thread object for a 64-bit pointer size.
  void StoreImmediateToThread64(ThreadOffset<8> dest, uint32_t imm,
                                ManagedRegister mscratch) OVERRIDE;

  void StoreStackOffsetToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs,
                                  ManagedRegister mscratch) OVERRIDE;

  void StoreStackPointerToThread64(ThreadOffset<8> thr_offs) OVERRIDE;

  void StoreSpanning(FrameOffset dest, ManagedRegister msrc, FrameOffset in_off,
                     ManagedRegister mscratch) OVERRIDE;

  // Load routines
  void Load(ManagedRegister mdest, FrameOffset src, size_t size) OVERRIDE;

  void LoadFromThread64(ManagedRegister mdest, ThreadOffset<8> src, size_t size) OVERRIDE;

  void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;

  void LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
               bool poison_reference) OVERRIDE;

  void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) OVERRIDE;

  void LoadRawPtrFromThread64(ManagedRegister mdest, ThreadOffset<8> offs) OVERRIDE;

  // Copying routines
  void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) OVERRIDE;

  void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset<8> thr_offs,
                              ManagedRegister mscratch) OVERRIDE;

  void CopyRawPtrToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs,
                            ManagedRegister mscratch) OVERRIDE;

  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) OVERRIDE;

  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) OVERRIDE;

  void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister mscratch,
            size_t size) OVERRIDE;

  void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
            ManagedRegister mscratch, size_t size) OVERRIDE;

  void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister mscratch,
            size_t size) OVERRIDE;

  void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset,
            ManagedRegister mscratch, size_t size) OVERRIDE;

  void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
            ManagedRegister mscratch, size_t size) OVERRIDE;

  void MemoryBarrier(ManagedRegister) OVERRIDE;

  // Sign extension
  void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;

  // Zero extension
  void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;

  // Exploit fast access in managed code to Thread::Current()
  void GetCurrentThread(ManagedRegister tr) OVERRIDE;
  void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) OVERRIDE;

  // Set up out_reg to hold a Object** into the handle scope, or to be null if the
  // value is null and null_allowed. in_reg holds a possibly stale reference
  // that can be used to avoid loading the handle scope entry to see if the value is
  // null.
  void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
                              ManagedRegister in_reg, bool null_allowed) OVERRIDE;

  // Set up out_off to hold a Object** into the handle scope, or to be null if the
  // value is null and null_allowed.
  void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister
                              mscratch, bool null_allowed) OVERRIDE;

  // src holds a handle scope entry (Object**) load this into dst
  void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;

  // Heap::VerifyObject on src. In some cases (such as a reference to this) we
  // know that src may not be null.
  void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
  void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;

  // Call to address held at [base+offset]
  void Call(ManagedRegister base, Offset offset, ManagedRegister mscratch) OVERRIDE;
  void Call(FrameOffset base, Offset offset, ManagedRegister mscratch) OVERRIDE;
  void CallFromThread64(ThreadOffset<8> offset, ManagedRegister mscratch) OVERRIDE;

  // Generate code to check if Thread::Current()->exception_ is non-null
  // and branch to a ExceptionSlowPath if it is.
  void ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) OVERRIDE;

 private:
  // Raw encoders for the MIPS instruction formats:
  // R-type (register), I-type (immediate), J-type (jump), and the FPU
  // register/immediate variants.
  void EmitR(int opcode, GpuRegister rs, GpuRegister rt, GpuRegister rd, int shamt, int funct);
  void EmitI(int opcode, GpuRegister rs, GpuRegister rt, uint16_t imm);
  void EmitJ(int opcode, int address);
  void EmitFR(int opcode, int fmt, FpuRegister ft, FpuRegister fs, FpuRegister fd, int funct);
  void EmitFI(int opcode, int fmt, FpuRegister rt, uint16_t imm);

  // Pack a branch/jump byte offset into (or extract it from) an encoded
  // instruction word; is_jump selects the J-type vs branch field layout.
  int32_t EncodeBranchOffset(int offset, int32_t inst, bool is_jump);
  int DecodeBranchOffset(int32_t inst, bool is_jump);

  DISALLOW_COPY_AND_ASSIGN(Mips64Assembler);
};
| 281 | |
| 282 | // Slowpath entered when Thread::Current()->_exception is non-null |
| 283 | class Mips64ExceptionSlowPath FINAL : public SlowPath { |
| 284 | public: |
| 285 | explicit Mips64ExceptionSlowPath(Mips64ManagedRegister scratch, size_t stack_adjust) |
| 286 | : scratch_(scratch), stack_adjust_(stack_adjust) {} |
| 287 | virtual void Emit(Assembler *sp_asm) OVERRIDE; |
| 288 | private: |
| 289 | const Mips64ManagedRegister scratch_; |
| 290 | const size_t stack_adjust_; |
| 291 | }; |
| 292 | |
| 293 | } // namespace mips64 |
| 294 | } // namespace art |
| 295 | |
| 296 | #endif // ART_COMPILER_UTILS_MIPS64_ASSEMBLER_MIPS64_H_ |