/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
| 16 | |
| 17 | #ifndef ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_VIXL_H_ |
| 18 | #define ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_VIXL_H_ |
| 19 | |
Andreas Gampe | 5794381 | 2017-12-06 21:39:13 -0800 | [diff] [blame] | 20 | #include <android-base/logging.h> |
| 21 | |
Artem Serov | 12e097c | 2016-08-08 15:13:26 +0100 | [diff] [blame] | 22 | #include "base/arena_containers.h" |
Andreas Gampe | 5794381 | 2017-12-06 21:39:13 -0800 | [diff] [blame] | 23 | #include "base/macros.h" |
Artem Serov | 12e097c | 2016-08-08 15:13:26 +0100 | [diff] [blame] | 24 | #include "constants_arm.h" |
| 25 | #include "offsets.h" |
| 26 | #include "utils/arm/assembler_arm_shared.h" |
| 27 | #include "utils/arm/managed_register_arm.h" |
| 28 | #include "utils/assembler.h" |
| 29 | #include "utils/jni_macro_assembler.h" |
| 30 | |
| 31 | // TODO(VIXL): Make VIXL compile with -Wshadow and remove pragmas. |
| 32 | #pragma GCC diagnostic push |
| 33 | #pragma GCC diagnostic ignored "-Wshadow" |
| 34 | #include "aarch32/macro-assembler-aarch32.h" |
| 35 | #pragma GCC diagnostic pop |
| 36 | |
| 37 | namespace vixl32 = vixl::aarch32; |
| 38 | |
| 39 | namespace art { |
| 40 | namespace arm { |
| 41 | |
// ART specialization of the VIXL AArch32 macro assembler. It layers two kinds
// of helpers on top of vixl32::MacroAssembler:
//  - branch helpers (CompareAndBranchIfZero/NonZero, B) that pick compact
//    16-bit encodings when a near-target hint allows it, and
//  - wrappers for data-processing instructions that pass vixl32::DontCare by
//    default, letting VIXL emit the smaller 16-bit flag-setting encodings.
class ArmVIXLMacroAssembler FINAL : public vixl32::MacroAssembler {
 public:
  // Most methods fit in a 1KB code buffer, which results in more optimal alloc/realloc and
  // fewer system calls than a larger default capacity.
  static constexpr size_t kDefaultCodeBufferCapacity = 1 * KB;

  ArmVIXLMacroAssembler()
      : vixl32::MacroAssembler(ArmVIXLMacroAssembler::kDefaultCodeBufferCapacity) {}

  // The following interfaces can generate CMP+Bcc or Cbz/Cbnz.
  // CMP+Bcc are generated by default.
  // If a hint is given (is_far_target = false) and rn and label can all fit into Cbz/Cbnz,
  // then Cbz/Cbnz is generated.
  // Prefer following interfaces to using vixl32::MacroAssembler::Cbz/Cbnz.
  // In T32, Cbz/Cbnz instructions have following limitations:
  // - Far targets, which are over 126 bytes away, are not supported.
  // - Only low registers can be encoded.
  // - Backward branches are not supported.
  void CompareAndBranchIfZero(vixl32::Register rn,
                              vixl32::Label* label,
                              bool is_far_target = true);
  void CompareAndBranchIfNonZero(vixl32::Register rn,
                                 vixl32::Label* label,
                                 bool is_far_target = true);

  // In T32 some of the instructions (add, mov, etc) outside an IT block
  // have only 32-bit encodings. But there are 16-bit flag setting
  // versions of these instructions (adds, movs, etc). In most of the
  // cases in ART we don't care if the instructions keep flags or not;
  // thus we can benefit from smaller code size.
  // VIXL will never generate flag setting versions (for example, adds
  // for Add macro instruction) unless vixl32::DontCare option is
  // explicitly specified. That's why we introduce wrappers to use
  // DontCare option by default.
  //
  // Each expansion defines a (rd, rn, operand) wrapper that forwards to the
  // base-class overload with vixl32::DontCare prepended; the trailing
  // using-declaration re-exposes the hidden base-class overloads so all the
  // original call forms remain available alongside the wrapper.
#define WITH_FLAGS_DONT_CARE_RD_RN_OP(func_name) \
  void (func_name)(vixl32::Register rd, vixl32::Register rn, const vixl32::Operand& operand) { \
    MacroAssembler::func_name(vixl32::DontCare, rd, rn, operand); \
  } \
  using MacroAssembler::func_name

  WITH_FLAGS_DONT_CARE_RD_RN_OP(Adc);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Sub);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Sbc);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Rsb);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Rsc);

  WITH_FLAGS_DONT_CARE_RD_RN_OP(Eor);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Orr);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Orn);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(And);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Bic);

  WITH_FLAGS_DONT_CARE_RD_RN_OP(Asr);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Lsr);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Lsl);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Ror);

#undef WITH_FLAGS_DONT_CARE_RD_RN_OP

  // Same idea as above for two-operand (rd, operand) instructions.
#define WITH_FLAGS_DONT_CARE_RD_OP(func_name) \
  void (func_name)(vixl32::Register rd, const vixl32::Operand& operand) { \
    MacroAssembler::func_name(vixl32::DontCare, rd, operand); \
  } \
  using MacroAssembler::func_name

  WITH_FLAGS_DONT_CARE_RD_OP(Mvn);
  WITH_FLAGS_DONT_CARE_RD_OP(Mov);

#undef WITH_FLAGS_DONT_CARE_RD_OP

  // The following two functions don't fall into above categories. Overload them separately.
  void Rrx(vixl32::Register rd, vixl32::Register rn) {
    MacroAssembler::Rrx(vixl32::DontCare, rd, rn);
  }
  using MacroAssembler::Rrx;

  void Mul(vixl32::Register rd, vixl32::Register rn, vixl32::Register rm) {
    MacroAssembler::Mul(vixl32::DontCare, rd, rn, rm);
  }
  using MacroAssembler::Mul;

  // TODO: Remove when MacroAssembler::Add(FlagsUpdate, Condition, Register, Register, Operand)
  // makes the right decision about 16-bit encodings.
  void Add(vixl32::Register rd, vixl32::Register rn, const vixl32::Operand& operand) {
    // For the rd == rn plain-register case, let VIXL choose the encoding
    // itself rather than forcing DontCare.
    if (rd.Is(rn) && operand.IsPlainRegister()) {
      MacroAssembler::Add(rd, rn, operand);
    } else {
      MacroAssembler::Add(vixl32::DontCare, rd, rn, operand);
    }
  }
  using MacroAssembler::Add;

  // These interfaces try to use 16-bit T2 encoding of B instruction.
  void B(vixl32::Label* label);
  // For B(label), we always try to use Narrow encoding, because 16-bit T2 encoding supports
  // jumping within 2KB range. For B(cond, label), because the supported branch range is 256
  // bytes; we use the far_target hint to try to use 16-bit T1 encoding for short range jumps.
  void B(vixl32::Condition cond, vixl32::Label* label, bool is_far_target = true);

  // Use literal for generating double constant if it doesn't fit VMOV encoding.
  void Vmov(vixl32::DRegister rd, double imm) {
    if (vixl::VFP::IsImmFP64(imm)) {
      MacroAssembler::Vmov(rd, imm);
    } else {
      // Not encodable as a VMOV immediate: load it from a literal pool entry.
      MacroAssembler::Vldr(rd, imm);
    }
  }
  using MacroAssembler::Vmov;
};
| 151 | |
// ART assembler facade over an ArmVIXLMacroAssembler. Implements the generic
// art::Assembler interface (code size, finalization, instruction copy-out)
// and adds ARM-specific helpers: load/store with arbitrary offsets, immediate
// materialization, heap-reference poisoning and a marking-register check.
// Method definitions live in the corresponding .cc file.
class ArmVIXLAssembler FINAL : public Assembler {
 private:
  class ArmException;
 public:
  explicit ArmVIXLAssembler(ArenaAllocator* allocator)
      : Assembler(allocator) {
    // Use Thumb2 instruction set.
    vixl_masm_.UseT32();
  }

  virtual ~ArmVIXLAssembler() {}
  // Direct access to the underlying VIXL macro assembler for callers that
  // need to emit instructions themselves.
  ArmVIXLMacroAssembler* GetVIXLAssembler() { return &vixl_masm_; }
  void FinalizeCode() OVERRIDE;

  // Size of generated code.
  size_t CodeSize() const OVERRIDE;
  const uint8_t* CodeBufferBaseAddress() const OVERRIDE;

  // Copy instructions out of assembly buffer into the given region of memory.
  void FinalizeInstructions(const MemoryRegion& region) OVERRIDE;

  // Generic art::Label binding/jumping is unsupported here; use vixl32::Label
  // via GetVIXLAssembler() instead.
  void Bind(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
    UNIMPLEMENTED(FATAL) << "Do not use Bind for ARM";
  }
  void Jump(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
    UNIMPLEMENTED(FATAL) << "Do not use Jump for ARM";
  }

  //
  // Heap poisoning.
  //

  // Poison a heap reference contained in `reg`.
  void PoisonHeapReference(vixl32::Register reg);
  // Unpoison a heap reference contained in `reg`.
  void UnpoisonHeapReference(vixl32::Register reg);
  // Poison a heap reference contained in `reg` if heap poisoning is enabled.
  void MaybePoisonHeapReference(vixl32::Register reg);
  // Unpoison a heap reference contained in `reg` if heap poisoning is enabled.
  void MaybeUnpoisonHeapReference(vixl32::Register reg);

  // Emit code checking the status of the Marking Register, and aborting
  // the program if MR does not match the value stored in the art::Thread
  // object.
  //
  // Argument `temp` is used as a temporary register to generate code.
  // Argument `code` is used to identify the different occurrences of
  // MaybeGenerateMarkingRegisterCheck and is passed to the BKPT instruction.
  void GenerateMarkingRegisterCheck(vixl32::Register temp, int code = 0);

  // Load/store helpers taking an arbitrary int32_t offset. NOTE(review):
  // presumably offsets that do not fit the instruction's addressing mode are
  // split via the Adjust/CanSplitLoadStoreOffset helpers below — confirm in
  // the .cc file.
  void StoreToOffset(StoreOperandType type,
                     vixl32::Register reg,
                     vixl32::Register base,
                     int32_t offset);
  void StoreSToOffset(vixl32::SRegister source, vixl32::Register base, int32_t offset);
  void StoreDToOffset(vixl32::DRegister source, vixl32::Register base, int32_t offset);

  // Materialize an arbitrary 32-bit immediate into `dest`.
  void LoadImmediate(vixl32::Register dest, int32_t value);
  void LoadFromOffset(LoadOperandType type,
                      vixl32::Register reg,
                      vixl32::Register base,
                      int32_t offset);
  void LoadSFromOffset(vixl32::SRegister reg, vixl32::Register base, int32_t offset);
  void LoadDFromOffset(vixl32::DRegister reg, vixl32::Register base, int32_t offset);

  // Load/store the core registers in `regs` at `stack_offset`.
  // NOTE(review): presumably relative to SP — confirm against the .cc.
  void LoadRegisterList(RegList regs, size_t stack_offset);
  void StoreRegisterList(RegList regs, size_t stack_offset);

  // Immediate-encodability predicates and offset-splitting helpers.
  bool ShifterOperandCanAlwaysHold(uint32_t immediate);
  bool ShifterOperandCanHold(Opcode opcode, uint32_t immediate, SetCc set_cc = kCcDontCare);
  bool CanSplitLoadStoreOffset(int32_t allowed_offset_bits,
                               int32_t offset,
                               /*out*/ int32_t* add_to_base,
                               /*out*/ int32_t* offset_for_load_store);
  int32_t AdjustLoadStoreOffset(int32_t allowed_offset_bits,
                                vixl32::Register temp,
                                vixl32::Register base,
                                int32_t offset);
  int32_t GetAllowedLoadOffsetBits(LoadOperandType type);
  int32_t GetAllowedStoreOffsetBits(StoreOperandType type);

  // rd = rd + value / rd = rn + value, for arbitrary 32-bit constants.
  void AddConstant(vixl32::Register rd, int32_t value);
  void AddConstant(vixl32::Register rd, vixl32::Register rn, int32_t value);
  // Variant usable inside an IT block, predicated on `cond`.
  void AddConstantInIt(vixl32::Register rd,
                       vixl32::Register rn,
                       int32_t value,
                       vixl32::Condition cond = vixl32::al);

  // Create a literal that VIXL places when first used and deletes when its
  // literal pool is destroyed (so the caller does not own the allocation).
  template <typename T>
  vixl::aarch32::Literal<T>* CreateLiteralDestroyedWithPool(T value) {
    vixl::aarch32::Literal<T>* literal =
        new vixl::aarch32::Literal<T>(value,
                                      vixl32::RawLiteral::kPlacedWhenUsed,
                                      vixl32::RawLiteral::kDeletedOnPoolDestruction);
    return literal;
  }

 private:
  // VIXL assembler.
  ArmVIXLMacroAssembler vixl_masm_;
};
| 253 | |
// Thread register declaration. Defined elsewhere (presumably the
// corresponding .cc — confirm).
extern const vixl32::Register tr;
// Marking register declaration.
extern const vixl32::Register mr;
Artem Serov | 12e097c | 2016-08-08 15:13:26 +0100 | [diff] [blame] | 258 | |
| 259 | } // namespace arm |
| 260 | } // namespace art |
| 261 | |
| 262 | #endif // ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_VIXL_H_ |