/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains codegen for the Arm64 ISA. */

#include "codegen_arm64.h"

#include "arch/instruction_set_features.h"
#include "arm64_lir.h"
#include "base/bit_utils.h"
#include "base/logging.h"
#include "dex/compiler_ir.h"
#include "dex/mir_graph.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "dex/reg_storage_eq.h"
#include "driver/compiler_driver.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "mirror/array-inl.h"

namespace art {

LIR* Arm64Mir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
  OpRegReg(kOpCmp, src1, src2);
  return OpCondBranch(cond, target);
}

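// A64 has no Thumb-style IT (if-then) blocks; conditional execution is expressed with the
// CSEL family of instructions instead, so the IT hooks below must never be reached here.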
LIR* Arm64Mir2Lir::OpIT(ConditionCode ccode, const char* guide) {
  UNUSED(ccode, guide);
  LOG(FATAL) << "Unexpected use of OpIT for Arm64";
  UNREACHABLE();
}

void Arm64Mir2Lir::OpEndIT(LIR* it) {
  UNUSED(it);
  LOG(FATAL) << "Unexpected use of OpEndIT for Arm64";
}

/*
 * 64-bit 3way compare function.
 *     cmp   xA, xB
 *     csinc wC, wzr, wzr, eq  // wC = (xA == xB) ? 0 : 1
 *     csneg wC, wC, wC, ge    // wC = (xA >= xB) ? wC : -wC
 */
void Arm64Mir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
  RegLocation rl_result;
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);

  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
  NewLIR4(kA64Csinc4rrrc, rl_result.reg.GetReg(), rwzr, rwzr, kArmCondEq);
  NewLIR4(kA64Csneg4rrrc, rl_result.reg.GetReg(), rl_result.reg.GetReg(),
          rl_result.reg.GetReg(), kArmCondGe);
  StoreValue(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                  RegLocation rl_src1, RegLocation rl_shift) {
  OpKind op = kOpBkpt;
  switch (opcode) {
    case Instruction::SHL_LONG:
    case Instruction::SHL_LONG_2ADDR:
      op = kOpLsl;
      break;
    case Instruction::SHR_LONG:
    case Instruction::SHR_LONG_2ADDR:
      op = kOpAsr;
      break;
    case Instruction::USHR_LONG:
    case Instruction::USHR_LONG_2ADDR:
      op = kOpLsr;
      break;
    default:
      LOG(FATAL) << "Unexpected case: " << opcode;
  }
  rl_shift = LoadValue(rl_shift, kCoreReg);
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegReg(op, rl_result.reg, rl_src1.reg, As64BitReg(rl_shift.reg));
  StoreValueWide(rl_dest, rl_result);
}

static constexpr bool kUseDeltaEncodingInGenSelect = false;

void Arm64Mir2Lir::GenSelect(int32_t true_val, int32_t false_val, ConditionCode ccode,
                             RegStorage rs_dest, int result_reg_class) {
  if (false_val == 0 ||               // 0 is better as first operand.
      true_val == 1 ||                // Potentially Csinc.
      true_val == -1 ||               // Potentially Csinv.
      true_val == false_val + 1) {    // Potentially Csinc.
    ccode = NegateComparison(ccode);
    std::swap(true_val, false_val);
  }

  ArmConditionCode code = ArmConditionEncoding(ccode);

  int opcode;                                      // The opcode.
  RegStorage left_op = RegStorage::InvalidReg();   // The operands.
  RegStorage right_op = RegStorage::InvalidReg();  // The operands.

  bool is_wide = rs_dest.Is64Bit();

  RegStorage zero_reg = is_wide ? rs_xzr : rs_wzr;

  if (true_val == 0) {
    left_op = zero_reg;
  } else {
    left_op = rs_dest;
    LoadConstantNoClobber(rs_dest, true_val);
  }
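  // CSxx semantics: on a true condition the result is left_op; otherwise it is
  // right_op + 1 (csinc), ~right_op (csinv) or -right_op (csneg). That is why many of
  // the constant pairs below can be produced without materializing false_val at all.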
  if (false_val == 1) {
    right_op = zero_reg;
    opcode = kA64Csinc4rrrc;
  } else if (false_val == -1) {
    right_op = zero_reg;
    opcode = kA64Csinv4rrrc;
  } else if (false_val == true_val + 1) {
    right_op = left_op;
    opcode = kA64Csinc4rrrc;
  } else if (false_val == -true_val) {
    right_op = left_op;
    opcode = kA64Csneg4rrrc;
  } else if (false_val == ~true_val) {
    right_op = left_op;
    opcode = kA64Csinv4rrrc;
  } else if (true_val == 0) {
    // left_op is zero_reg.
    right_op = rs_dest;
    LoadConstantNoClobber(rs_dest, false_val);
    opcode = kA64Csel4rrrc;
  } else {
    // Generic case.
    RegStorage t_reg2 = AllocTypedTemp(false, result_reg_class);
    if (is_wide) {
      if (t_reg2.Is32Bit()) {
        t_reg2 = As64BitReg(t_reg2);
      }
    } else {
      if (t_reg2.Is64Bit()) {
        t_reg2 = As32BitReg(t_reg2);
      }
    }

    if (kUseDeltaEncodingInGenSelect) {
      int32_t delta = false_val - true_val;
      uint32_t abs_val = delta < 0 ? -delta : delta;

      if (abs_val < 0x1000) {  // TODO: Replace with InexpensiveConstant with opcode.
        // Can encode as immediate to an add.
        right_op = t_reg2;
        OpRegRegImm(kOpAdd, t_reg2, left_op, delta);
      }
    }

    // Load as constant.
    if (!right_op.Valid()) {
      LoadConstantNoClobber(t_reg2, false_val);
      right_op = t_reg2;
    }

    opcode = kA64Csel4rrrc;
  }

  DCHECK(left_op.Valid() && right_op.Valid());
  NewLIR4(is_wide ? WIDE(opcode) : opcode, rs_dest.GetReg(), left_op.GetReg(), right_op.GetReg(),
          code);
}

void Arm64Mir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
                                    int32_t true_val, int32_t false_val, RegStorage rs_dest,
                                    RegisterClass dest_reg_class) {
  DCHECK(rs_dest.Valid());
  OpRegReg(kOpCmp, left_op, right_op);
  GenSelect(true_val, false_val, code, rs_dest, dest_reg_class);
}

void Arm64Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
  UNUSED(bb);
  RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
  rl_src = LoadValue(rl_src, rl_src.ref ? kRefReg : kCoreReg);
  // rl_src may be aliased with rl_result/rl_dest, so do compare early.
  OpRegImm(kOpCmp, rl_src.reg, 0);

  RegLocation rl_dest = mir_graph_->GetDest(mir);

  // The kMirOpSelect has two variants, one for constants and one for moves.
  if (mir->ssa_rep->num_uses == 1) {
    RegLocation rl_result = EvalLoc(rl_dest, rl_dest.ref ? kRefReg : kCoreReg, true);
    GenSelect(mir->dalvikInsn.vB, mir->dalvikInsn.vC, mir->meta.ccode, rl_result.reg,
              rl_dest.ref ? kRefReg : kCoreReg);
    StoreValue(rl_dest, rl_result);
  } else {
    RegLocation rl_true = mir_graph_->reg_location_[mir->ssa_rep->uses[1]];
    RegLocation rl_false = mir_graph_->reg_location_[mir->ssa_rep->uses[2]];

    RegisterClass result_reg_class = rl_dest.ref ? kRefReg : kCoreReg;
    rl_true = LoadValue(rl_true, result_reg_class);
    rl_false = LoadValue(rl_false, result_reg_class);
    RegLocation rl_result = EvalLoc(rl_dest, result_reg_class, true);

    bool is_wide = rl_dest.ref || rl_dest.wide;
    int opcode = is_wide ? WIDE(kA64Csel4rrrc) : kA64Csel4rrrc;
    NewLIR4(opcode, rl_result.reg.GetReg(),
            rl_true.reg.GetReg(), rl_false.reg.GetReg(), ArmConditionEncoding(mir->meta.ccode));
    StoreValue(rl_dest, rl_result);
  }
}

void Arm64Mir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
  RegLocation rl_src1 = mir_graph_->GetSrcWide(mir, 0);
  RegLocation rl_src2 = mir_graph_->GetSrcWide(mir, 2);
  LIR* taken = &block_label_list_[bb->taken];
  LIR* not_taken = &block_label_list_[bb->fall_through];
  // Normalize such that if either operand is constant, src2 will be constant.
  ConditionCode ccode = mir->meta.ccode;
  if (rl_src1.is_const) {
    std::swap(rl_src1, rl_src2);
    ccode = FlipComparisonOrder(ccode);
  }

  rl_src1 = LoadValueWide(rl_src1, kCoreReg);

  if (rl_src2.is_const) {
    // TODO: Optimize for rl_src1.is_const? (Does happen in the boot image at the moment.)

    int64_t val = mir_graph_->ConstantValueWide(rl_src2);
    // Special handling using cbz & cbnz.
    if (val == 0 && (ccode == kCondEq || ccode == kCondNe)) {
      OpCmpImmBranch(ccode, rl_src1.reg, 0, taken);
      OpCmpImmBranch(NegateComparison(ccode), rl_src1.reg, 0, not_taken);
      return;
    }

    // Only handle Imm if src2 is not already in a register.
    rl_src2 = UpdateLocWide(rl_src2);
    if (rl_src2.location != kLocPhysReg) {
      OpRegImm64(kOpCmp, rl_src1.reg, val);
      OpCondBranch(ccode, taken);
      OpCondBranch(NegateComparison(ccode), not_taken);
      return;
    }
  }

  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
  OpCondBranch(ccode, taken);
  OpCondBranch(NegateComparison(ccode), not_taken);
}

/*
 * Generate a register comparison to an immediate and branch. Caller
 * is responsible for setting branch target field.
 */
LIR* Arm64Mir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value,
                                  LIR* target) {
  LIR* branch = nullptr;
  ArmConditionCode arm_cond = ArmConditionEncoding(cond);
  if (check_value == 0) {
    if (arm_cond == kArmCondEq || arm_cond == kArmCondNe) {
      A64Opcode opcode = (arm_cond == kArmCondEq) ? kA64Cbz2rt : kA64Cbnz2rt;
      A64Opcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
      branch = NewLIR2(opcode | wide, reg.GetReg(), 0);
    } else if (arm_cond == kArmCondLs) {
      // kArmCondLs is an unsigned less or equal. A comparison r <= 0 is then the same as cbz.
      // This case happens for a bounds check of array[0].
      A64Opcode opcode = kA64Cbz2rt;
      A64Opcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
      branch = NewLIR2(opcode | wide, reg.GetReg(), 0);
    } else if (arm_cond == kArmCondLt || arm_cond == kArmCondGe) {
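      // (reg < 0) and (reg >= 0) depend only on the sign bit, so test it directly with
      // tbnz/tbz instead of materializing a compare.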
      A64Opcode opcode = (arm_cond == kArmCondLt) ? kA64Tbnz3rht : kA64Tbz3rht;
      A64Opcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
      int value = reg.Is64Bit() ? 63 : 31;
      branch = NewLIR3(opcode | wide, reg.GetReg(), value, 0);
    }
  }

  if (branch == nullptr) {
    OpRegImm(kOpCmp, reg, check_value);
    branch = NewLIR2(kA64B2ct, arm_cond, 0);
  }

  branch->target = target;
  return branch;
}

LIR* Arm64Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg,
                                     RegStorage base_reg, int offset, int check_value,
                                     LIR* target, LIR** compare) {
  DCHECK(compare == nullptr);
  // It is possible that temp register is 64-bit. (ArgReg or RefReg)
  // Always compare 32-bit value no matter what temp_reg is.
  if (temp_reg.Is64Bit()) {
    temp_reg = As32BitReg(temp_reg);
  }
  Load32Disp(base_reg, offset, temp_reg);
  LIR* branch = OpCmpImmBranch(cond, temp_reg, check_value, target);
  return branch;
}

LIR* Arm64Mir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
  bool dest_is_fp = r_dest.IsFloat();
  bool src_is_fp = r_src.IsFloat();
  A64Opcode opcode = kA64Brk1d;
  LIR* res;

  if (LIKELY(dest_is_fp == src_is_fp)) {
    if (LIKELY(!dest_is_fp)) {
      DCHECK_EQ(r_dest.Is64Bit(), r_src.Is64Bit());

      // Core/core copy.
      // Copies involving the sp register require a different instruction.
      opcode = UNLIKELY(A64_REG_IS_SP(r_dest.GetReg())) ? kA64Add4RRdT : kA64Mov2rr;

      // TODO(Arm64): kA64Add4RRdT formally has 4 args, but is used as a 2 args instruction.
      //   This currently works because the other arguments are set to 0 by default. We should
      //   rather introduce an alias kA64Mov2RR.

      // core/core copy. Do a x/x copy only if both registers are x.
      if (r_dest.Is64Bit() && r_src.Is64Bit()) {
        opcode = WIDE(opcode);
      }
    } else {
      // Float/float copy.
      bool dest_is_double = r_dest.IsDouble();
      bool src_is_double = r_src.IsDouble();

      // We do not do float/double or double/float casts here.
      DCHECK_EQ(dest_is_double, src_is_double);

      // Homogeneous float/float copy.
      opcode = (dest_is_double) ? WIDE(kA64Fmov2ff) : kA64Fmov2ff;
    }
  } else {
    // Inhomogeneous register copy.
    if (dest_is_fp) {
      if (r_dest.IsDouble()) {
        opcode = kA64Fmov2Sx;
      } else {
        r_src = Check32BitReg(r_src);
        opcode = kA64Fmov2sw;
      }
    } else {
      if (r_src.IsDouble()) {
        opcode = kA64Fmov2xS;
      } else {
        r_dest = Check32BitReg(r_dest);
        opcode = kA64Fmov2ws;
      }
    }
  }

  res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());

  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
    res->flags.is_nop = true;
  }

  return res;
}

void Arm64Mir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) {
  if (r_dest != r_src) {
    LIR* res = OpRegCopyNoInsert(r_dest, r_src);
    AppendLIR(res);
  }
}

void Arm64Mir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
  OpRegCopy(r_dest, r_src);
}

// Table of magic divisors
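// magic64_base/magic64_eor are A64 logical-immediate encodings used to rebuild magic64
// in at most two instructions (orr with base, then eor with eor or add #1); a value of
// -1 in a field means no such encoding exists and the constant is loaded the long way.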
struct MagicTable {
  int magic64_base;
  int magic64_eor;
  uint64_t magic64;
  uint32_t magic32;
  uint32_t shift;
  DividePattern pattern;
};

static const MagicTable magic_table[] = {
  {   0,      0,                  0,          0, 0, DivideNone},  // 0
  {   0,      0,                  0,          0, 0, DivideNone},  // 1
  {   0,      0,                  0,          0, 0, DivideNone},  // 2
  {0x3c,     -1, 0x5555555555555556, 0x55555556, 0, Divide3},     // 3
  {   0,      0,                  0,          0, 0, DivideNone},  // 4
  {0xf9,     -1, 0x6666666666666667, 0x66666667, 1, Divide5},     // 5
  {0x7c, 0x1041, 0x2AAAAAAAAAAAAAAB, 0x2AAAAAAB, 0, Divide3},     // 6
  {  -1,     -1, 0x924924924924924A, 0x92492493, 2, Divide7},     // 7
  {   0,      0,                  0,          0, 0, DivideNone},  // 8
  {  -1,     -1, 0x38E38E38E38E38E4, 0x38E38E39, 1, Divide5},     // 9
  {0xf9,     -1, 0x6666666666666667, 0x66666667, 2, Divide5},     // 10
  {  -1,     -1, 0x2E8BA2E8BA2E8BA3, 0x2E8BA2E9, 1, Divide5},     // 11
  {0x7c, 0x1041, 0x2AAAAAAAAAAAAAAB, 0x2AAAAAAB, 1, Divide5},     // 12
  {  -1,     -1, 0x4EC4EC4EC4EC4EC5, 0x4EC4EC4F, 2, Divide5},     // 13
  {  -1,     -1, 0x924924924924924A, 0x92492493, 3, Divide7},     // 14
  {0x78,     -1, 0x8888888888888889, 0x88888889, 3, Divide7},     // 15
};

// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
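// The dividend is multiplied by a 32-bit "magic" reciprocal with smull; the high half of
// the 64-bit product, shifted right per the table, approximates the quotient, and
// subtracting the sign word (src asr #31) corrects the rounding for negative inputs.
// E.g. x / 3 is computed as ((int64_t)x * 0x55555556) >> 32 plus that sign fix-up.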
bool Arm64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                      RegLocation rl_src, RegLocation rl_dest, int lit) {
  UNUSED(dalvik_opcode);
  if ((lit < 0) || (lit >= static_cast<int>(arraysize(magic_table)))) {
    return false;
  }
  DividePattern pattern = magic_table[lit].pattern;
  if (pattern == DivideNone) {
    return false;
  }
  // Tuning: add rem patterns
  if (!is_div) {
    return false;
  }

  RegStorage r_magic = AllocTemp();
  LoadConstant(r_magic, magic_table[lit].magic32);
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegStorage r_long_mul = AllocTemp();
  NewLIR3(kA64Smull3xww, As64BitReg(r_long_mul).GetReg(), r_magic.GetReg(), rl_src.reg.GetReg());
  switch (pattern) {
    case Divide3:
      OpRegRegImm(kOpLsr, As64BitReg(r_long_mul), As64BitReg(r_long_mul), 32);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 31));
      break;
    case Divide5:
      OpRegRegImm(kOpAsr, As64BitReg(r_long_mul), As64BitReg(r_long_mul),
                  32 + magic_table[lit].shift);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 31));
      break;
    case Divide7:
      OpRegRegRegShift(kOpAdd, As64BitReg(r_long_mul), As64BitReg(rl_src.reg),
                       As64BitReg(r_long_mul), EncodeShift(kA64Lsr, 32));
      OpRegRegImm(kOpAsr, r_long_mul, r_long_mul, magic_table[lit].shift);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 31));
      break;
    default:
      LOG(FATAL) << "Unexpected pattern: " << pattern;
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

bool Arm64Mir2Lir::SmallLiteralDivRem64(Instruction::Code dalvik_opcode, bool is_div,
                                        RegLocation rl_src, RegLocation rl_dest, int64_t lit) {
  UNUSED(dalvik_opcode);
  if ((lit < 0) || (lit >= static_cast<int>(arraysize(magic_table)))) {
    return false;
  }
  DividePattern pattern = magic_table[lit].pattern;
  if (pattern == DivideNone) {
    return false;
  }
  // Tuning: add rem patterns
  if (!is_div) {
    return false;
  }

  RegStorage r_magic = AllocTempWide();
  rl_src = LoadValueWide(rl_src, kCoreReg);
  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  RegStorage r_long_mul = AllocTempWide();

  if (magic_table[lit].magic64_base >= 0) {
    // Check that the entry in the table is correct.
    if (kIsDebugBuild) {
      uint64_t reconstructed_imm;
      uint64_t base = DecodeLogicalImmediate(/*is_wide*/true, magic_table[lit].magic64_base);
      if (magic_table[lit].magic64_eor >= 0) {
        uint64_t eor = DecodeLogicalImmediate(/*is_wide*/true, magic_table[lit].magic64_eor);
        reconstructed_imm = base ^ eor;
      } else {
        reconstructed_imm = base + 1;
      }
      DCHECK_EQ(reconstructed_imm, magic_table[lit].magic64) << " for literal " << lit;
    }

    // Load the magic constant in two instructions.
    NewLIR3(WIDE(kA64Orr3Rrl), r_magic.GetReg(), rxzr, magic_table[lit].magic64_base);
    if (magic_table[lit].magic64_eor >= 0) {
      NewLIR3(WIDE(kA64Eor3Rrl), r_magic.GetReg(), r_magic.GetReg(),
              magic_table[lit].magic64_eor);
    } else {
      NewLIR4(WIDE(kA64Add4RRdT), r_magic.GetReg(), r_magic.GetReg(), 1, 0);
    }
  } else {
    LoadConstantWide(r_magic, magic_table[lit].magic64);
  }

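  // smulh keeps the high 64 bits of the 128-bit signed product, i.e. (magic * src) >> 64.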
  NewLIR3(kA64Smulh3xxx, r_long_mul.GetReg(), r_magic.GetReg(), rl_src.reg.GetReg());
  switch (pattern) {
    case Divide3:
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 63));
      break;
    case Divide5:
      OpRegRegImm(kOpAsr, r_long_mul, r_long_mul, magic_table[lit].shift);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 63));
      break;
    case Divide7:
      OpRegRegReg(kOpAdd, r_long_mul, rl_src.reg, r_long_mul);
      OpRegRegImm(kOpAsr, r_long_mul, r_long_mul, magic_table[lit].shift);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 63));
      break;
    default:
      LOG(FATAL) << "Unexpected pattern: " << pattern;
  }
  StoreValueWide(rl_dest, rl_result);
  return true;
}

// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Arm64Mir2Lir::HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                    RegLocation rl_src, RegLocation rl_dest, int lit) {
  return HandleEasyDivRem64(dalvik_opcode, is_div, rl_src, rl_dest, static_cast<int>(lit));
}

// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Arm64Mir2Lir::HandleEasyDivRem64(Instruction::Code dalvik_opcode, bool is_div,
                                      RegLocation rl_src, RegLocation rl_dest, int64_t lit) {
  const bool is_64bit = rl_dest.wide;
  const int nbits = (is_64bit) ? 64 : 32;

  if (lit < 2) {
    return false;
  }
  if (!IsPowerOfTwo(lit)) {
    if (is_64bit) {
      return SmallLiteralDivRem64(dalvik_opcode, is_div, rl_src, rl_dest, lit);
    } else {
      return SmallLiteralDivRem(dalvik_opcode, is_div, rl_src, rl_dest, static_cast<int32_t>(lit));
    }
  }
  int k = CTZ(lit);
  if (k >= nbits - 2) {
    // Avoid special cases.
    return false;
  }

  RegLocation rl_result;
  RegStorage t_reg;
  if (is_64bit) {
    rl_src = LoadValueWide(rl_src, kCoreReg);
    rl_result = EvalLocWide(rl_dest, kCoreReg, true);
    t_reg = AllocTempWide();
  } else {
    rl_src = LoadValue(rl_src, kCoreReg);
    rl_result = EvalLoc(rl_dest, kCoreReg, true);
    t_reg = AllocTemp();
  }

  int shift = EncodeShift(kA64Lsr, nbits - k);
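  // Signed division by 2^k must round toward zero, so negative dividends are biased by
  // (2^k - 1) before the arithmetic shift. The bias is built from the sign bits funnelled
  // in with lsr #(nbits - k); for k == 1 it is simply the sign bit itself.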
  if (is_div) {
    if (lit == 2) {
      // Division by 2 is by far the most common division by constant.
      OpRegRegRegShift(kOpAdd, t_reg, rl_src.reg, rl_src.reg, shift);
      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
    } else {
      OpRegRegImm(kOpAsr, t_reg, rl_src.reg, nbits - 1);
      OpRegRegRegShift(kOpAdd, t_reg, rl_src.reg, t_reg, shift);
      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
    }
  } else {
    if (lit == 2) {
      OpRegRegRegShift(kOpAdd, t_reg, rl_src.reg, rl_src.reg, shift);
      OpRegRegImm64(kOpAnd, t_reg, t_reg, lit - 1);
      OpRegRegRegShift(kOpSub, rl_result.reg, t_reg, rl_src.reg, shift);
    } else {
      RegStorage t_reg2 = (is_64bit) ? AllocTempWide() : AllocTemp();
      OpRegRegImm(kOpAsr, t_reg, rl_src.reg, nbits - 1);
      OpRegRegRegShift(kOpAdd, t_reg2, rl_src.reg, t_reg, shift);
      OpRegRegImm64(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegRegShift(kOpSub, rl_result.reg, t_reg2, t_reg, shift);
    }
  }

  if (is_64bit) {
    StoreValueWide(rl_dest, rl_result);
  } else {
    StoreValue(rl_dest, rl_result);
  }
  return true;
}

bool Arm64Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
  UNUSED(rl_src, rl_dest, lit);
  LOG(FATAL) << "Unexpected use of EasyMultiply for Arm64";
  UNREACHABLE();
}

RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
                                       bool is_div) {
  UNUSED(rl_dest, rl_src1, lit, is_div);
  LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm64";
  UNREACHABLE();
}

RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit, bool is_div) {
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);

  // Put the literal in a temp.
  RegStorage lit_temp = AllocTemp();
  LoadConstant(lit_temp, lit);
  // Use the generic case for div/rem with arg2 in a register.
  // TODO: The literal temp can be freed earlier during a modulus to reduce reg pressure.
  rl_result = GenDivRem(rl_result, reg1, lit_temp, is_div);
  FreeTemp(lit_temp);

  return rl_result;
}

RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
                                    RegLocation rl_src2, bool is_div, int flags) {
  UNUSED(rl_dest, rl_src1, rl_src2, is_div, flags);
  LOG(FATAL) << "Unexpected use of GenDivRem for Arm64";
  UNREACHABLE();
}

RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage r_src1, RegStorage r_src2,
                                    bool is_div) {
  CHECK_EQ(r_src1.Is64Bit(), r_src2.Is64Bit());

  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (is_div) {
    OpRegRegReg(kOpDiv, rl_result.reg, r_src1, r_src2);
  } else {
    // temp = r_src1 / r_src2
    // dest = r_src1 - temp * r_src2
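    // msub d, n, m, a computes a - n * m, so one instruction finishes the remainder.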
    RegStorage temp;
    A64Opcode wide;
    if (rl_result.reg.Is64Bit()) {
      temp = AllocTempWide();
      wide = WIDE(0);
    } else {
      temp = AllocTemp();
      wide = UNWIDE(0);
    }
    OpRegRegReg(kOpDiv, temp, r_src1, r_src2);
    NewLIR4(kA64Msub4rrrr | wide, rl_result.reg.GetReg(), temp.GetReg(),
            r_src2.GetReg(), r_src1.GetReg());
    FreeTemp(temp);
  }
  return rl_result;
}

bool Arm64Mir2Lir::GenInlinedAbsInt(CallInfo* info) {
  RegLocation rl_src = info->args[0];
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);

  // Compare the source value with zero. Write the negated value to the result if
  // negative, otherwise write the original value.
  OpRegImm(kOpCmp, rl_src.reg, 0);
  NewLIR4(kA64Csneg4rrrc, rl_result.reg.GetReg(), rl_src.reg.GetReg(), rl_src.reg.GetReg(),
          kArmCondPl);
  StoreValue(rl_dest, rl_result);
  return true;
}

bool Arm64Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
  RegLocation rl_src = info->args[0];
  rl_src = LoadValueWide(rl_src, kCoreReg);
  RegLocation rl_dest = InlineTargetWide(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);

  // Compare the source value with zero. Write the negated value to the result if
  // negative, otherwise write the original value.
  OpRegImm(kOpCmp, rl_src.reg, 0);
  NewLIR4(WIDE(kA64Csneg4rrrc), rl_result.reg.GetReg(), rl_src.reg.GetReg(),
          rl_src.reg.GetReg(), kArmCondPl);
  StoreValueWide(rl_dest, rl_result);
  return true;
}

bool Arm64Mir2Lir::GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) {
  DCHECK_EQ(cu_->instruction_set, kArm64);
  RegLocation rl_src1 = info->args[0];
  RegLocation rl_src2 = (is_long) ? info->args[2] : info->args[1];
  rl_src1 = (is_long) ? LoadValueWide(rl_src1, kCoreReg) : LoadValue(rl_src1, kCoreReg);
  rl_src2 = (is_long) ? LoadValueWide(rl_src2, kCoreReg) : LoadValue(rl_src2, kCoreReg);
  RegLocation rl_dest = (is_long) ? InlineTargetWide(info) : InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
  NewLIR4((is_long) ? WIDE(kA64Csel4rrrc) : kA64Csel4rrrc, rl_result.reg.GetReg(),
          rl_src1.reg.GetReg(), rl_src2.reg.GetReg(), (is_min) ? kArmCondLt : kArmCondGt);
  (is_long) ? StoreValueWide(rl_dest, rl_result) : StoreValue(rl_dest, rl_result);
  return true;
}

bool Arm64Mir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
  RegLocation rl_src_address = info->args[0];  // long address
  RegLocation rl_dest = (size == k64) ? InlineTargetWide(info) : InlineTarget(info);
  RegLocation rl_address = LoadValueWide(rl_src_address, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);

  LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, kNotVolatile);
  if (size == k64) {
    StoreValueWide(rl_dest, rl_result);
  } else {
    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
    StoreValue(rl_dest, rl_result);
  }
  return true;
}

bool Arm64Mir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
  RegLocation rl_src_address = info->args[0];  // long address
  RegLocation rl_src_value = info->args[2];  // [size] value
  RegLocation rl_address = LoadValueWide(rl_src_address, kCoreReg);

  RegLocation rl_value;
  if (size == k64) {
    rl_value = LoadValueWide(rl_src_value, kCoreReg);
  } else {
    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
    rl_value = LoadValue(rl_src_value, kCoreReg);
  }
  StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size, kNotVolatile);
  return true;
}

bool Arm64Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
  DCHECK_EQ(cu_->instruction_set, kArm64);
  // Unused - RegLocation rl_src_unsafe = info->args[0];
  RegLocation rl_src_obj = info->args[1];  // Object - known non-null
  RegLocation rl_src_offset = info->args[2];  // long low
  RegLocation rl_src_expected = info->args[4];  // int, long or Object
  // If is_long, high half is in info->args[5]
  RegLocation rl_src_new_value = info->args[is_long ? 6 : 5];  // int, long or Object
  // If is_long, high half is in info->args[7]
  RegLocation rl_dest = InlineTarget(info);  // boolean place for result

  // Load Object and offset
  RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
  RegLocation rl_offset = LoadValueWide(rl_src_offset, kCoreReg);

  RegLocation rl_new_value;
  RegLocation rl_expected;
  if (is_long) {
    rl_new_value = LoadValueWide(rl_src_new_value, kCoreReg);
    rl_expected = LoadValueWide(rl_src_expected, kCoreReg);
  } else {
    rl_new_value = LoadValue(rl_src_new_value, is_object ? kRefReg : kCoreReg);
    rl_expected = LoadValue(rl_src_expected, is_object ? kRefReg : kCoreReg);
  }

  if (is_object && !mir_graph_->IsConstantNullRef(rl_new_value)) {
    // Mark card for object assuming new value is stored.
    MarkGCCard(0, rl_new_value.reg, rl_object.reg);
  }

  RegStorage r_ptr = AllocTempRef();
  OpRegRegReg(kOpAdd, r_ptr, rl_object.reg, rl_offset.reg);

  // Free now unneeded rl_object and rl_offset to give more temps.
  ClobberSReg(rl_object.s_reg_low);
  FreeTemp(rl_object.reg);
  ClobberSReg(rl_offset.s_reg_low);
  FreeTemp(rl_offset.reg);

  // do {
  //   tmp = [r_ptr] - expected;
  // } while (tmp == 0 && failure([r_ptr] <- r_new_value));
  // result = tmp != 0;

  RegStorage r_tmp;
  RegStorage r_tmp_stored;
  RegStorage rl_new_value_stored = rl_new_value.reg;
  A64Opcode wide = UNWIDE(0);
  if (is_long) {
    r_tmp_stored = r_tmp = AllocTempWide();
    wide = WIDE(0);
  } else if (is_object) {
    // References use 64-bit registers, but are stored as compressed 32-bit values.
    // This means r_tmp_stored != r_tmp.
    r_tmp = AllocTempRef();
    r_tmp_stored = As32BitReg(r_tmp);
    rl_new_value_stored = As32BitReg(rl_new_value_stored);
  } else {
    r_tmp_stored = r_tmp = AllocTemp();
  }

  RegStorage r_tmp32 = (r_tmp.Is32Bit()) ? r_tmp : As32BitReg(r_tmp);
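  // ldaxr/stlxr are a load-acquire/store-release exclusive pair: stlxr writes 0 to its
  // status register on success and non-zero if the exclusive monitor was lost, in which
  // case the compare-exchange is retried.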
  LIR* loop = NewLIR0(kPseudoTargetLabel);
  NewLIR2(kA64Ldaxr2rX | wide, r_tmp_stored.GetReg(), r_ptr.GetReg());
  OpRegReg(kOpCmp, r_tmp, rl_expected.reg);
  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
  LIR* early_exit = OpCondBranch(kCondNe, nullptr);
  NewLIR3(kA64Stlxr3wrX | wide, r_tmp32.GetReg(), rl_new_value_stored.GetReg(), r_ptr.GetReg());
  NewLIR3(kA64Cmp3RdT, r_tmp32.GetReg(), 0, ENCODE_NO_SHIFT);
  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
  OpCondBranch(kCondNe, loop);

  LIR* exit_loop = NewLIR0(kPseudoTargetLabel);
  early_exit->target = exit_loop;

  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  NewLIR4(kA64Csinc4rrrc, rl_result.reg.GetReg(), rwzr, rwzr, kArmCondNe);

  FreeTemp(r_tmp);  // Now unneeded.
  FreeTemp(r_ptr);  // Now unneeded.

  StoreValue(rl_dest, rl_result);

  return true;
}

bool Arm64Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
  constexpr int kLargeArrayThreshold = 512;

  RegLocation rl_src = info->args[0];
  RegLocation rl_src_pos = info->args[1];
  RegLocation rl_dst = info->args[2];
  RegLocation rl_dst_pos = info->args[3];
  RegLocation rl_length = info->args[4];
  // Compile time check, handle exception by non-inline method to reduce related meta-data.
  if ((rl_src_pos.is_const && (mir_graph_->ConstantValue(rl_src_pos) < 0)) ||
      (rl_dst_pos.is_const && (mir_graph_->ConstantValue(rl_dst_pos) < 0)) ||
      (rl_length.is_const && (mir_graph_->ConstantValue(rl_length) < 0))) {
    return false;
  }

  ClobberCallerSave();
  LockCallTemps();  // Prepare for explicit register usage.
  RegStorage rs_src = rs_x0;
  RegStorage rs_dst = rs_x1;
  LoadValueDirectFixed(rl_src, rs_src);
  LoadValueDirectFixed(rl_dst, rs_dst);

  // Handle null pointer exception in slow-path.
  LIR* src_check_branch = OpCmpImmBranch(kCondEq, rs_src, 0, nullptr);
  LIR* dst_check_branch = OpCmpImmBranch(kCondEq, rs_dst, 0, nullptr);
  // Handle potential overlapping in slow-path.
  // TUNING: Support overlapping cases.
  LIR* src_dst_same = OpCmpBranch(kCondEq, rs_src, rs_dst, nullptr);
  // Handle exception or big length in slow-path.
  RegStorage rs_length = rs_w2;
  LoadValueDirectFixed(rl_length, rs_length);
  LIR* len_neg_or_too_big = OpCmpImmBranch(kCondHi, rs_length, kLargeArrayThreshold, nullptr);
  // Src bounds check.
  RegStorage rs_src_pos = rs_w3;
  RegStorage rs_arr_length = rs_w4;
  LoadValueDirectFixed(rl_src_pos, rs_src_pos);
  LIR* src_pos_negative = OpCmpImmBranch(kCondLt, rs_src_pos, 0, nullptr);
  Load32Disp(rs_src, mirror::Array::LengthOffset().Int32Value(), rs_arr_length);
  OpRegReg(kOpSub, rs_arr_length, rs_src_pos);
  LIR* src_bad_len = OpCmpBranch(kCondLt, rs_arr_length, rs_length, nullptr);
  // Dst bounds check.
  RegStorage rs_dst_pos = rs_w5;
  LoadValueDirectFixed(rl_dst_pos, rs_dst_pos);
  LIR* dst_pos_negative = OpCmpImmBranch(kCondLt, rs_dst_pos, 0, nullptr);
  Load32Disp(rs_dst, mirror::Array::LengthOffset().Int32Value(), rs_arr_length);
  OpRegReg(kOpSub, rs_arr_length, rs_dst_pos);
  LIR* dst_bad_len = OpCmpBranch(kCondLt, rs_arr_length, rs_length, nullptr);

  // Everything is checked now.
  // Set rs_src to the address of the first element to be copied.
  rs_src_pos = As64BitReg(rs_src_pos);
  OpRegImm(kOpAdd, rs_src, mirror::Array::DataOffset(2).Int32Value());
  OpRegRegImm(kOpLsl, rs_src_pos, rs_src_pos, 1);
  OpRegReg(kOpAdd, rs_src, rs_src_pos);
  // Set rs_dst to the address of the first element to be copied.
  rs_dst_pos = As64BitReg(rs_dst_pos);
  OpRegImm(kOpAdd, rs_dst, mirror::Array::DataOffset(2).Int32Value());
  OpRegRegImm(kOpLsl, rs_dst_pos, rs_dst_pos, 1);
  OpRegReg(kOpAdd, rs_dst, rs_dst_pos);

  // rs_arr_length won't be used anymore.
  RegStorage rs_tmp = rs_arr_length;
  // Use 64-bit view since rs_length will be used as index.
  rs_length = As64BitReg(rs_length);
  OpRegRegImm(kOpLsl, rs_length, rs_length, 1);

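  // rs_length now holds a byte count. The tail is peeled first: bit 1 set means an odd
  // number of chars remains, bit 2 a dangling pair; the main loop then moves 8 bytes per
  // iteration, indexing backwards from the end of both arrays.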
  // Copy one element.
  LIR* jmp_to_copy_two = NewLIR3(WIDE(kA64Tbz3rht), rs_length.GetReg(), 1, 0);
  OpRegImm(kOpSub, rs_length, 2);
  LoadBaseIndexed(rs_src, rs_length, rs_tmp, 0, kSignedHalf);
  StoreBaseIndexed(rs_dst, rs_length, rs_tmp, 0, kSignedHalf);

  // Copy two elements.
  LIR* copy_two = NewLIR0(kPseudoTargetLabel);
  LIR* jmp_to_copy_four = NewLIR3(WIDE(kA64Tbz3rht), rs_length.GetReg(), 2, 0);
  OpRegImm(kOpSub, rs_length, 4);
  LoadBaseIndexed(rs_src, rs_length, rs_tmp, 0, k32);
  StoreBaseIndexed(rs_dst, rs_length, rs_tmp, 0, k32);

  // Copy four elements.
  LIR* copy_four = NewLIR0(kPseudoTargetLabel);
  LIR* jmp_to_ret = OpCmpImmBranch(kCondEq, rs_length, 0, nullptr);
  LIR* begin_loop = NewLIR0(kPseudoTargetLabel);
  OpRegImm(kOpSub, rs_length, 8);
  rs_tmp = As64BitReg(rs_tmp);
  LoadBaseIndexed(rs_src, rs_length, rs_tmp, 0, k64);
  StoreBaseIndexed(rs_dst, rs_length, rs_tmp, 0, k64);
  LIR* jmp_to_loop = OpCmpImmBranch(kCondNe, rs_length, 0, nullptr);
  LIR* loop_finished = OpUnconditionalBranch(nullptr);

  LIR* check_failed = NewLIR0(kPseudoTargetLabel);
  LIR* launchpad_branch = OpUnconditionalBranch(nullptr);
  LIR* return_point = NewLIR0(kPseudoTargetLabel);

  src_check_branch->target = check_failed;
  dst_check_branch->target = check_failed;
  src_dst_same->target = check_failed;
  len_neg_or_too_big->target = check_failed;
  src_pos_negative->target = check_failed;
  src_bad_len->target = check_failed;
  dst_pos_negative->target = check_failed;
  dst_bad_len->target = check_failed;
  jmp_to_copy_two->target = copy_two;
  jmp_to_copy_four->target = copy_four;
  jmp_to_ret->target = return_point;
  jmp_to_loop->target = begin_loop;
  loop_finished->target = return_point;

  AddIntrinsicSlowPath(info, launchpad_branch, return_point);
  ClobberCallerSave();  // We must clobber everything because slow path will return here.

  return true;
}

void Arm64Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
  ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
  LIR* lir = NewLIR2(kA64Ldr2rp, As32BitReg(reg).GetReg(), 0);
  lir->target = target;
}

bool Arm64Mir2Lir::CanUseOpPcRelDexCacheArrayLoad() const {
  return dex_cache_arrays_layout_.Valid();
}

void Arm64Mir2Lir::OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, RegStorage r_dest,
                                            bool wide) {
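  // Emit an adrp/load pair. Both LIRs record the dex file and element offset so the
  // later fixup pass can patch the page address and the load's page-offset immediate.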
  LIR* adrp = NewLIR2(kA64Adrp2xd, r_dest.GetReg(), 0);
  adrp->operands[2] = WrapPointer(dex_file);
  adrp->operands[3] = offset;
  adrp->operands[4] = WrapPointer(adrp);
  dex_cache_access_insns_.push_back(adrp);
  if (wide) {
    DCHECK(r_dest.Is64Bit());
  }
  LIR* ldr = LoadBaseDisp(r_dest, 0, r_dest, wide ? k64 : kReference, kNotVolatile);
  ldr->operands[4] = adrp->operands[4];
  ldr->flags.fixup = kFixupLabel;
  dex_cache_access_insns_.push_back(ldr);
}

LIR* Arm64Mir2Lir::OpVldm(RegStorage r_base, int count) {
  UNUSED(r_base, count);
  LOG(FATAL) << "Unexpected use of OpVldm for Arm64";
  UNREACHABLE();
}

LIR* Arm64Mir2Lir::OpVstm(RegStorage r_base, int count) {
  UNUSED(r_base, count);
  LOG(FATAL) << "Unexpected use of OpVstm for Arm64";
  UNREACHABLE();
}

void Arm64Mir2Lir::GenMaddMsubInt(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
                                  RegLocation rl_src3, bool is_sub) {
  rl_src1 = LoadValue(rl_src1, kCoreReg);
  rl_src2 = LoadValue(rl_src2, kCoreReg);
  rl_src3 = LoadValue(rl_src3, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  NewLIR4(is_sub ? kA64Msub4rrrr : kA64Madd4rrrr, rl_result.reg.GetReg(), rl_src1.reg.GetReg(),
          rl_src2.reg.GetReg(), rl_src3.reg.GetReg());
  StoreValue(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenMaddMsubLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
                                   RegLocation rl_src3, bool is_sub) {
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  rl_src3 = LoadValueWide(rl_src3, kCoreReg);
  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  NewLIR4(is_sub ? WIDE(kA64Msub4rrrr) : WIDE(kA64Madd4rrrr), rl_result.reg.GetReg(),
          rl_src1.reg.GetReg(), rl_src2.reg.GetReg(), rl_src3.reg.GetReg());
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
                                                 RegLocation rl_result, int lit ATTRIBUTE_UNUSED,
                                                 int first_bit, int second_bit) {
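  // lit has exactly two bits set, so
  // x * lit == (x + (x << (second_bit - first_bit))) << first_bit.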
  OpRegRegRegShift(kOpAdd, rl_result.reg, rl_src.reg, rl_src.reg,
                   EncodeShift(kA64Lsl, second_bit - first_bit));
  if (first_bit != 0) {
    OpRegRegImm(kOpLsl, rl_result.reg, rl_result.reg, first_bit);
  }
}

void Arm64Mir2Lir::GenDivZeroCheckWide(RegStorage reg ATTRIBUTE_UNUSED) {
  LOG(FATAL) << "Unexpected use of GenDivZero for Arm64";
}

// Test suspend flag, return target of taken suspend branch
LIR* Arm64Mir2Lir::OpTestSuspend(LIR* target) {
  RegStorage r_tmp = AllocTemp();
  LoadBaseDisp(rs_xSELF, Thread::ThreadFlagsOffset<kArm64PointerSize>().Int32Value(), r_tmp,
               kUnsignedHalf, kNotVolatile);
  LIR* cmp_branch = OpCmpImmBranch(target == nullptr ? kCondNe : kCondEq, r_tmp, 0, target);
  FreeTemp(r_tmp);
  return cmp_branch;
}

// Decrement register and branch on condition
LIR* Arm64Mir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) {
  // Combine sub & test using sub setflags encoding here. We need to make sure a
  // subtract form that sets carry is used, so generate explicitly.
  // TODO: might be best to add a new op, kOpSubs, and handle it generically.
  A64Opcode opcode = reg.Is64Bit() ? WIDE(kA64Subs3rRd) : UNWIDE(kA64Subs3rRd);
  NewLIR3(opcode, reg.GetReg(), reg.GetReg(), 1);  // For value == 1, this should set flags.
  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
  return OpCondBranch(c_code, target);
}

bool Arm64Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
  if (!cu_->compiler_driver->GetInstructionSetFeatures()->IsSmp()) {
    return false;
  }
  // Start off with using the last LIR as the barrier. If it is not enough, then we will
  // generate one.
  LIR* barrier = last_lir_insn_;

  int dmb_flavor;
  // TODO: revisit Arm barrier kinds
  switch (barrier_kind) {
    case kAnyStore: dmb_flavor = kISH; break;
    case kLoadAny: dmb_flavor = kISH; break;
    // We conjecture that kISHLD is insufficient. It is documented
    // to provide LoadLoad | StoreStore ordering. But if this were used
    // to implement volatile loads, we suspect that the lack of store
    // atomicity on ARM would cause us to allow incorrect results for
    // the canonical IRIW example. But we're not sure.
    // We should be using acquire loads instead.
    case kStoreStore: dmb_flavor = kISHST; break;
    case kAnyAny: dmb_flavor = kISH; break;
    default:
      LOG(FATAL) << "Unexpected MemBarrierKind: " << barrier_kind;
      dmb_flavor = kSY;  // quiet gcc.
      break;
  }

  bool ret = false;

  // If the same barrier already exists, don't generate another.
  if (barrier == nullptr
      || (barrier->opcode != kA64Dmb1B || barrier->operands[0] != dmb_flavor)) {
    barrier = NewLIR1(kA64Dmb1B, dmb_flavor);
    ret = true;
  }

  // At this point we must have a memory barrier. Mark it as a scheduling barrier as well.
  DCHECK(!barrier->flags.use_def_invalid);
  barrier->u.m.def_mask = &kEncodeAll;
  return ret;
}

void Arm64Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result;

  rl_src = LoadValue(rl_src, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
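  // sbfm x, x, #0, #31 is the canonical sxtw alias: sign-extend the low 32 bits to 64.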
Andreas Gampe4b537a82014-06-30 22:24:53 -07001081 NewLIR4(WIDE(kA64Sbfm4rrdd), rl_result.reg.GetReg(), As64BitReg(rl_src.reg).GetReg(), 0, 31);
Serban Constantinescued65c5e2014-05-22 15:10:18 +01001082 StoreValueWide(rl_dest, rl_result);
1083}

void Arm64Mir2Lir::GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest,
                                 RegLocation rl_src1, RegLocation rl_src2, bool is_div,
                                 int flags) {
  if (rl_src2.is_const) {
    DCHECK(rl_src2.wide);
    int64_t lit = mir_graph_->ConstantValueWide(rl_src2);
    if (HandleEasyDivRem64(opcode, is_div, rl_src1, rl_dest, lit)) {
      return;
    }
  }

  RegLocation rl_result;
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  if ((flags & MIR_IGNORE_DIV_ZERO_CHECK) == 0) {
    GenDivZeroCheck(rl_src2.reg);
  }
  rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, is_div);
  StoreValueWide(rl_dest, rl_result);
}
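// Note (assumption about the helpers, which live elsewhere):
// HandleEasyDivRem64 strength-reduces suitable constant divisors (for example,
// powers of two typically become shift/adjust sequences), so GenDivRem's
// sdiv-based general case is presumably only reached when no rewrite applies.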

void Arm64Mir2Lir::GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1,
                             RegLocation rl_src2) {
  RegLocation rl_result;

  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegRegShift(op, rl_result.reg, rl_src1.reg, rl_src2.reg, ENCODE_NO_SHIFT);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result;

  rl_src = LoadValueWide(rl_src, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegShift(kOpNeg, rl_result.reg, rl_src.reg, ENCODE_NO_SHIFT);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result;

  rl_src = LoadValueWide(rl_src, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegShift(kOpMvn, rl_result.reg, rl_src.reg, ENCODE_NO_SHIFT);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                  RegLocation rl_src1, RegLocation rl_src2, int flags) {
  switch (opcode) {
    case Instruction::NOT_LONG:
      GenNotLong(rl_dest, rl_src2);
      return;
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
      GenLongOp(kOpAdd, rl_dest, rl_src1, rl_src2);
      return;
    case Instruction::SUB_LONG:
    case Instruction::SUB_LONG_2ADDR:
      GenLongOp(kOpSub, rl_dest, rl_src1, rl_src2);
      return;
    case Instruction::MUL_LONG:
    case Instruction::MUL_LONG_2ADDR:
      GenLongOp(kOpMul, rl_dest, rl_src1, rl_src2);
      return;
    case Instruction::DIV_LONG:
    case Instruction::DIV_LONG_2ADDR:
      GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ true, flags);
      return;
    case Instruction::REM_LONG:
    case Instruction::REM_LONG_2ADDR:
      GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ false, flags);
      return;
    case Instruction::AND_LONG_2ADDR:
    case Instruction::AND_LONG:
      GenLongOp(kOpAnd, rl_dest, rl_src1, rl_src2);
      return;
    case Instruction::OR_LONG:
    case Instruction::OR_LONG_2ADDR:
      GenLongOp(kOpOr, rl_dest, rl_src1, rl_src2);
      return;
    case Instruction::XOR_LONG:
    case Instruction::XOR_LONG_2ADDR:
      GenLongOp(kOpXor, rl_dest, rl_src1, rl_src2);
      return;
    case Instruction::NEG_LONG: {
      GenNegLong(rl_dest, rl_src2);
      return;
    }
    default:
      LOG(FATAL) << "Invalid long arith op";
      return;
  }
}

/*
 * Generate array load
 */
void Arm64Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
                               RegLocation rl_index, RegLocation rl_dest, int scale) {
  RegisterClass reg_class = RegClassBySize(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  int data_offset;
  RegLocation rl_result;
  bool constant_index = rl_index.is_const;
  rl_array = LoadValue(rl_array, kRefReg);
  if (!constant_index) {
    rl_index = LoadValue(rl_index, kCoreReg);
  }

  if (rl_dest.wide) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  /* null object? */
  GenNullCheck(rl_array.reg, opt_flags);

  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
  RegStorage reg_len;
  if (needs_range_check) {
    reg_len = AllocTemp();
    /* Get len */
    Load32Disp(rl_array.reg, len_offset, reg_len);
    MarkPossibleNullPointerException(opt_flags);
  } else {
    ForceImplicitNullCheck(rl_array.reg, opt_flags);
  }
  if (constant_index) {
    rl_result = EvalLoc(rl_dest, reg_class, true);

    if (needs_range_check) {
      GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
      FreeTemp(reg_len);
    }
    // Fold the constant index into the data offset.
    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
    if (rl_result.ref) {
      LoadRefDisp(rl_array.reg, data_offset, rl_result.reg, kNotVolatile);
    } else {
      LoadBaseDisp(rl_array.reg, data_offset, rl_result.reg, size, kNotVolatile);
    }
  } else {
    // Offset base, then use indexed load.
    RegStorage reg_ptr = AllocTempRef();
    OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
    FreeTemp(rl_array.reg);
    rl_result = EvalLoc(rl_dest, reg_class, true);

    if (needs_range_check) {
      GenArrayBoundsCheck(rl_index.reg, reg_len);
      FreeTemp(reg_len);
    }
    if (rl_result.ref) {
      LoadRefIndexed(reg_ptr, rl_index.reg, rl_result.reg, scale);
    } else {
      LoadBaseIndexed(reg_ptr, rl_index.reg, rl_result.reg, scale, size);
    }
    FreeTemp(reg_ptr);
  }
  if (rl_dest.wide) {
    StoreValueWide(rl_dest, rl_result);
  } else {
    StoreValue(rl_dest, rl_result);
  }
}
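// Illustrative: with a constant index the load folds into one instruction,
// e.g. reading a[5] from an int[] becomes "ldr w0, [x1, #(data_offset + 20)]"
// (5 << 2 == 20); a variable index pays one extra add to form the data
// pointer first.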

/*
 * Generate array store
 *
 */
void Arm64Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                               RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark) {
  RegisterClass reg_class = RegClassBySize(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  bool constant_index = rl_index.is_const;

  int data_offset;
  if (size == k64 || size == kDouble) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  rl_array = LoadValue(rl_array, kRefReg);
  if (!constant_index) {
    rl_index = LoadValue(rl_index, kCoreReg);
  }

  RegStorage reg_ptr;
  bool allocated_reg_ptr_temp = false;
  if (constant_index) {
    reg_ptr = rl_array.reg;
  } else if (IsTemp(rl_array.reg) && !card_mark) {
    Clobber(rl_array.reg);
    reg_ptr = rl_array.reg;
  } else {
    allocated_reg_ptr_temp = true;
    reg_ptr = AllocTempRef();
  }

  /* null object? */
  GenNullCheck(rl_array.reg, opt_flags);

  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
  RegStorage reg_len;
  if (needs_range_check) {
    reg_len = AllocTemp();
    // NOTE: max live temps (4) here.
    /* Get len */
    Load32Disp(rl_array.reg, len_offset, reg_len);
    MarkPossibleNullPointerException(opt_flags);
  } else {
    ForceImplicitNullCheck(rl_array.reg, opt_flags);
  }
  /* at this point, reg_ptr points to array, 2 live temps */
  if (rl_src.wide) {
    rl_src = LoadValueWide(rl_src, reg_class);
  } else {
    rl_src = LoadValue(rl_src, reg_class);
  }
  if (constant_index) {
    if (needs_range_check) {
      GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
      FreeTemp(reg_len);
    }
    // Fold the constant index into the data offset.
    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
    if (rl_src.ref) {
      StoreRefDisp(reg_ptr, data_offset, rl_src.reg, kNotVolatile);
    } else {
      StoreBaseDisp(reg_ptr, data_offset, rl_src.reg, size, kNotVolatile);
    }
  } else {
    /* reg_ptr -> array data */
    OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
    if (needs_range_check) {
      GenArrayBoundsCheck(rl_index.reg, reg_len);
      FreeTemp(reg_len);
    }
    if (rl_src.ref) {
      StoreRefIndexed(reg_ptr, rl_index.reg, rl_src.reg, scale);
    } else {
      StoreBaseIndexed(reg_ptr, rl_index.reg, rl_src.reg, scale, size);
    }
  }
  if (allocated_reg_ptr_temp) {
    FreeTemp(reg_ptr);
  }
  if (card_mark) {
    MarkGCCard(opt_flags, rl_src.reg, rl_array.reg);
  }
}
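// Note: when card_mark is set, rl_array.reg must stay live until MarkGCCard
// records the reference store for the write barrier, which is why the code
// above declines to reuse it as reg_ptr in that case.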
1341
Matteo Franchin43ec8732014-03-31 15:00:14 +01001342void Arm64Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
Razvan A Lupusoru5c5676b2014-09-29 16:42:11 -07001343 RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift,
Ian Rogers6a3c1fc2014-10-31 00:33:20 -07001344 int flags ATTRIBUTE_UNUSED) {
Serban Constantinescued65c5e2014-05-22 15:10:18 +01001345 OpKind op = kOpBkpt;
Matteo Franchin43ec8732014-03-31 15:00:14 +01001346 // Per spec, we only care about low 6 bits of shift amount.
1347 int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
Serban Constantinescued65c5e2014-05-22 15:10:18 +01001348 rl_src = LoadValueWide(rl_src, kCoreReg);
Matteo Franchin43ec8732014-03-31 15:00:14 +01001349 if (shift_amount == 0) {
1350 StoreValueWide(rl_dest, rl_src);
1351 return;
1352 }
Serban Constantinescued65c5e2014-05-22 15:10:18 +01001353
1354 RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
Matteo Franchin43ec8732014-03-31 15:00:14 +01001355 switch (opcode) {
1356 case Instruction::SHL_LONG:
1357 case Instruction::SHL_LONG_2ADDR:
Serban Constantinescued65c5e2014-05-22 15:10:18 +01001358 op = kOpLsl;
Matteo Franchin43ec8732014-03-31 15:00:14 +01001359 break;
1360 case Instruction::SHR_LONG:
1361 case Instruction::SHR_LONG_2ADDR:
Serban Constantinescued65c5e2014-05-22 15:10:18 +01001362 op = kOpAsr;
Matteo Franchin43ec8732014-03-31 15:00:14 +01001363 break;
1364 case Instruction::USHR_LONG:
1365 case Instruction::USHR_LONG_2ADDR:
Serban Constantinescued65c5e2014-05-22 15:10:18 +01001366 op = kOpLsr;
Matteo Franchin43ec8732014-03-31 15:00:14 +01001367 break;
1368 default:
1369 LOG(FATAL) << "Unexpected case";
1370 }
Serban Constantinescued65c5e2014-05-22 15:10:18 +01001371 OpRegRegImm(op, rl_result.reg, rl_src.reg, shift_amount);
Matteo Franchin43ec8732014-03-31 15:00:14 +01001372 StoreValueWide(rl_dest, rl_result);
1373}
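// E.g. a long shift by 65 is masked to a shift by 1 (65 & 0x3f == 1), matching
// the Java language rule for 64-bit shifts.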

void Arm64Mir2Lir::GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                     RegLocation rl_src1, RegLocation rl_src2, int flags) {
  OpKind op = kOpBkpt;
  switch (opcode) {
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
      op = kOpAdd;
      break;
    case Instruction::SUB_LONG:
    case Instruction::SUB_LONG_2ADDR:
      op = kOpSub;
      break;
    case Instruction::AND_LONG:
    case Instruction::AND_LONG_2ADDR:
      op = kOpAnd;
      break;
    case Instruction::OR_LONG:
    case Instruction::OR_LONG_2ADDR:
      op = kOpOr;
      break;
    case Instruction::XOR_LONG:
    case Instruction::XOR_LONG_2ADDR:
      op = kOpXor;
      break;
    default:
      LOG(FATAL) << "Unexpected opcode";
  }

  if (op == kOpSub) {
    if (!rl_src2.is_const) {
      return GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2, flags);
    }
  } else {
    // Commutativity: make sure the constant ends up in rl_src2.
    if (!rl_src2.is_const) {
      DCHECK(rl_src1.is_const);
      std::swap(rl_src1, rl_src2);
    }
  }
  DCHECK(rl_src2.is_const);
  int64_t val = mir_graph_->ConstantValueWide(rl_src2);

  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegImm64(op, rl_result.reg, rl_src1.reg, val);
  StoreValueWide(rl_dest, rl_result);
}
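// Illustrative: a constant left operand of ADD/AND/OR/XOR is swapped to the
// right above so it can be encoded as an immediate; SUB is not commutative,
// so with the constant on the left it falls back to the register-register
// path instead.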

static uint32_t ExtractReg(uint32_t reg_mask, int* reg) {
  // Find first register.
  int first_bit_set = CTZ(reg_mask) + 1;
  *reg = *reg + first_bit_set;
  reg_mask >>= first_bit_set;
  return reg_mask;
}

/**
 * @brief Split a register list into pairs or single registers.
 *
 * Given a list of registers in @p reg_mask, split the list into pairs. Use as follows:
 * @code
 * int reg1 = -1, reg2 = -1;
 * while (reg_mask) {
 *   reg_mask = GenPairWise(reg_mask, &reg1, &reg2);
 *   if (UNLIKELY(reg2 < 0)) {
 *     // Single register in reg1.
 *   } else {
 *     // Pair in reg1, reg2.
 *   }
 * }
 * @endcode
 */
static uint32_t GenPairWise(uint32_t reg_mask, int* reg1, int* reg2) {
  // Find first register.
  int first_bit_set = CTZ(reg_mask) + 1;
  int reg = *reg1 + first_bit_set;
  reg_mask >>= first_bit_set;

  if (LIKELY(reg_mask)) {
    // Save the first register, find the second and use the pair opcode.
    int second_bit_set = CTZ(reg_mask) + 1;
    *reg2 = reg;
    reg_mask >>= second_bit_set;
    *reg1 = reg + second_bit_set;
    return reg_mask;
  }

  // Use the single opcode, as we just have one register.
  *reg1 = reg;
  *reg2 = -1;
  return reg_mask;
}
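// Worked example (illustrative): starting from reg1 = -1, a mask of 0b1011
// first yields the pair reg2 = 0, reg1 = 1 and returns the residual mask 0b10;
// the next call then yields the single register reg1 = 3.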

static dwarf::Reg DwarfCoreReg(int num) {
  return dwarf::Reg::Arm64Core(num);
}

static dwarf::Reg DwarfFpReg(int num) {
  return dwarf::Reg::Arm64Fp(num);
}

static void SpillCoreRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) {
  int reg1 = -1, reg2 = -1;
  const int reg_log2_size = 3;

  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
    reg_mask = GenPairWise(reg_mask, &reg1, &reg2);
    if (UNLIKELY(reg2 < 0)) {
      m2l->NewLIR3(WIDE(kA64Str3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
      m2l->cfi().RelOffset(DwarfCoreReg(reg1), offset << reg_log2_size);
    } else {
      m2l->NewLIR4(WIDE(kA64Stp4rrXD), RegStorage::Solo64(reg2).GetReg(),
                   RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
      m2l->cfi().RelOffset(DwarfCoreReg(reg2), offset << reg_log2_size);
      m2l->cfi().RelOffset(DwarfCoreReg(reg1), (offset + 1) << reg_log2_size);
    }
  }
}
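// Illustrative: a core_reg_mask selecting x19, x20 and x30 emits
// "stp x19, x20, [base, #off]" followed by "str x30, [base, #off + 16]",
// with a matching CFI record for each saved slot.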

// TODO(Arm64): consider using ld1 and st1?
static void SpillFPRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) {
  int reg1 = -1, reg2 = -1;
  const int reg_log2_size = 3;

  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
    reg_mask = GenPairWise(reg_mask, &reg1, &reg2);
    if (UNLIKELY(reg2 < 0)) {
      m2l->NewLIR3(WIDE(kA64Str3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(),
                   offset);
      m2l->cfi().RelOffset(DwarfFpReg(reg1), offset << reg_log2_size);
    } else {
      m2l->NewLIR4(WIDE(kA64Stp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
                   RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
      m2l->cfi().RelOffset(DwarfFpReg(reg2), offset << reg_log2_size);
      m2l->cfi().RelOffset(DwarfFpReg(reg1), (offset + 1) << reg_log2_size);
    }
  }
}

static int SpillRegsPreSub(Arm64Mir2Lir* m2l, uint32_t core_reg_mask, uint32_t fp_reg_mask,
                           int frame_size) {
  m2l->OpRegRegImm(kOpSub, rs_sp, rs_sp, frame_size);
  m2l->cfi().AdjustCFAOffset(frame_size);

  int core_count = POPCOUNT(core_reg_mask);

  if (fp_reg_mask != 0) {
    // Spill FP regs.
    int fp_count = POPCOUNT(fp_reg_mask);
    int spill_offset = frame_size - (core_count + fp_count) * kArm64PointerSize;
    SpillFPRegs(m2l, rs_sp, spill_offset, fp_reg_mask);
  }

  if (core_reg_mask != 0) {
    // Spill core regs.
    int spill_offset = frame_size - (core_count * kArm64PointerSize);
    SpillCoreRegs(m2l, rs_sp, spill_offset, core_reg_mask);
  }

  return frame_size;
}
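// Illustrative layout: with 2 core and 1 FP spill in a 48-byte frame, the FP
// reg lands at sp + 24 and the core regs at sp + 32 and sp + 40, i.e. all
// saves sit at the top of the frame.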
1536
1537static int SpillRegsPreIndexed(Arm64Mir2Lir* m2l, RegStorage base, uint32_t core_reg_mask,
Ian Rogers6a3c1fc2014-10-31 00:33:20 -07001538 uint32_t fp_reg_mask) {
Andreas Gampef29ecd62014-07-29 00:35:00 -07001539 // Otherwise, spill both core and fp regs at the same time.
1540 // The very first instruction will be an stp with pre-indexed address, moving the stack pointer
1541 // down. From then on, we fill upwards. This will generate overall the same number of instructions
1542 // as the specialized code above in most cases (exception being odd number of core and even
1543 // non-zero fp spills), but is more flexible, as the offsets are guaranteed small.
1544 //
1545 // Some demonstrative fill cases : (c) = core, (f) = fp
1546 // cc 44 cc 44 cc 22 cc 33 fc => 1[1/2]
1547 // fc => 23 fc => 23 ff => 11 ff => 22
1548 // ff 11 f 11 f 11
1549 //
1550 int reg1 = -1, reg2 = -1;
1551 int core_count = POPCOUNT(core_reg_mask);
1552 int fp_count = POPCOUNT(fp_reg_mask);
1553
1554 int combined = fp_count + core_count;
1555 int all_offset = RoundUp(combined, 2); // Needs to be 16B = 2-reg aligned.
1556
1557 int cur_offset = 2; // What's the starting offset after the first stp? We expect the base slot
1558 // to be filled.
1559
1560 // First figure out whether the bottom is FP or core.
1561 if (fp_count > 0) {
1562 // Some FP spills.
1563 //
1564 // Four cases: (d0 is dummy to fill up stp)
1565 // 1) Single FP, even number of core -> stp d0, fp_reg
1566 // 2) Single FP, odd number of core -> stp fp_reg, d0
1567 // 3) More FP, even number combined -> stp fp_reg1, fp_reg2
1568 // 4) More FP, odd number combined -> stp d0, fp_reg
1569 if (fp_count == 1) {
1570 fp_reg_mask = ExtractReg(fp_reg_mask, &reg1);
1571 DCHECK_EQ(fp_reg_mask, 0U);
1572 if (core_count % 2 == 0) {
1573 m2l->NewLIR4(WIDE(kA64StpPre4ffXD),
1574 RegStorage::FloatSolo64(reg1).GetReg(),
1575 RegStorage::FloatSolo64(reg1).GetReg(),
1576 base.GetReg(), -all_offset);
David Srbecky1109fb32015-04-07 20:21:06 +01001577 m2l->cfi().AdjustCFAOffset(all_offset * kArm64PointerSize);
1578 m2l->cfi().RelOffset(DwarfFpReg(reg1), kArm64PointerSize);
Andreas Gampef29ecd62014-07-29 00:35:00 -07001579 } else {
1580 m2l->NewLIR4(WIDE(kA64StpPre4ffXD),
1581 RegStorage::FloatSolo64(reg1).GetReg(),
1582 RegStorage::FloatSolo64(reg1).GetReg(),
1583 base.GetReg(), -all_offset);
David Srbecky1109fb32015-04-07 20:21:06 +01001584 m2l->cfi().AdjustCFAOffset(all_offset * kArm64PointerSize);
1585 m2l->cfi().RelOffset(DwarfFpReg(reg1), 0);
Andreas Gampef29ecd62014-07-29 00:35:00 -07001586 cur_offset = 0; // That core reg needs to go into the upper half.
1587 }
1588 } else {
1589 if (combined % 2 == 0) {
1590 fp_reg_mask = GenPairWise(fp_reg_mask, &reg1, &reg2);
1591 m2l->NewLIR4(WIDE(kA64StpPre4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
1592 RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), -all_offset);
David Srbecky1109fb32015-04-07 20:21:06 +01001593 m2l->cfi().AdjustCFAOffset(all_offset * kArm64PointerSize);
1594 m2l->cfi().RelOffset(DwarfFpReg(reg2), 0);
1595 m2l->cfi().RelOffset(DwarfFpReg(reg1), kArm64PointerSize);
Andreas Gampef29ecd62014-07-29 00:35:00 -07001596 } else {
1597 fp_reg_mask = ExtractReg(fp_reg_mask, &reg1);
1598 m2l->NewLIR4(WIDE(kA64StpPre4ffXD), rs_d0.GetReg(), RegStorage::FloatSolo64(reg1).GetReg(),
1599 base.GetReg(), -all_offset);
David Srbecky1109fb32015-04-07 20:21:06 +01001600 m2l->cfi().AdjustCFAOffset(all_offset * kArm64PointerSize);
1601 m2l->cfi().RelOffset(DwarfFpReg(reg1), kArm64PointerSize);
Andreas Gampef29ecd62014-07-29 00:35:00 -07001602 }
1603 }
1604 } else {
1605 // No FP spills.
1606 //
1607 // Two cases:
1608 // 1) Even number of core -> stp core1, core2
1609 // 2) Odd number of core -> stp xzr, core1
1610 if (core_count % 2 == 1) {
1611 core_reg_mask = ExtractReg(core_reg_mask, &reg1);
1612 m2l->NewLIR4(WIDE(kA64StpPre4rrXD), rs_xzr.GetReg(),
1613 RegStorage::Solo64(reg1).GetReg(), base.GetReg(), -all_offset);
David Srbecky1109fb32015-04-07 20:21:06 +01001614 m2l->cfi().AdjustCFAOffset(all_offset * kArm64PointerSize);
1615 m2l->cfi().RelOffset(DwarfCoreReg(reg1), kArm64PointerSize);
Andreas Gampef29ecd62014-07-29 00:35:00 -07001616 } else {
1617 core_reg_mask = GenPairWise(core_reg_mask, &reg1, &reg2);
1618 m2l->NewLIR4(WIDE(kA64StpPre4rrXD), RegStorage::Solo64(reg2).GetReg(),
1619 RegStorage::Solo64(reg1).GetReg(), base.GetReg(), -all_offset);
David Srbecky1109fb32015-04-07 20:21:06 +01001620 m2l->cfi().AdjustCFAOffset(all_offset * kArm64PointerSize);
1621 m2l->cfi().RelOffset(DwarfCoreReg(reg2), 0);
1622 m2l->cfi().RelOffset(DwarfCoreReg(reg1), kArm64PointerSize);
Andreas Gampef29ecd62014-07-29 00:35:00 -07001623 }
1624 }
David Srbecky1109fb32015-04-07 20:21:06 +01001625 DCHECK_EQ(m2l->cfi().GetCurrentCFAOffset(),
1626 static_cast<int>(all_offset * kArm64PointerSize));
Andreas Gampef29ecd62014-07-29 00:35:00 -07001627
1628 if (fp_count != 0) {
1629 for (; fp_reg_mask != 0;) {
1630 // Have some FP regs to do.
1631 fp_reg_mask = GenPairWise(fp_reg_mask, &reg1, &reg2);
1632 if (UNLIKELY(reg2 < 0)) {
Matteo Franchin4163c532014-07-15 15:20:27 +01001633 m2l->NewLIR3(WIDE(kA64Str3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(),
Andreas Gampef29ecd62014-07-29 00:35:00 -07001634 cur_offset);
David Srbecky1109fb32015-04-07 20:21:06 +01001635 m2l->cfi().RelOffset(DwarfFpReg(reg1), cur_offset * kArm64PointerSize);
Andreas Gampef29ecd62014-07-29 00:35:00 -07001636 // Do not increment offset here, as the second half will be filled by a core reg.
1637 } else {
1638 m2l->NewLIR4(WIDE(kA64Stp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
1639 RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), cur_offset);
David Srbecky1109fb32015-04-07 20:21:06 +01001640 m2l->cfi().RelOffset(DwarfFpReg(reg2), cur_offset * kArm64PointerSize);
1641 m2l->cfi().RelOffset(DwarfFpReg(reg1), (cur_offset + 1) * kArm64PointerSize);
Andreas Gampef29ecd62014-07-29 00:35:00 -07001642 cur_offset += 2;
1643 }
1644 }
1645
1646 // Reset counting.
1647 reg1 = -1;
1648
1649 // If there is an odd number of core registers, we need to store the bottom now.
1650 if (core_count % 2 == 1) {
1651 core_reg_mask = ExtractReg(core_reg_mask, &reg1);
1652 m2l->NewLIR3(WIDE(kA64Str3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(),
1653 cur_offset + 1);
David Srbecky1109fb32015-04-07 20:21:06 +01001654 m2l->cfi().RelOffset(DwarfCoreReg(reg1), (cur_offset + 1) * kArm64PointerSize);
Andreas Gampef29ecd62014-07-29 00:35:00 -07001655 cur_offset += 2; // Half-slot filled now.
1656 }
1657 }
1658
1659 // Spill the rest of the core regs. They are guaranteed to be even.
1660 DCHECK_EQ(POPCOUNT(core_reg_mask) % 2, 0);
1661 for (; core_reg_mask != 0; cur_offset += 2) {
1662 core_reg_mask = GenPairWise(core_reg_mask, &reg1, &reg2);
1663 m2l->NewLIR4(WIDE(kA64Stp4rrXD), RegStorage::Solo64(reg2).GetReg(),
1664 RegStorage::Solo64(reg1).GetReg(), base.GetReg(), cur_offset);
David Srbecky1109fb32015-04-07 20:21:06 +01001665 m2l->cfi().RelOffset(DwarfCoreReg(reg2), cur_offset * kArm64PointerSize);
1666 m2l->cfi().RelOffset(DwarfCoreReg(reg1), (cur_offset + 1) * kArm64PointerSize);
Andreas Gampef29ecd62014-07-29 00:35:00 -07001667 }
1668
1669 DCHECK_EQ(cur_offset, all_offset);
1670
1671 return all_offset * 8;
1672}

int Arm64Mir2Lir::SpillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask,
                            int frame_size) {
  // If the frame size is small enough that all offsets would fit into the immediates, use that
  // setup, as it decrements sp early (kind of instruction scheduling), and is not worse
  // instruction-count wise than the complicated code below.
  //
  // This case is also optimal when we have an odd number of core spills, and an even (non-zero)
  // number of fp spills.
  if (RoundUp(frame_size, 8) / 8 <= 63) {
    return SpillRegsPreSub(this, core_reg_mask, fp_reg_mask, frame_size);
  } else {
    return SpillRegsPreIndexed(this, base, core_reg_mask, fp_reg_mask);
  }
}
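// The 63-slot bound matches kMaxFramesizeForOffset in UnspillRegs below: 63 is
// the largest positive scaled offset encodable in the imm7 field of stp/ldp.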
1688
1689static void UnSpillCoreRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) {
1690 int reg1 = -1, reg2 = -1;
1691 const int reg_log2_size = 3;
1692
1693 for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
1694 reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
1695 if (UNLIKELY(reg2 < 0)) {
1696 m2l->NewLIR3(WIDE(kA64Ldr3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
David Srbecky1109fb32015-04-07 20:21:06 +01001697 m2l->cfi().Restore(DwarfCoreReg(reg1));
Andreas Gampef29ecd62014-07-29 00:35:00 -07001698 } else {
1699 DCHECK_LE(offset, 63);
1700 m2l->NewLIR4(WIDE(kA64Ldp4rrXD), RegStorage::Solo64(reg2).GetReg(),
1701 RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
David Srbecky1109fb32015-04-07 20:21:06 +01001702 m2l->cfi().Restore(DwarfCoreReg(reg2));
1703 m2l->cfi().Restore(DwarfCoreReg(reg1));
Andreas Gampef29ecd62014-07-29 00:35:00 -07001704 }
1705 }
1706}
1707
1708static void UnSpillFPRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) {
1709 int reg1 = -1, reg2 = -1;
1710 const int reg_log2_size = 3;
1711
1712 for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
1713 reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
1714 if (UNLIKELY(reg2 < 0)) {
Matteo Franchin4163c532014-07-15 15:20:27 +01001715 m2l->NewLIR3(WIDE(kA64Ldr3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(),
Andreas Gampef29ecd62014-07-29 00:35:00 -07001716 offset);
David Srbecky1109fb32015-04-07 20:21:06 +01001717 m2l->cfi().Restore(DwarfFpReg(reg1));
Andreas Gampef29ecd62014-07-29 00:35:00 -07001718 } else {
1719 m2l->NewLIR4(WIDE(kA64Ldp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
1720 RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
David Srbecky1109fb32015-04-07 20:21:06 +01001721 m2l->cfi().Restore(DwarfFpReg(reg2));
1722 m2l->cfi().Restore(DwarfFpReg(reg1));
Andreas Gampef29ecd62014-07-29 00:35:00 -07001723 }
1724 }
1725}

void Arm64Mir2Lir::UnspillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask,
                               int frame_size) {
  DCHECK_EQ(base, rs_sp);
  // Restore saves and drop stack frame.
  // 2 versions:
  //
  // 1. (Original): Try to address directly, then drop the whole frame.
  //    Limitation: the ldp offset is a 7-bit signed scaled immediate.
  //
  // 2. (New): Drop the non-save part of the frame first. Then do something similar to the
  //    original, which is now guaranteed to be in range. Then drop the rest.
  //
  // TODO: In methods with few spills but huge frame, it would be better to do non-immediate loads
  //       in variant 1.

  // "Magic" constant, 63 (max signed 7b) * 8.
  static constexpr int kMaxFramesizeForOffset = 63 * kArm64PointerSize;

  const int num_core_spills = POPCOUNT(core_reg_mask);
  const int num_fp_spills = POPCOUNT(fp_reg_mask);

  int early_drop = 0;

  if (frame_size > kMaxFramesizeForOffset) {
    // Second variant. Drop the frame part.

    // TODO: Always use the first formula, as num_fp_spills would be zero?
    if (fp_reg_mask != 0) {
      early_drop = frame_size - kArm64PointerSize * (num_fp_spills + num_core_spills);
    } else {
      early_drop = frame_size - kArm64PointerSize * num_core_spills;
    }

    // The drop needs to be 16B aligned, so that SP stays aligned.
    early_drop = RoundDown(early_drop, 16);

    OpRegImm64(kOpAdd, rs_sp, early_drop);
    cfi_.AdjustCFAOffset(-early_drop);
  }

  // Unspill.
  if (fp_reg_mask != 0) {
    int offset = frame_size - early_drop - kArm64PointerSize * (num_fp_spills + num_core_spills);
    UnSpillFPRegs(this, rs_sp, offset, fp_reg_mask);
  }
  if (core_reg_mask != 0) {
    int offset = frame_size - early_drop - kArm64PointerSize * num_core_spills;
    UnSpillCoreRegs(this, rs_sp, offset, core_reg_mask);
  }

  // Drop the (rest of the) frame.
  int adjust = frame_size - early_drop;
  OpRegImm64(kOpAdd, rs_sp, adjust);
  cfi_.AdjustCFAOffset(-adjust);
}
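// Illustrative: for a 1024-byte frame with two core spills and no FP spills,
// variant 2 first adds RoundDown(1024 - 16, 16) = 1008 to sp, restores the
// pair with "ldp ..., [sp]", then drops the remaining 16 bytes.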

bool Arm64Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
  A64Opcode wide = IsWide(size) ? WIDE(0) : UNWIDE(0);
  RegLocation rl_src_i = info->args[0];
  RegLocation rl_dest = IsWide(size) ? InlineTargetWide(info) : InlineTarget(info);  // result reg
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegLocation rl_i = IsWide(size) ?
      LoadValueWide(rl_src_i, kCoreReg) : LoadValue(rl_src_i, kCoreReg);
  NewLIR2(kA64Rbit2rr | wide, rl_result.reg.GetReg(), rl_i.reg.GetReg());
  IsWide(size) ? StoreValueWide(rl_dest, rl_result) : StoreValue(rl_dest, rl_result);
  return true;
}
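// Illustrative: this backs the Integer.reverse/Long.reverse intrinsics with a
// single "rbit" (narrow or wide form), e.g. Integer.reverse(1) == 0x80000000.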

}  // namespace art