/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "codegen_x86.h"

#include "base/logging.h"
#include "dex/mir_graph.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "dex/dataflow_iterator-inl.h"
#include "dex/quick/dex_file_method_inliner.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "dex/reg_storage_eq.h"
#include "driver/compiler_driver.h"
#include "x86_lir.h"

namespace art {

/* This file contains codegen for the X86 ISA */

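// Copy between registers where at least one operand is an XMM register,
// using movsd/movss for FP-to-FP copies and movd for GPR<->XMM transfers.
// A copy onto itself is emitted but flagged as a nop.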
LIR* X86Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
  int opcode;
  /* must be both DOUBLE or both not DOUBLE */
  DCHECK(r_dest.IsFloat() || r_src.IsFloat());
  DCHECK_EQ(r_dest.IsDouble(), r_src.IsDouble());
  if (r_dest.IsDouble()) {
    opcode = kX86MovsdRR;
  } else {
    if (r_dest.IsSingle()) {
      if (r_src.IsSingle()) {
        opcode = kX86MovssRR;
      } else {  // Fpr <- Gpr
        opcode = kX86MovdxrRR;
      }
    } else {  // Gpr <- Fpr
      DCHECK(r_src.IsSingle()) << "Raw: 0x" << std::hex << r_src.GetRawBits();
      opcode = kX86MovdrxRR;
    }
  }
  DCHECK_NE((EncodingMap[opcode].flags & IS_BINARY_OP), 0ULL);
  LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
  if (r_dest == r_src) {
    res->flags.is_nop = true;
  }
  return res;
}

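// x86 can encode arbitrary 32/64-bit integer immediates directly, so integer
// constants are always cheap to materialize. FP constants are cheap only when
// zero, which an xorps/xorpd of the register with itself can produce; any
// other FP value has to come from the literal pool.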
bool X86Mir2Lir::InexpensiveConstantInt(int32_t value) {
  UNUSED(value);
  return true;
}

bool X86Mir2Lir::InexpensiveConstantFloat(int32_t value) {
  return value == 0;
}

bool X86Mir2Lir::InexpensiveConstantLong(int64_t value) {
  UNUSED(value);
  return true;
}

bool X86Mir2Lir::InexpensiveConstantDouble(int64_t value) {
  return value == 0;
}

/*
 * Load an immediate using a shortcut if possible; otherwise
 * grab from the per-translation literal pool. If the target is
 * a high register, build the constant into a low register and copy.
 *
 * No additional register clobbering operation is performed. Use this version when
 * 1) r_dest is freshly returned from AllocTemp or
 * 2) The codegen is under fixed register usage
 */
LIR* X86Mir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
  RegStorage r_dest_save = r_dest;
  if (r_dest.IsFloat()) {
    if (value == 0) {
      return NewLIR2(kX86XorpsRR, r_dest.GetReg(), r_dest.GetReg());
    }
    r_dest = AllocTemp();
  }

  LIR *res;
  if (value == 0) {
    res = NewLIR2(kX86Xor32RR, r_dest.GetReg(), r_dest.GetReg());
  } else {
    // Note, there is no byte immediate form of a 32 bit immediate move.
    // 64-bit immediates are not supported by the LIR structure.
    res = NewLIR2(kX86Mov32RI, r_dest.GetReg(), value);
  }

  if (r_dest_save.IsFloat()) {
    NewLIR2(kX86MovdxrRR, r_dest_save.GetReg(), r_dest.GetReg());
    FreeTemp(r_dest);
  }

  return res;
}

LIR* X86Mir2Lir::OpUnconditionalBranch(LIR* target) {
  LIR* res = NewLIR1(kX86Jmp8, 0 /* offset to be patched during assembly */);
  res->target = target;
  return res;
}

LIR* X86Mir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
  LIR* branch = NewLIR2(kX86Jcc8, 0 /* offset to be patched */,
                        X86ConditionEncoding(cc));
  branch->target = target;
  return branch;
}

LIR* X86Mir2Lir::OpReg(OpKind op, RegStorage r_dest_src) {
  X86OpCode opcode = kX86Bkpt;
  switch (op) {
    case kOpNeg: opcode = r_dest_src.Is64Bit() ? kX86Neg64R : kX86Neg32R; break;
    case kOpNot: opcode = r_dest_src.Is64Bit() ? kX86Not64R : kX86Not32R; break;
    case kOpRev: opcode = r_dest_src.Is64Bit() ? kX86Bswap64R : kX86Bswap32R; break;
    case kOpBlx: opcode = kX86CallR; break;
    default:
      LOG(FATAL) << "Bad case in OpReg " << op;
  }
  return NewLIR1(opcode, r_dest_src.GetReg());
}

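// Register-immediate ALU op. Where the value fits in a signed byte the
// sign-extended 8-bit immediate forms (e.g. kX86Add32RI8) are chosen, which
// shaves three bytes off the instruction encoding.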
LIR* X86Mir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
  X86OpCode opcode = kX86Bkpt;
  bool byte_imm = IS_SIMM8(value);
  DCHECK(!r_dest_src1.IsFloat());
  if (r_dest_src1.Is64Bit()) {
    switch (op) {
      case kOpAdd: opcode = byte_imm ? kX86Add64RI8 : kX86Add64RI; break;
      case kOpSub: opcode = byte_imm ? kX86Sub64RI8 : kX86Sub64RI; break;
      case kOpLsl: opcode = kX86Sal64RI; break;
      case kOpLsr: opcode = kX86Shr64RI; break;
      case kOpAsr: opcode = kX86Sar64RI; break;
      case kOpCmp: opcode = byte_imm ? kX86Cmp64RI8 : kX86Cmp64RI; break;
      default:
        LOG(FATAL) << "Bad case in OpRegImm (64-bit) " << op;
    }
  } else {
    switch (op) {
      case kOpLsl: opcode = kX86Sal32RI; break;
      case kOpLsr: opcode = kX86Shr32RI; break;
      case kOpAsr: opcode = kX86Sar32RI; break;
      case kOpAdd: opcode = byte_imm ? kX86Add32RI8 : kX86Add32RI; break;
      case kOpOr: opcode = byte_imm ? kX86Or32RI8 : kX86Or32RI; break;
      case kOpAdc: opcode = byte_imm ? kX86Adc32RI8 : kX86Adc32RI; break;
      // case kOpSbb: opcode = kX86Sbb32RI; break;
      case kOpAnd: opcode = byte_imm ? kX86And32RI8 : kX86And32RI; break;
      case kOpSub: opcode = byte_imm ? kX86Sub32RI8 : kX86Sub32RI; break;
      case kOpXor: opcode = byte_imm ? kX86Xor32RI8 : kX86Xor32RI; break;
      case kOpCmp: opcode = byte_imm ? kX86Cmp32RI8 : kX86Cmp32RI; break;
      case kOpMov:
        /*
         * Moving the constant zero into a register can be specialized as an xor of the register.
         * However, that sets eflags while the move does not. For that reason, always do
         * the move here; if the caller is flexible, it should call LoadConstantNoClobber instead.
         */
        opcode = kX86Mov32RI;
        break;
      case kOpMul:
        opcode = byte_imm ? kX86Imul32RRI8 : kX86Imul32RRI;
        return NewLIR3(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), value);
      case kOp2Byte:
        opcode = kX86Mov32RI;
        value = static_cast<int8_t>(value);
        break;
      case kOp2Short:
        opcode = kX86Mov32RI;
        value = static_cast<int16_t>(value);
        break;
      case kOp2Char:
        opcode = kX86Mov32RI;
        value = static_cast<uint16_t>(value);
        break;
      case kOpNeg:
        opcode = kX86Mov32RI;
        value = -value;
        break;
      default:
        LOG(FATAL) << "Bad case in OpRegImm " << op;
    }
  }
  return NewLIR2(opcode, r_dest_src1.GetReg(), value);
}

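// Register-register ALU op. ARM-style unary ops (mvn, neg, rev, revsh) are
// synthesized as a copy plus the matching one-operand x86 instruction;
// variable shift counts must already live in CL.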
LIR* X86Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
  bool is64Bit = r_dest_src1.Is64Bit();
  X86OpCode opcode = kX86Nop;
  bool src2_must_be_cx = false;
  switch (op) {
    // X86 unary opcodes
    case kOpMvn:
      OpRegCopy(r_dest_src1, r_src2);
      return OpReg(kOpNot, r_dest_src1);
    case kOpNeg:
      OpRegCopy(r_dest_src1, r_src2);
      return OpReg(kOpNeg, r_dest_src1);
    case kOpRev:
      OpRegCopy(r_dest_src1, r_src2);
      return OpReg(kOpRev, r_dest_src1);
    case kOpRevsh:
      OpRegCopy(r_dest_src1, r_src2);
      OpReg(kOpRev, r_dest_src1);
      return OpRegImm(kOpAsr, r_dest_src1, 16);
    // X86 binary opcodes
    case kOpSub: opcode = is64Bit ? kX86Sub64RR : kX86Sub32RR; break;
    case kOpSbc: opcode = is64Bit ? kX86Sbb64RR : kX86Sbb32RR; break;
    case kOpLsl: opcode = is64Bit ? kX86Sal64RC : kX86Sal32RC; src2_must_be_cx = true; break;
    case kOpLsr: opcode = is64Bit ? kX86Shr64RC : kX86Shr32RC; src2_must_be_cx = true; break;
    case kOpAsr: opcode = is64Bit ? kX86Sar64RC : kX86Sar32RC; src2_must_be_cx = true; break;
    case kOpMov: opcode = is64Bit ? kX86Mov64RR : kX86Mov32RR; break;
    case kOpCmp: opcode = is64Bit ? kX86Cmp64RR : kX86Cmp32RR; break;
    case kOpAdd: opcode = is64Bit ? kX86Add64RR : kX86Add32RR; break;
    case kOpAdc: opcode = is64Bit ? kX86Adc64RR : kX86Adc32RR; break;
    case kOpAnd: opcode = is64Bit ? kX86And64RR : kX86And32RR; break;
    case kOpOr: opcode = is64Bit ? kX86Or64RR : kX86Or32RR; break;
    case kOpXor: opcode = is64Bit ? kX86Xor64RR : kX86Xor32RR; break;
    case kOp2Byte:
      // TODO: there are several instances of this check. A utility function perhaps?
      // TODO: Similar to Arm's reg < 8 check. Perhaps add attribute checks to RegStorage?
      // Use shifts instead of a byte operand if the source can't be byte accessed.
      if (r_src2.GetRegNum() >= rs_rX86_SP_32.GetRegNum()) {
        NewLIR2(is64Bit ? kX86Mov64RR : kX86Mov32RR, r_dest_src1.GetReg(), r_src2.GetReg());
        NewLIR2(is64Bit ? kX86Sal64RI : kX86Sal32RI, r_dest_src1.GetReg(), is64Bit ? 56 : 24);
        return NewLIR2(is64Bit ? kX86Sar64RI : kX86Sar32RI, r_dest_src1.GetReg(),
                       is64Bit ? 56 : 24);
      } else {
        opcode = is64Bit ? kX86Bkpt : kX86Movsx8RR;
      }
      break;
    case kOp2Short: opcode = is64Bit ? kX86Bkpt : kX86Movsx16RR; break;
    case kOp2Char: opcode = is64Bit ? kX86Bkpt : kX86Movzx16RR; break;
    case kOpMul: opcode = is64Bit ? kX86Bkpt : kX86Imul32RR; break;
    default:
      LOG(FATAL) << "Bad case in OpRegReg " << op;
      break;
  }
  CHECK(!src2_must_be_cx || r_src2.GetReg() == rs_rCX.GetReg());
  return NewLIR2(opcode, r_dest_src1.GetReg(), r_src2.GetReg());
}

LIR* X86Mir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) {
  DCHECK(!r_base.IsFloat());
  X86OpCode opcode = kX86Nop;
  int dest = r_dest.IsPair() ? r_dest.GetLowReg() : r_dest.GetReg();
  switch (move_type) {
    case kMov8GP:
      CHECK(!r_dest.IsFloat());
      opcode = kX86Mov8RM;
      break;
    case kMov16GP:
      CHECK(!r_dest.IsFloat());
      opcode = kX86Mov16RM;
      break;
    case kMov32GP:
      CHECK(!r_dest.IsFloat());
      opcode = kX86Mov32RM;
      break;
    case kMov32FP:
      CHECK(r_dest.IsFloat());
      opcode = kX86MovssRM;
      break;
    case kMov64FP:
      CHECK(r_dest.IsFloat());
      opcode = kX86MovsdRM;
      break;
    case kMovU128FP:
      CHECK(r_dest.IsFloat());
      opcode = kX86MovupsRM;
      break;
    case kMovA128FP:
      CHECK(r_dest.IsFloat());
      opcode = kX86MovapsRM;
      break;
    case kMovLo128FP:
      CHECK(r_dest.IsFloat());
      opcode = kX86MovlpsRM;
      break;
    case kMovHi128FP:
      CHECK(r_dest.IsFloat());
      opcode = kX86MovhpsRM;
      break;
    case kMov64GP:
    case kMovLo64FP:
    case kMovHi64FP:
    default:
      LOG(FATAL) << "Bad case in OpMovRegMem";
      break;
  }

  return NewLIR3(opcode, dest, r_base.GetReg(), offset);
}

LIR* X86Mir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
  DCHECK(!r_base.IsFloat());
  int src = r_src.IsPair() ? r_src.GetLowReg() : r_src.GetReg();

  X86OpCode opcode = kX86Nop;
  switch (move_type) {
    case kMov8GP:
      CHECK(!r_src.IsFloat());
      opcode = kX86Mov8MR;
      break;
    case kMov16GP:
      CHECK(!r_src.IsFloat());
      opcode = kX86Mov16MR;
      break;
    case kMov32GP:
      CHECK(!r_src.IsFloat());
      opcode = kX86Mov32MR;
      break;
    case kMov32FP:
      CHECK(r_src.IsFloat());
      opcode = kX86MovssMR;
      break;
    case kMov64FP:
      CHECK(r_src.IsFloat());
      opcode = kX86MovsdMR;
      break;
    case kMovU128FP:
      CHECK(r_src.IsFloat());
      opcode = kX86MovupsMR;
      break;
    case kMovA128FP:
      CHECK(r_src.IsFloat());
      opcode = kX86MovapsMR;
      break;
    case kMovLo128FP:
      CHECK(r_src.IsFloat());
      opcode = kX86MovlpsMR;
      break;
    case kMovHi128FP:
      CHECK(r_src.IsFloat());
      opcode = kX86MovhpsMR;
      break;
    case kMov64GP:
    case kMovLo64FP:
    case kMovHi64FP:
    default:
      LOG(FATAL) << "Bad case in OpMovMemReg";
      break;
  }

  return NewLIR3(opcode, r_base.GetReg(), offset, src);
}

LIR* X86Mir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
  // The only conditional reg to reg operation supported is Cmov
  DCHECK_EQ(op, kOpCmov);
  DCHECK_EQ(r_dest.Is64Bit(), r_src.Is64Bit());
  return NewLIR3(r_dest.Is64Bit() ? kX86Cmov64RRC : kX86Cmov32RRC, r_dest.GetReg(),
                 r_src.GetReg(), X86ConditionEncoding(cc));
}

LIR* X86Mir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset) {
  bool is64Bit = r_dest.Is64Bit();
  X86OpCode opcode = kX86Nop;
  switch (op) {
    // X86 binary opcodes
    case kOpSub: opcode = is64Bit ? kX86Sub64RM : kX86Sub32RM; break;
    case kOpMov: opcode = is64Bit ? kX86Mov64RM : kX86Mov32RM; break;
    case kOpCmp: opcode = is64Bit ? kX86Cmp64RM : kX86Cmp32RM; break;
    case kOpAdd: opcode = is64Bit ? kX86Add64RM : kX86Add32RM; break;
    case kOpAnd: opcode = is64Bit ? kX86And64RM : kX86And32RM; break;
    case kOpOr: opcode = is64Bit ? kX86Or64RM : kX86Or32RM; break;
    case kOpXor: opcode = is64Bit ? kX86Xor64RM : kX86Xor32RM; break;
    case kOp2Byte: opcode = kX86Movsx8RM; break;
    case kOp2Short: opcode = kX86Movsx16RM; break;
    case kOp2Char: opcode = kX86Movzx16RM; break;
    case kOpMul:
    default:
      LOG(FATAL) << "Bad case in OpRegMem " << op;
      break;
  }
  LIR *l = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), offset);
  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
    DCHECK_EQ(r_base, cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32);
    AnnotateDalvikRegAccess(l, offset >> 2, true /* is_load */, false /* is_64bit */);
  }
  return l;
}

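// ALU op whose destination is a Dalvik virtual register still living in its
// memory home location on the frame rather than in a physical register.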
LIR* X86Mir2Lir::OpMemReg(OpKind op, RegLocation rl_dest, int r_value) {
  DCHECK_NE(rl_dest.location, kLocPhysReg);
  int displacement = SRegOffset(rl_dest.s_reg_low);
  bool is64Bit = rl_dest.wide != 0;
  X86OpCode opcode = kX86Nop;
  switch (op) {
    case kOpSub: opcode = is64Bit ? kX86Sub64MR : kX86Sub32MR; break;
    case kOpMov: opcode = is64Bit ? kX86Mov64MR : kX86Mov32MR; break;
    case kOpCmp: opcode = is64Bit ? kX86Cmp64MR : kX86Cmp32MR; break;
    case kOpAdd: opcode = is64Bit ? kX86Add64MR : kX86Add32MR; break;
    case kOpAnd: opcode = is64Bit ? kX86And64MR : kX86And32MR; break;
    case kOpOr: opcode = is64Bit ? kX86Or64MR : kX86Or32MR; break;
    case kOpXor: opcode = is64Bit ? kX86Xor64MR : kX86Xor32MR; break;
    case kOpLsl: opcode = is64Bit ? kX86Sal64MC : kX86Sal32MC; break;
    case kOpLsr: opcode = is64Bit ? kX86Shr64MC : kX86Shr32MC; break;
    case kOpAsr: opcode = is64Bit ? kX86Sar64MC : kX86Sar32MC; break;
    default:
      LOG(FATAL) << "Bad case in OpMemReg " << op;
      break;
  }
  LIR *l = NewLIR3(opcode, rs_rX86_SP_32.GetReg(), displacement, r_value);
  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
    AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is64Bit /* is_64bit */);
    AnnotateDalvikRegAccess(l, displacement >> 2, false /* is_load */, is64Bit /* is_64bit */);
  }
  return l;
}

LIR* X86Mir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegLocation rl_value) {
  DCHECK_NE(rl_value.location, kLocPhysReg);
  bool is64Bit = r_dest.Is64Bit();
  int displacement = SRegOffset(rl_value.s_reg_low);
  X86OpCode opcode = kX86Nop;
  switch (op) {
    case kOpSub: opcode = is64Bit ? kX86Sub64RM : kX86Sub32RM; break;
    case kOpMov: opcode = is64Bit ? kX86Mov64RM : kX86Mov32RM; break;
    case kOpCmp: opcode = is64Bit ? kX86Cmp64RM : kX86Cmp32RM; break;
    case kOpAdd: opcode = is64Bit ? kX86Add64RM : kX86Add32RM; break;
    case kOpAnd: opcode = is64Bit ? kX86And64RM : kX86And32RM; break;
    case kOpOr: opcode = is64Bit ? kX86Or64RM : kX86Or32RM; break;
    case kOpXor: opcode = is64Bit ? kX86Xor64RM : kX86Xor32RM; break;
    case kOpMul: opcode = is64Bit ? kX86Bkpt : kX86Imul32RM; break;
    default:
      LOG(FATAL) << "Bad case in OpRegMem " << op;
      break;
  }
  LIR *l = NewLIR3(opcode, r_dest.GetReg(), rs_rX86_SP_32.GetReg(), displacement);
  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
    AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is64Bit /* is_64bit */);
  }
  return l;
}

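// Three-operand ALU op built from x86's two-operand forms. An add of two
// distinct registers becomes a single LEA (base + index, unless rBP would be
// the base); when r_dest aliases r_src2, sub is rewritten as neg + add and
// shifts/sbc go through a temporary copy.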
LIR* X86Mir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1,
                             RegStorage r_src2) {
  bool is64Bit = r_dest.Is64Bit();
  if (r_dest != r_src1 && r_dest != r_src2) {
    if (op == kOpAdd) {  // lea special case, except can't encode rbp as base
      if (r_src1 == r_src2) {
        OpRegCopy(r_dest, r_src1);
        return OpRegImm(kOpLsl, r_dest, 1);
      } else if (r_src1 != rs_rBP) {
        return NewLIR5(is64Bit ? kX86Lea64RA : kX86Lea32RA, r_dest.GetReg(),
                       r_src1.GetReg() /* base */, r_src2.GetReg() /* index */,
                       0 /* scale */, 0 /* disp */);
      } else {
        return NewLIR5(is64Bit ? kX86Lea64RA : kX86Lea32RA, r_dest.GetReg(),
                       r_src2.GetReg() /* base */, r_src1.GetReg() /* index */,
                       0 /* scale */, 0 /* disp */);
      }
    } else {
      OpRegCopy(r_dest, r_src1);
      return OpRegReg(op, r_dest, r_src2);
    }
  } else if (r_dest == r_src1) {
    return OpRegReg(op, r_dest, r_src2);
  } else {  // r_dest == r_src2
    switch (op) {
      case kOpSub:  // non-commutative
        OpReg(kOpNeg, r_dest);
        op = kOpAdd;
        break;
      case kOpSbc:
      case kOpLsl: case kOpLsr: case kOpAsr: case kOpRor: {
        RegStorage t_reg = AllocTemp();
        OpRegCopy(t_reg, r_src1);
        OpRegReg(op, t_reg, r_src2);
        LIR* res = OpRegCopyNoInsert(r_dest, t_reg);
        AppendLIR(res);
        FreeTemp(t_reg);
        return res;
      }
      case kOpAdd:  // commutative
      case kOpOr:
      case kOpAdc:
      case kOpAnd:
      case kOpXor:
      case kOpMul:
        break;
      default:
        LOG(FATAL) << "Bad case in OpRegRegReg " << op;
    }
    return OpRegReg(op, r_dest, r_src1);
  }
}

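// Register = register OP immediate. On 32-bit targets, multiplies use the
// three-operand imul form and 0xFF/0xFFFF masks become movzx; adds with a
// distinct destination fold the immediate into a single LEA displacement.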
LIR* X86Mir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src, int value) {
  if (op == kOpMul && !cu_->target64) {
    X86OpCode opcode = IS_SIMM8(value) ? kX86Imul32RRI8 : kX86Imul32RRI;
    return NewLIR3(opcode, r_dest.GetReg(), r_src.GetReg(), value);
  } else if (op == kOpAnd && !cu_->target64) {
    if (value == 0xFF && r_src.Low4()) {
      return NewLIR2(kX86Movzx8RR, r_dest.GetReg(), r_src.GetReg());
    } else if (value == 0xFFFF) {
      return NewLIR2(kX86Movzx16RR, r_dest.GetReg(), r_src.GetReg());
    }
  }
  if (r_dest != r_src) {
    if ((false) && op == kOpLsl && value >= 0 && value <= 3) {  // lea shift special case
      // TODO: fix bug in LEA encoding when disp == 0
      return NewLIR5(kX86Lea32RA, r_dest.GetReg(), r5sib_no_base /* base */,
                     r_src.GetReg() /* index */, value /* scale */, 0 /* disp */);
    } else if (op == kOpAdd) {  // lea add special case
      return NewLIR5(r_dest.Is64Bit() ? kX86Lea64RA : kX86Lea32RA, r_dest.GetReg(),
                     r_src.GetReg() /* base */, rs_rX86_SP_32.GetReg()/*r4sib_no_index*/ /* index */,
                     0 /* scale */, value /* disp */);
    }
    OpRegCopy(r_dest, r_src);
  }
  return OpRegImm(op, r_dest, value);
}

LIR* X86Mir2Lir::OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) {
  DCHECK_EQ(kX86, cu_->instruction_set);
  X86OpCode opcode = kX86Bkpt;
  switch (op) {
    case kOpBlx: opcode = kX86CallT; break;
    case kOpBx: opcode = kX86JmpT; break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }
  return NewLIR1(opcode, thread_offset.Int32Value());
}

LIR* X86Mir2Lir::OpThreadMem(OpKind op, ThreadOffset<8> thread_offset) {
  DCHECK_EQ(kX86_64, cu_->instruction_set);
  X86OpCode opcode = kX86Bkpt;
  switch (op) {
    case kOpBlx: opcode = kX86CallT; break;
    case kOpBx: opcode = kX86JmpT; break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }
  return NewLIR1(opcode, thread_offset.Int32Value());
}

LIR* X86Mir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
  X86OpCode opcode = kX86Bkpt;
  switch (op) {
    case kOpBlx: opcode = kX86CallM; break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }
  return NewLIR2(opcode, r_base.GetReg(), disp);
}

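// Materialize a 64-bit constant. Zero XMM values use xorpd; other doubles
// come from the literal pool via RIP-relative addressing on x86-64 or an
// anchored PC register on x86, falling back to assembling the value from its
// two 32-bit halves. Core registers use 32- or 64-bit immediate moves as the
// value requires.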
LIR* X86Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
  int32_t val_lo = Low32Bits(value);
  int32_t val_hi = High32Bits(value);
  int32_t low_reg_val = r_dest.IsPair() ? r_dest.GetLowReg() : r_dest.GetReg();
  LIR *res;
  bool is_fp = r_dest.IsFloat();
  // TODO: clean this up once we fully recognize 64-bit storage containers.
  if (is_fp) {
    DCHECK(r_dest.IsDouble());
    if (value == 0) {
      return NewLIR2(kX86XorpdRR, low_reg_val, low_reg_val);
    } else if (pc_rel_base_reg_.Valid() || cu_->target64) {
      // We will load the value from the literal area.
      LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
      if (data_target == nullptr) {
        data_target = AddWideData(&literal_list_, val_lo, val_hi);
      }

      // Load the proper value from the literal area.
      // We don't know the proper offset for the value, so pick one that
      // will force a 4 byte offset. We will fix this up in the assembler
      // later to have the right value.
      ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
      if (cu_->target64) {
        res = NewLIR3(kX86MovsdRM, low_reg_val, kRIPReg, 256 /* bogus */);
      } else {
        // Get the PC to a register and get the anchor.
        LIR* anchor;
        RegStorage r_pc = GetPcAndAnchor(&anchor);

        res = LoadBaseDisp(r_pc, kDummy32BitOffset, RegStorage::FloatSolo64(low_reg_val),
                           kDouble, kNotVolatile);
        res->operands[4] = WrapPointer(anchor);
        if (IsTemp(r_pc)) {
          FreeTemp(r_pc);
        }
      }
      res->target = data_target;
      res->flags.fixup = kFixupLoad;
    } else {
      if (r_dest.IsPair()) {
        if (val_lo == 0) {
          res = NewLIR2(kX86XorpsRR, low_reg_val, low_reg_val);
        } else {
          res = LoadConstantNoClobber(RegStorage::FloatSolo32(low_reg_val), val_lo);
        }
        if (val_hi != 0) {
          RegStorage r_dest_hi = AllocTempDouble();
          LoadConstantNoClobber(r_dest_hi, val_hi);
          NewLIR2(kX86PunpckldqRR, low_reg_val, r_dest_hi.GetReg());
          FreeTemp(r_dest_hi);
        }
      } else {
        RegStorage r_temp = AllocTypedTempWide(false, kCoreReg);
        res = LoadConstantWide(r_temp, value);
        OpRegCopyWide(r_dest, r_temp);
        FreeTemp(r_temp);
      }
    }
  } else {
    if (r_dest.IsPair()) {
      res = LoadConstantNoClobber(r_dest.GetLow(), val_lo);
      LoadConstantNoClobber(r_dest.GetHigh(), val_hi);
    } else {
      if (value == 0) {
        res = NewLIR2(kX86Xor64RR, r_dest.GetReg(), r_dest.GetReg());
      } else if (value >= INT_MIN && value <= INT_MAX) {
        res = NewLIR2(kX86Mov64RI32, r_dest.GetReg(), val_lo);
      } else {
        res = NewLIR3(kX86Mov64RI64, r_dest.GetReg(), val_hi, val_lo);
      }
    }
  }
  return res;
}

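// Load from base + scaled index + displacement into r_dest. For 32-bit pairs
// the two halves are loaded separately, ordered (with a temp if necessary) so
// that neither the base nor the index register is clobbered before it is used.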
LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
                                     int displacement, RegStorage r_dest, OpSize size) {
  LIR *load = nullptr;
  LIR *load2 = nullptr;
  bool is_array = r_index.Valid();
  bool pair = r_dest.IsPair();
  bool is64bit = ((size == k64) || (size == kDouble));
  X86OpCode opcode = kX86Nop;
  switch (size) {
    case k64:
    case kDouble:
      if (r_dest.IsFloat()) {
        opcode = is_array ? kX86MovsdRA : kX86MovsdRM;
      } else if (!pair) {
        opcode = is_array ? kX86Mov64RA : kX86Mov64RM;
      } else {
        opcode = is_array ? kX86Mov32RA : kX86Mov32RM;
      }
      // TODO: double store is to unaligned address
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kWord:
      if (cu_->target64) {
        opcode = is_array ? kX86Mov64RA : kX86Mov64RM;
        CHECK_EQ(is_array, false);
        CHECK_EQ(r_dest.IsFloat(), false);
        break;
      }
      FALLTHROUGH_INTENDED;  // else fall-through to k32 case
    case k32:
    case kSingle:
    case kReference:  // TODO: update for reference decompression on 64-bit targets.
      opcode = is_array ? kX86Mov32RA : kX86Mov32RM;
      if (r_dest.IsFloat()) {
        opcode = is_array ? kX86MovssRA : kX86MovssRM;
        DCHECK(r_dest.IsFloat());
      }
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kUnsignedHalf:
      opcode = is_array ? kX86Movzx16RA : kX86Movzx16RM;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kSignedHalf:
      opcode = is_array ? kX86Movsx16RA : kX86Movsx16RM;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kUnsignedByte:
      opcode = is_array ? kX86Movzx8RA : kX86Movzx8RM;
      break;
    case kSignedByte:
      opcode = is_array ? kX86Movsx8RA : kX86Movsx8RM;
      break;
    default:
      LOG(FATAL) << "Bad case in LoadBaseIndexedDispBody";
  }

  if (!is_array) {
    if (!pair) {
      load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
    } else {
      DCHECK(!r_dest.IsFloat());  // Make sure we're not still using a pair here.
      if (r_base == r_dest.GetLow()) {
        load = NewLIR3(opcode, r_dest.GetHighReg(), r_base.GetReg(),
                       displacement + HIWORD_OFFSET);
        load2 = NewLIR3(opcode, r_dest.GetLowReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
      } else {
        load = NewLIR3(opcode, r_dest.GetLowReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
        load2 = NewLIR3(opcode, r_dest.GetHighReg(), r_base.GetReg(),
                        displacement + HIWORD_OFFSET);
      }
    }
    if (mem_ref_type_ == ResourceMask::kDalvikReg) {
      DCHECK_EQ(r_base, cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32);
      AnnotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
                              true /* is_load */, is64bit);
      if (pair) {
        AnnotateDalvikRegAccess(load2, (displacement + HIWORD_OFFSET) >> 2,
                                true /* is_load */, is64bit);
      }
    }
  } else {
    if (!pair) {
      load = NewLIR5(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
                     displacement + LOWORD_OFFSET);
    } else {
      DCHECK(!r_dest.IsFloat());  // Make sure we're not still using a pair here.
      if (r_base == r_dest.GetLow()) {
        if (r_dest.GetHigh() == r_index) {
          // We can't use either register for the first load.
          RegStorage temp = AllocTemp();
          load = NewLIR5(opcode, temp.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
                         displacement + HIWORD_OFFSET);
          load2 = NewLIR5(opcode, r_dest.GetLowReg(), r_base.GetReg(), r_index.GetReg(), scale,
                          displacement + LOWORD_OFFSET);
          OpRegCopy(r_dest.GetHigh(), temp);
          FreeTemp(temp);
        } else {
          load = NewLIR5(opcode, r_dest.GetHighReg(), r_base.GetReg(), r_index.GetReg(), scale,
                         displacement + HIWORD_OFFSET);
          load2 = NewLIR5(opcode, r_dest.GetLowReg(), r_base.GetReg(), r_index.GetReg(), scale,
                          displacement + LOWORD_OFFSET);
        }
      } else {
        if (r_dest.GetLow() == r_index) {
          // We can't use either register for the first load.
          RegStorage temp = AllocTemp();
          load = NewLIR5(opcode, temp.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
                         displacement + LOWORD_OFFSET);
          load2 = NewLIR5(opcode, r_dest.GetHighReg(), r_base.GetReg(), r_index.GetReg(), scale,
                          displacement + HIWORD_OFFSET);
          OpRegCopy(r_dest.GetLow(), temp);
          FreeTemp(temp);
        } else {
          load = NewLIR5(opcode, r_dest.GetLowReg(), r_base.GetReg(), r_index.GetReg(), scale,
                         displacement + LOWORD_OFFSET);
          load2 = NewLIR5(opcode, r_dest.GetHighReg(), r_base.GetReg(), r_index.GetReg(), scale,
                          displacement + HIWORD_OFFSET);
        }
      }
    }
  }

  // Always return the first load generated as this might cause a fault if base is null.
  return load;
}

/* Load value from base + scaled index. */
LIR* X86Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                                 int scale, OpSize size) {
  return LoadBaseIndexedDisp(r_base, r_index, scale, 0, r_dest, size);
}

LIR* X86Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
                              OpSize size, VolatileKind is_volatile) {
  // LoadBaseDisp() will emit correct insn for atomic load on x86
  // assuming r_dest is correctly prepared using RegClassForFieldLoadStore().

  LIR* load = LoadBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_dest,
                                  size);

  if (UNLIKELY(is_volatile == kVolatile)) {
    GenMemBarrier(kLoadAny);  // Only a scheduling barrier.
  }

  return load;
}

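// Store r_src to base + scaled index + displacement. 32-bit pairs are written
// as two moves, and when MIR_STORE_NON_TEMPORAL is set, eligible 32/64-bit
// moves are switched to their movnti forms to bypass the cache.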
LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
                                      int displacement, RegStorage r_src, OpSize size,
                                      int opt_flags) {
  LIR *store = nullptr;
  LIR *store2 = nullptr;
  bool is_array = r_index.Valid();
  bool pair = r_src.IsPair();
  bool is64bit = (size == k64) || (size == kDouble);
  bool consider_non_temporal = false;

  X86OpCode opcode = kX86Nop;
  switch (size) {
    case k64:
      consider_non_temporal = true;
      FALLTHROUGH_INTENDED;
    case kDouble:
      if (r_src.IsFloat()) {
        opcode = is_array ? kX86MovsdAR : kX86MovsdMR;
      } else if (!pair) {
        opcode = is_array ? kX86Mov64AR : kX86Mov64MR;
      } else {
        opcode = is_array ? kX86Mov32AR : kX86Mov32MR;
      }
      // TODO: double store is to unaligned address
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kWord:
      if (cu_->target64) {
        opcode = is_array ? kX86Mov64AR : kX86Mov64MR;
        CHECK_EQ(is_array, false);
        CHECK_EQ(r_src.IsFloat(), false);
        consider_non_temporal = true;
        break;
      }
      FALLTHROUGH_INTENDED;  // else fall-through to k32 case
    case k32:
    case kSingle:
    case kReference:
      opcode = is_array ? kX86Mov32AR : kX86Mov32MR;
      if (r_src.IsFloat()) {
        opcode = is_array ? kX86MovssAR : kX86MovssMR;
        DCHECK(r_src.IsSingle());
      }
      DCHECK_EQ((displacement & 0x3), 0);
      consider_non_temporal = true;
      break;
    case kUnsignedHalf:
    case kSignedHalf:
      opcode = is_array ? kX86Mov16AR : kX86Mov16MR;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = is_array ? kX86Mov8AR : kX86Mov8MR;
      break;
    default:
      LOG(FATAL) << "Bad case in StoreBaseIndexedDispBody";
  }

  // Handle the non temporal hint here.
  if (consider_non_temporal && ((opt_flags & MIR_STORE_NON_TEMPORAL) != 0)) {
    switch (opcode) {
      // We currently only handle 32/64 bit moves here.
      case kX86Mov64AR:
        opcode = kX86Movnti64AR;
        break;
      case kX86Mov64MR:
        opcode = kX86Movnti64MR;
        break;
      case kX86Mov32AR:
        opcode = kX86Movnti32AR;
        break;
      case kX86Mov32MR:
        opcode = kX86Movnti32MR;
        break;
      default:
        // Do nothing here.
        break;
    }
  }

  if (!is_array) {
    if (!pair) {
      store = NewLIR3(opcode, r_base.GetReg(), displacement + LOWORD_OFFSET, r_src.GetReg());
    } else {
      DCHECK(!r_src.IsFloat());  // Make sure we're not still using a pair here.
      store = NewLIR3(opcode, r_base.GetReg(), displacement + LOWORD_OFFSET, r_src.GetLowReg());
      store2 = NewLIR3(opcode, r_base.GetReg(), displacement + HIWORD_OFFSET, r_src.GetHighReg());
    }
    if (mem_ref_type_ == ResourceMask::kDalvikReg) {
      DCHECK_EQ(r_base, cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32);
      AnnotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
                              false /* is_load */, is64bit);
      if (pair) {
        AnnotateDalvikRegAccess(store2, (displacement + HIWORD_OFFSET) >> 2,
                                false /* is_load */, is64bit);
      }
    }
  } else {
    if (!pair) {
      store = NewLIR5(opcode, r_base.GetReg(), r_index.GetReg(), scale,
                      displacement + LOWORD_OFFSET, r_src.GetReg());
    } else {
      DCHECK(!r_src.IsFloat());  // Make sure we're not still using a pair here.
      store = NewLIR5(opcode, r_base.GetReg(), r_index.GetReg(), scale,
                      displacement + LOWORD_OFFSET, r_src.GetLowReg());
      store2 = NewLIR5(opcode, r_base.GetReg(), r_index.GetReg(), scale,
                       displacement + HIWORD_OFFSET, r_src.GetHighReg());
    }
  }
  return store;
}

/* Store value at base + scaled index. */
LIR* X86Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                                  int scale, OpSize size) {
  return StoreBaseIndexedDisp(r_base, r_index, scale, 0, r_src, size);
}

LIR* X86Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size,
                               VolatileKind is_volatile) {
  if (UNLIKELY(is_volatile == kVolatile)) {
    GenMemBarrier(kAnyStore);  // Only a scheduling barrier.
  }

  // StoreBaseDisp() will emit correct insn for atomic store on x86
  // assuming r_dest is correctly prepared using RegClassForFieldLoadStore().
  // x86 only allows registers EAX-EDX to be used as byte registers; if the input src is not
  // valid, allocate a temp.
  bool allocated_temp = false;
  if (size == kUnsignedByte || size == kSignedByte) {
    if (!cu_->target64 && !r_src.Low4()) {
      RegStorage r_input = r_src;
      r_src = AllocateByteRegister();
      OpRegCopy(r_src, r_input);
      allocated_temp = true;
    }
  }

  LIR* store = StoreBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_src, size);

  if (UNLIKELY(is_volatile == kVolatile)) {
    // A volatile load might follow the volatile store so insert a StoreLoad barrier.
    // This does require a fence, even on x86.
    GenMemBarrier(kAnyAny);
  }

  if (allocated_temp) {
    FreeTemp(r_src);
  }

  return store;
}

LIR* X86Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
                                   int offset, int check_value, LIR* target, LIR** compare) {
  UNUSED(temp_reg);  // Comparison performed directly with memory.
  LIR* inst = NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base_reg.GetReg(),
                      offset, check_value);
  if (compare != nullptr) {
    *compare = inst;
  }
  LIR* branch = OpCondBranch(cond, target);
  return branch;
}

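// 32-bit x86 only: scan each MIR for anything that will need the PC-relative
// base register (double-constant FP ops, large packed switches, vector
// constants, and some double intrinsics) and credit the base temp's use count
// so promotion can weigh it.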
void X86Mir2Lir::AnalyzeMIR(RefCounts* core_counts, MIR* mir, uint32_t weight) {
  if (cu_->target64) {
    Mir2Lir::AnalyzeMIR(core_counts, mir, weight);
    return;
  }

  int opcode = mir->dalvikInsn.opcode;
  bool uses_pc_rel_load = false;
  switch (opcode) {
    // Instructions referencing doubles.
    case Instruction::CMPL_DOUBLE:
    case Instruction::CMPG_DOUBLE:
    case Instruction::NEG_DOUBLE:
    case Instruction::ADD_DOUBLE:
    case Instruction::SUB_DOUBLE:
    case Instruction::MUL_DOUBLE:
    case Instruction::DIV_DOUBLE:
    case Instruction::REM_DOUBLE:
    case Instruction::ADD_DOUBLE_2ADDR:
    case Instruction::SUB_DOUBLE_2ADDR:
    case Instruction::MUL_DOUBLE_2ADDR:
    case Instruction::DIV_DOUBLE_2ADDR:
    case Instruction::REM_DOUBLE_2ADDR:
    case kMirOpFusedCmplDouble:
    case kMirOpFusedCmpgDouble:
      uses_pc_rel_load = AnalyzeFPInstruction(opcode, mir);
      break;

    // Packed switch needs the PC-relative pointer if it's large.
    case Instruction::PACKED_SWITCH:
      if (mir_graph_->GetTable(mir, mir->dalvikInsn.vB)[1] > kSmallSwitchThreshold) {
        uses_pc_rel_load = true;
      }
      break;

    case kMirOpConstVector:
      uses_pc_rel_load = true;
      break;
    case kMirOpPackedMultiply:
    case kMirOpPackedShiftLeft:
    case kMirOpPackedSignedShiftRight:
    case kMirOpPackedUnsignedShiftRight:
      {
        // Byte emulation requires constants from the literal pool.
        OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
        if (opsize == kSignedByte || opsize == kUnsignedByte) {
          uses_pc_rel_load = true;
        }
      }
      break;

    case Instruction::INVOKE_STATIC:
    case Instruction::INVOKE_STATIC_RANGE:
      if (mir_graph_->GetMethodLoweringInfo(mir).IsIntrinsic()) {
        uses_pc_rel_load = AnalyzeInvokeStaticIntrinsic(mir);
        break;
      }
      FALLTHROUGH_INTENDED;
    default:
      Mir2Lir::AnalyzeMIR(core_counts, mir, weight);
      break;
  }

  if (uses_pc_rel_load) {
    DCHECK(pc_rel_temp_ != nullptr);
    core_counts[SRegToPMap(pc_rel_temp_->s_reg_low)].count += weight;
  }
}

bool X86Mir2Lir::AnalyzeFPInstruction(int opcode, MIR* mir) {
  DCHECK(!cu_->target64);
  // Look at all the uses, and see if they are double constants.
  uint64_t attrs = MIRGraph::GetDataFlowAttributes(static_cast<Instruction::Code>(opcode));
  int next_sreg = 0;
  if (attrs & DF_UA) {
    if (attrs & DF_A_WIDE) {
      if (AnalyzeDoubleUse(mir_graph_->GetSrcWide(mir, next_sreg))) {
        return true;
      }
      next_sreg += 2;
    } else {
      next_sreg++;
    }
  }
  if (attrs & DF_UB) {
    if (attrs & DF_B_WIDE) {
      if (AnalyzeDoubleUse(mir_graph_->GetSrcWide(mir, next_sreg))) {
        return true;
      }
      next_sreg += 2;
    } else {
      next_sreg++;
    }
  }
  if (attrs & DF_UC) {
    if (attrs & DF_C_WIDE) {
      if (AnalyzeDoubleUse(mir_graph_->GetSrcWide(mir, next_sreg))) {
        return true;
      }
    }
  }
  return false;
}

inline bool X86Mir2Lir::AnalyzeDoubleUse(RegLocation use) {
  // If this is a double literal, we will want it in the literal pool on 32b platforms.
  DCHECK(!cu_->target64);
  return use.is_const;
}

bool X86Mir2Lir::AnalyzeInvokeStaticIntrinsic(MIR* mir) {
  // 64 bit RIP addressing doesn't need this analysis.
  DCHECK(!cu_->target64);

  // Retrieve the type of the intrinsic.
  MethodReference method_ref = mir_graph_->GetMethodLoweringInfo(mir).GetTargetMethod();
  DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
  DexFileMethodInliner* method_inliner =
      cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(method_ref.dex_file);
  InlineMethod method;
  bool is_intrinsic = method_inliner->IsIntrinsic(method_ref.dex_method_index, &method);
  DCHECK(is_intrinsic);

  switch (method.opcode) {
    case kIntrinsicAbsDouble:
    case kIntrinsicMinMaxDouble:
      return true;
    default:
      return false;
  }
}

RegLocation X86Mir2Lir::UpdateLocTyped(RegLocation loc) {
  loc = UpdateLoc(loc);
  if ((loc.location == kLocPhysReg) && (loc.fp != loc.reg.IsFloat())) {
    if (GetRegInfo(loc.reg)->IsTemp()) {
      Clobber(loc.reg);
      FreeTemp(loc.reg);
      loc.reg = RegStorage::InvalidReg();
      loc.location = kLocDalvikFrame;
    }
  }
  DCHECK(CheckCorePoolSanity());
  return loc;
}

RegLocation X86Mir2Lir::UpdateLocWideTyped(RegLocation loc) {
  loc = UpdateLocWide(loc);
  if ((loc.location == kLocPhysReg) && (loc.fp != loc.reg.IsFloat())) {
    if (GetRegInfo(loc.reg)->IsTemp()) {
      Clobber(loc.reg);
      FreeTemp(loc.reg);
      loc.reg = RegStorage::InvalidReg();
      loc.location = kLocDalvikFrame;
    }
  }
  DCHECK(CheckCorePoolSanity());
  return loc;
}

LIR* X86Mir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
  UNUSED(r_tgt);  // Call to absolute memory location doesn't need a temporary target register.
  if (cu_->target64) {
    return OpThreadMem(op, GetThreadOffset<8>(trampoline));
  } else {
    return OpThreadMem(op, GetThreadOffset<4>(trampoline));
  }
}

void X86Mir2Lir::CountRefs(RefCounts* core_counts, RefCounts* fp_counts, size_t num_regs) {
  // Start with the default counts.
  Mir2Lir::CountRefs(core_counts, fp_counts, num_regs);

  if (pc_rel_temp_ != nullptr) {
    // Now, if the dex cache array base temp is used only once outside any loops (weight = 1),
    // avoid the promotion, otherwise boost the weight by factor 2 because the full PC-relative
    // load sequence is 3 instructions long and by promoting the PC base we save 2 instructions
    // per use.
    int p_map_idx = SRegToPMap(pc_rel_temp_->s_reg_low);
    if (core_counts[p_map_idx].count == 1) {
      core_counts[p_map_idx].count = 0;
    } else {
      core_counts[p_map_idx].count *= 2;
    }
  }
}

void X86Mir2Lir::DoPromotion() {
  if (!cu_->target64) {
    pc_rel_temp_ = mir_graph_->GetNewCompilerTemp(kCompilerTempBackend, false);
  }

  Mir2Lir::DoPromotion();

  if (pc_rel_temp_ != nullptr) {
    // Now, if the dex cache array base temp is promoted, remember the register but
    // always remove the temp's stack location to avoid unnecessarily bloating the stack.
    pc_rel_base_reg_ = mir_graph_->reg_location_[pc_rel_temp_->s_reg_low].reg;
    DCHECK(!pc_rel_base_reg_.Valid() || !pc_rel_base_reg_.IsFloat());
    mir_graph_->RemoveLastCompilerTemp(kCompilerTempBackend, false, pc_rel_temp_);
    pc_rel_temp_ = nullptr;
  }
}

}  // namespace art