/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

static const RegLocation badLoc = {kLocDalvikFrame, 0, 0, INVALID_REG,
                                   INVALID_REG, INVALID_SREG, 0,
                                   kLocDalvikFrame, INVALID_REG, INVALID_REG,
                                   INVALID_OFFSET};
static const RegLocation retLoc = LOC_DALVIK_RETURN_VAL;
static const RegLocation retLocWide = LOC_DALVIK_RETURN_VAL_WIDE;

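/*
 * Note on the call-out pattern used by the slow paths below: live values are
 * flushed to their home frame locations, the helper entrypoint is loaded from
 * the Thread struct into rLR, arguments are marshaled into r0..r3, the helper
 * is invoked via blx through rLR, the call-clobbered registers are then
 * invalidated, and any result is picked up via oatGetReturn[Wide]().
 */
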
/*
 * Let helper function take care of everything.  Will call
 * Array::AllocFromCode(type_idx, method, count);
 * Note: AllocFromCode will handle checks for errNegativeArraySize.
 */
static void genNewArray(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
                        RegLocation rlSrc)
{
    oatFlushAllRegs(cUnit);    /* Everything to home location */
    loadWordDisp(cUnit, rSELF,
                 OFFSETOF_MEMBER(Thread, pAllocFromCode), rLR);
    loadCurrMethodDirect(cUnit, r1);              // arg1 <- Method*
    loadConstant(cUnit, r0, mir->dalvikInsn.vC);  // arg0 <- type_id
    loadValueDirectFixed(cUnit, rlSrc, r2);       // arg2 <- count
    opReg(cUnit, kOpBlx, rLR);
    oatClobberCallRegs(cUnit);
    RegLocation rlResult = oatGetReturn(cUnit);
    storeValue(cUnit, rlDest, rlResult);
}

/*
 * Similar to genNewArray, but with post-allocation initialization.
 * Verifier guarantees we're dealing with an array class.  Current
 * code throws runtime exception "bad Filled array req" for 'D' and 'J'.
 * Current code also throws internal unimp if not 'L', '[' or 'I'.
 */
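/*
 * Fill strategy (summary of the code below): the range form copies the
 * argument block from the Dalvik frame into the array's data area with a
 * small count-down loop, while the non-range form stores each argument
 * individually at Array::DataOffset() + 4 * i.
 */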
static void genFilledNewArray(CompilationUnit* cUnit, MIR* mir, bool isRange)
{
    DecodedInstruction* dInsn = &mir->dalvikInsn;
    int elems;
    int typeId;
    if (isRange) {
        elems = dInsn->vA;
        typeId = dInsn->vB;
    } else {
        elems = dInsn->vB;
        typeId = dInsn->vC;
    }
    oatFlushAllRegs(cUnit);    /* Everything to home location */
    // TODO: Alloc variant that checks types (see header comment)
    UNIMPLEMENTED(WARNING) << "Need AllocFromCode variant w/ extra checks";
    loadWordDisp(cUnit, rSELF,
                 OFFSETOF_MEMBER(Thread, pAllocFromCode), rLR);
    loadCurrMethodDirect(cUnit, r1);    // arg1 <- Method*
    loadConstant(cUnit, r0, typeId);    // arg0 <- type_id
    loadConstant(cUnit, r2, elems);     // arg2 <- count
    opReg(cUnit, kOpBlx, rLR);
    /*
     * NOTE: the implicit target for OP_FILLED_NEW_ARRAY is the
     * return region.  Because AllocFromCode placed the new array
     * in r0, we'll just lock it into place.  When debugger support is
     * added, it may be necessary to additionally copy all return
     * values to a home location in thread-local storage.
     */
    oatLockTemp(cUnit, r0);

    // Having a range of 0 is legal
    if (isRange && (dInsn->vA > 0)) {
        /*
         * Bit of ugliness here.  We're going to generate a mem copy loop
         * on the register range, but it is possible that some regs
         * in the range have been promoted.  This is unlikely, but
         * before generating the copy, we'll just force a flush to the
         * home location of any regs in the source range that have
         * been promoted.
         */
        for (unsigned int i = 0; i < dInsn->vA; i++) {
            RegLocation loc = oatUpdateLoc(cUnit,
                                           oatGetSrc(cUnit, mir, i));
            if (loc.location == kLocPhysReg) {
                storeBaseDisp(cUnit, rSP, loc.spOffset, loc.lowReg, kWord);
            }
        }
        /*
         * TUNING note: generated code here could be much improved, but
         * this is an uncommon operation and isn't especially performance
         * critical.
         */
        int rSrc = oatAllocTemp(cUnit);
        int rDst = oatAllocTemp(cUnit);
        int rIdx = oatAllocTemp(cUnit);
        int rVal = rLR;  // Using a lot of temps, rLR is known free here
        // Set up source pointer
        RegLocation rlFirst = oatGetSrc(cUnit, mir, 0);
        opRegRegImm(cUnit, kOpAdd, rSrc, rSP, rlFirst.spOffset);
        // Set up the target pointer
        opRegRegImm(cUnit, kOpAdd, rDst, r0,
                    Array::DataOffset().Int32Value());
        // Set up the loop counter (copy elements vA-1 down to 0)
        loadConstant(cUnit, rIdx, dInsn->vA - 1);
        // Generate the copy loop.  Going backwards for convenience
        ArmLIR* target = newLIR0(cUnit, kArmPseudoTargetLabel);
        target->defMask = ENCODE_ALL;
        // Copy next element
        loadBaseIndexed(cUnit, rSrc, rIdx, rVal, 2, kWord);
        storeBaseIndexed(cUnit, rDst, rIdx, rVal, 2, kWord);
        // Use setflags encoding here
        newLIR3(cUnit, kThumb2SubsRRI12, rIdx, rIdx, 1);
        ArmLIR* branch = opCondBranch(cUnit, kArmCondGe);
        branch->generic.target = (LIR*)target;
    } else if (!isRange) {
        // TUNING: interleave
        for (unsigned int i = 0; i < dInsn->vA; i++) {
            RegLocation rlArg = loadValue(cUnit,
                                          oatGetSrc(cUnit, mir, i), kCoreReg);
            storeBaseDisp(cUnit, r0,
                          Array::DataOffset().Int32Value() +
                          i * 4, rlArg.lowReg, kWord);
            // If the loadValue caused a temp to be allocated, free it
            if (oatIsTemp(cUnit, rlArg.lowReg)) {
                oatFreeTemp(cUnit, rlArg.lowReg);
            }
        }
    }
}

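/*
 * Static field accessors.  At present these always take the slow path
 * through the Thread entrypoints (pSet32Static, pSetObjStatic, pSet64Static,
 * pGet32Static, pGetObjStatic, pGet64Static): the field index goes in r0,
 * the referring Method* in r1, and the new value (if any) in r2/r2-r3.
 * The disabled fast paths below predate the current runtime model.
 */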
static void genSput(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
{
    bool slow_path = true;
    bool isObject = ((mir->dalvikInsn.opcode == OP_SPUT_OBJECT) ||
                     (mir->dalvikInsn.opcode == OP_SPUT_OBJECT_VOLATILE));
    UNIMPLEMENTED(WARNING) << "Implement sput fast path";
    int funcOffset;
    if (slow_path) {
        if (isObject) {
            funcOffset = OFFSETOF_MEMBER(Thread, pSetObjStatic);
        } else {
            funcOffset = OFFSETOF_MEMBER(Thread, pSet32Static);
        }
        oatFlushAllRegs(cUnit);
        loadWordDisp(cUnit, rSELF, funcOffset, rLR);
        loadConstant(cUnit, r0, mir->dalvikInsn.vB);
        loadCurrMethodDirect(cUnit, r1);
        loadValueDirect(cUnit, rlSrc, r2);
        opReg(cUnit, kOpBlx, rLR);
        oatClobberCallRegs(cUnit);
    } else {
        UNIMPLEMENTED(FATAL) << "Must update for new world";
#if 0
        int valOffset = OFFSETOF_MEMBER(StaticField, value);
        int tReg = oatAllocTemp(cUnit);
        int objHead;
        bool isVolatile;
        bool isSputObject;
        const Method *method = (mir->OptimizationFlags & MIR_CALLEE) ?
            mir->meta.calleeMethod : cUnit->method;
        void* fieldPtr = (void*)
            (method->clazz->pDvmDex->pResFields[mir->dalvikInsn.vB]);
        Opcode opcode = mir->dalvikInsn.opcode;

        if (fieldPtr == NULL) {
            // FIXME: need to handle this case for oat();
            UNIMPLEMENTED(FATAL);
        }

#if ANDROID_SMP != 0
        isVolatile = (opcode == OP_SPUT_VOLATILE) ||
                     (opcode == OP_SPUT_VOLATILE_JUMBO) ||
                     (opcode == OP_SPUT_OBJECT_VOLATILE) ||
                     (opcode == OP_SPUT_OBJECT_VOLATILE_JUMBO);
        assert(isVolatile == artIsVolatileField((Field *) fieldPtr));
#else
        isVolatile = artIsVolatileField((Field *) fieldPtr);
#endif

        isSputObject = (opcode == OP_SPUT_OBJECT) ||
                       (opcode == OP_SPUT_OBJECT_VOLATILE);

        rlSrc = oatGetSrc(cUnit, mir, 0);
        rlSrc = loadValue(cUnit, rlSrc, kAnyReg);
        loadConstant(cUnit, tReg, (int) fieldPtr);
        if (isSputObject) {
            objHead = oatAllocTemp(cUnit);
            loadWordDisp(cUnit, tReg, OFFSETOF_MEMBER(Field, clazz), objHead);
        }
        storeWordDisp(cUnit, tReg, valOffset, rlSrc.lowReg);
        oatFreeTemp(cUnit, tReg);
        if (isVolatile) {
            oatGenMemBarrier(cUnit, kSY);
        }
        if (isSputObject) {
            /* NOTE: marking card based on sfield->clazz */
            markGCCard(cUnit, rlSrc.lowReg, objHead);
            oatFreeTemp(cUnit, objHead);
        }
#endif
    }
}

static void genSputWide(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
{
    bool slow_path = true;
    UNIMPLEMENTED(WARNING) << "Implement sput-wide fast path";
    int funcOffset;
    if (slow_path) {
        funcOffset = OFFSETOF_MEMBER(Thread, pSet64Static);
        oatFlushAllRegs(cUnit);
        loadWordDisp(cUnit, rSELF, funcOffset, rLR);
        loadConstant(cUnit, r0, mir->dalvikInsn.vB);
        loadCurrMethodDirect(cUnit, r1);
        loadValueDirectWideFixed(cUnit, rlSrc, r2, r3);
        opReg(cUnit, kOpBlx, rLR);
        oatClobberCallRegs(cUnit);
    } else {
        UNIMPLEMENTED(FATAL) << "Must update for new world";
#if 0
        int tReg = oatAllocTemp(cUnit);
        int valOffset = OFFSETOF_MEMBER(StaticField, value);
        const Method *method = (mir->OptimizationFlags & MIR_CALLEE) ?
            mir->meta.calleeMethod : cUnit->method;
        void* fieldPtr = (void*)
            (method->clazz->pDvmDex->pResFields[mir->dalvikInsn.vB]);

        if (fieldPtr == NULL) {
            // FIXME: need to handle this case for oat();
            UNIMPLEMENTED(FATAL);
        }

        rlSrc = oatGetSrcWide(cUnit, mir, 0, 1);
        rlSrc = loadValueWide(cUnit, rlSrc, kAnyReg);
        loadConstant(cUnit, tReg, (int) fieldPtr + valOffset);

        storePair(cUnit, tReg, rlSrc.lowReg, rlSrc.highReg);
#endif
    }
}

static void genSgetWide(CompilationUnit* cUnit, MIR* mir,
                        RegLocation rlResult, RegLocation rlDest)
{
    bool slow_path = true;
    UNIMPLEMENTED(WARNING) << "Implement sget-wide fast path";
    int funcOffset;
    if (slow_path) {
        funcOffset = OFFSETOF_MEMBER(Thread, pGet64Static);
        oatFlushAllRegs(cUnit);
        loadWordDisp(cUnit, rSELF, funcOffset, rLR);
        loadConstant(cUnit, r0, mir->dalvikInsn.vB);
        loadCurrMethodDirect(cUnit, r1);
        opReg(cUnit, kOpBlx, rLR);
        RegLocation rlResult = oatGetReturnWide(cUnit);
        storeValueWide(cUnit, rlDest, rlResult);
    } else {
        UNIMPLEMENTED(FATAL) << "Must update for new world";
#if 0
        int valOffset = OFFSETOF_MEMBER(StaticField, value);
        const Method *method = (mir->OptimizationFlags & MIR_CALLEE) ?
            mir->meta.calleeMethod : cUnit->method;
        void* fieldPtr = (void*)
            (method->clazz->pDvmDex->pResFields[mir->dalvikInsn.vB]);

        if (fieldPtr == NULL) {
            // FIXME: need to handle this case for oat();
            UNIMPLEMENTED(FATAL);
        }

        int tReg = oatAllocTemp(cUnit);
        rlDest = oatGetDestWide(cUnit, mir, 0, 1);
        rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
        loadConstant(cUnit, tReg, (int) fieldPtr + valOffset);

        loadPair(cUnit, tReg, rlResult.lowReg, rlResult.highReg);

        storeValueWide(cUnit, rlDest, rlResult);
#endif
    }
}

static void genSget(CompilationUnit* cUnit, MIR* mir,
                    RegLocation rlResult, RegLocation rlDest)
{
    bool slow_path = true;
    bool isObject = ((mir->dalvikInsn.opcode == OP_SGET_OBJECT) ||
                     (mir->dalvikInsn.opcode == OP_SGET_OBJECT_VOLATILE));
    UNIMPLEMENTED(WARNING) << "Implement sget fast path";
    int funcOffset;
    if (slow_path) {
        if (isObject) {
            funcOffset = OFFSETOF_MEMBER(Thread, pGetObjStatic);
        } else {
            funcOffset = OFFSETOF_MEMBER(Thread, pGet32Static);
        }
        oatFlushAllRegs(cUnit);
        loadWordDisp(cUnit, rSELF, funcOffset, rLR);
        loadConstant(cUnit, r0, mir->dalvikInsn.vB);
        loadCurrMethodDirect(cUnit, r1);
        opReg(cUnit, kOpBlx, rLR);
        RegLocation rlResult = oatGetReturn(cUnit);
        storeValue(cUnit, rlDest, rlResult);
    } else {
        UNIMPLEMENTED(FATAL) << "Must update for new world";
#if 0
        int valOffset = OFFSETOF_MEMBER(StaticField, value);
        int tReg = oatAllocTemp(cUnit);
        bool isVolatile;
        const Method *method = cUnit->method;
        void* fieldPtr = (void*)
            (method->clazz->pDvmDex->pResFields[mir->dalvikInsn.vB]);

        if (fieldPtr == NULL) {
            // FIXME: need to handle this case for oat();
            UNIMPLEMENTED(FATAL);
        }

        /*
         * On SMP systems, Dalvik opcodes found to be referencing
         * volatile fields are rewritten to their _VOLATILE variant.
         * However, this does not happen on non-SMP systems.  The compiler
         * still needs to know about volatility to avoid unsafe
         * optimizations so we determine volatility based on either
         * the opcode or the field access flags.
         */
#if ANDROID_SMP != 0
        Opcode opcode = mir->dalvikInsn.opcode;
        isVolatile = (opcode == OP_SGET_VOLATILE) ||
                     (opcode == OP_SGET_OBJECT_VOLATILE);
        assert(isVolatile == artIsVolatileField((Field *) fieldPtr));
#else
        isVolatile = artIsVolatileField((Field *) fieldPtr);
#endif

        rlDest = oatGetDest(cUnit, mir, 0);
        rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
        loadConstant(cUnit, tReg, (int) fieldPtr + valOffset);

        if (isVolatile) {
            oatGenMemBarrier(cUnit, kSY);
        }
        loadWordDisp(cUnit, tReg, 0, rlResult.lowReg);

        storeValue(cUnit, rlDest, rlResult);
#endif
    }
}

typedef int (*NextCallInsn)(CompilationUnit*, MIR*, DecodedInstruction*, int);
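
/*
 * Call-sequence state machine convention: a NextCallInsn helper is invoked
 * repeatedly with an increasing state value, emits the next piece of the
 * invoke sequence for that state, and returns state + 1.  A return value of
 * -1 signals that the sequence is complete, which is how the
 * "while (callState >= 0)" loops in the genInvoke* routines terminate.  This
 * lets call setup be interleaved with argument loading.
 */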

/*
 * Bit of a hack here - in lieu of a real scheduling pass,
 * emit the next instruction in static & direct invoke sequences.
 */
static int nextSDCallInsn(CompilationUnit* cUnit, MIR* mir,
                          DecodedInstruction* dInsn, int state)
{
    UNIMPLEMENTED(FATAL) << "Update with new cache model";
#if 0
    switch (state) {
        case 0:  // Get the current Method* [sets r0]
            loadCurrMethodDirect(cUnit, r0);
            break;
        case 1:  // Get the pResMethods pointer [uses r0, sets r0]
            UNIMPLEMENTED(FATAL) << "Update with new cache";
            loadBaseDisp(cUnit, mir, r0, OFFSETOF_MEMBER(Method, pResMethods),
                         r0, kWord, INVALID_SREG);
            break;
        case 2:  // Get the target Method* [uses r0, sets r0]
            loadBaseDisp(cUnit, mir, r0, dInsn->vB * 4, r0,
                         kWord, INVALID_SREG);
            break;
        case 3:  // Get the target compiled code address [uses r0, sets rLR]
            loadBaseDisp(cUnit, mir, r0,
                         OFFSETOF_MEMBER(Method, compiledInsns), rLR,
                         kWord, INVALID_SREG);
            break;
        default:
            return -1;
    }
#endif
    return state + 1;
}

// Slow path static & direct invoke launch sequence
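/*
 * Summary of the chain walked below: current Method* -> declaring_class_ ->
 * dex_cache_ -> resolved-methods array -> target Method* (indexed by the
 * method index in vB) -> compiled code entrypoint, which is left in rLR for
 * the final blx.
 */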
static int nextSDCallInsnSP(CompilationUnit* cUnit, MIR* mir,
                            DecodedInstruction* dInsn, int state)
{
    switch (state) {
        case 0:  // Get the current Method* [sets r0]
            loadCurrMethodDirect(cUnit, r0);
            break;
        case 1:  // Get the current Method->DeclaringClass() [sets r0]
            loadBaseDisp(cUnit, mir, r0,
                         OFFSETOF_MEMBER(art::Method, declaring_class_),
                         r0, kWord, INVALID_SREG);
            break;
        case 2:  // Method->DeclaringClass()->GetDexCache() [sets r0]
            loadBaseDisp(cUnit, mir, r0,
                         OFFSETOF_MEMBER(art::Class, dex_cache_), r0, kWord,
                         INVALID_SREG);
            break;
        case 3:  // Method->DeclaringClass()->GetDexCache()->methodsObjectArr
            loadBaseDisp(cUnit, mir, r0,
                         art::DexCache::ResolvedMethodsOffset().Int32Value(),
                         r0, kWord, INVALID_SREG);
            break;
        case 4:  // Skip past the object header
            opRegImm(cUnit, kOpAdd, r0, art::Array::DataOffset().Int32Value());
            break;
        case 5:  // Get the target Method* [uses r0, sets r0]
            loadBaseDisp(cUnit, mir, r0, dInsn->vB * 4, r0,
                         kWord, INVALID_SREG);
            break;
        case 6:  // Get the target compiled code address [uses r0, sets rLR]
            loadBaseDisp(cUnit, mir, r0, art::Method::GetCodeOffset(), rLR,
                         kWord, INVALID_SREG);
            break;
        default:
            return -1;
    }
    return state + 1;
}

/*
 * Bit of a hack here - in lieu of a real scheduling pass,
 * emit the next instruction in a virtual invoke sequence.
 * We can use rLR as a temp prior to target address loading.
 * Note also that we'll load the first argument ("this") into
 * r1 here rather than the standard loadArgRegs.
 */
static int nextVCallInsn(CompilationUnit* cUnit, MIR* mir,
                         DecodedInstruction* dInsn, int state)
{
    UNIMPLEMENTED(FATAL) << "Update with new cache model";
#if 0
    RegLocation rlArg;
    switch (state) {
        case 0:  // Get the current Method* [set r0]
            loadCurrMethodDirect(cUnit, r0);
            // Load "this" [set r1]
            rlArg = oatGetSrc(cUnit, mir, 0);
            loadValueDirectFixed(cUnit, rlArg, r1);
            break;
        case 1:  // Get the pResMethods pointer [use r0, set r12]
            loadBaseDisp(cUnit, mir, r0, OFFSETOF_MEMBER(Method, pResMethods),
                         r12, kWord, INVALID_SREG);
            // Is "this" null? [use r1]
            genNullCheck(cUnit, oatSSASrc(mir, 0), r1,
                         mir->offset, NULL);
            break;
        case 2:  // Get the base Method* [use r12, set r0]
            loadBaseDisp(cUnit, mir, r12, dInsn->vB * 4, r0,
                         kWord, INVALID_SREG);
            // get this->clazz [use r1, set rLR]
            loadBaseDisp(cUnit, mir, r1, OFFSETOF_MEMBER(Object, clazz), rLR,
                         kWord, INVALID_SREG);
            break;
        case 3:  // Get the method index [use r0, set r12]
            loadBaseDisp(cUnit, mir, r0, OFFSETOF_MEMBER(Method, methodIndex),
                         r12, kUnsignedHalf, INVALID_SREG);
            // get this->clazz->vtable [use rLR, set rLR]
            loadBaseDisp(cUnit, mir, rLR,
                         OFFSETOF_MEMBER(Class, vtable), rLR, kWord,
                         INVALID_SREG);
            break;
        case 4:  // get target Method* [use rLR, use r12, set r0]
            loadBaseIndexed(cUnit, rLR, r12, r0, 2, kWord);
            break;
        case 5:  // Get the target compiled code address [use r0, set rLR]
            UNIMPLEMENTED(FATAL) << "Update with new cache";
            loadBaseDisp(cUnit, mir, r0, OFFSETOF_MEMBER(Method, compiledInsns),
                         rLR, kWord, INVALID_SREG);
            break;
        default:
            return -1;
    }
#endif
    return state + 1;
}

// Slow path sequence for virtual calls
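/*
 * Summary of the sequence below: resolve the base Method* through the
 * declaring class's DexCache (as in nextSDCallInsnSP), null-check "this"
 * in r1, then dispatch through this->klass_->vtable_[method_index_] and
 * leave the compiled-code entrypoint in rLR.
 */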
static int nextVCallInsnSP(CompilationUnit* cUnit, MIR* mir,
                           DecodedInstruction* dInsn, int state)
{
    RegLocation rlArg;
    switch (state) {
        case 0:  // Get the current Method* [sets r0]
            loadCurrMethodDirect(cUnit, r0);
            break;
        case 1:  // Get the current Method->DeclaringClass() [uses/sets r0]
            loadBaseDisp(cUnit, mir, r0,
                         OFFSETOF_MEMBER(art::Method, declaring_class_),
                         r0, kWord, INVALID_SREG);
            break;
        case 2:  // Method->DeclaringClass()->GetDexCache() [uses/sets r0]
            loadBaseDisp(cUnit, mir, r0,
                         OFFSETOF_MEMBER(art::Class, dex_cache_), r0, kWord,
                         INVALID_SREG);
            break;
        case 3:  // ...()->GetDexCache()->methodsObjectArr [uses/sets r0]
            loadBaseDisp(cUnit, mir, r0,
                         art::DexCache::ResolvedMethodsOffset().Int32Value(),
                         r0, kWord, INVALID_SREG);
            // Load "this" [set r1]
            rlArg = oatGetSrc(cUnit, mir, 0);
            loadValueDirectFixed(cUnit, rlArg, r1);
            // Skip past the object header
            opRegImm(cUnit, kOpAdd, r0, art::Array::DataOffset().Int32Value());
            break;
        case 4:
            // Is "this" null? [use r1]
            genNullCheck(cUnit, oatSSASrc(mir, 0), r1, mir->offset, NULL);
            // get this->clazz [use r1, set rLR]
            loadBaseDisp(cUnit, mir, r1, OFFSETOF_MEMBER(Object, klass_), rLR,
                         kWord, INVALID_SREG);
            // Get the base Method* [uses r0, sets r0]
            loadBaseDisp(cUnit, mir, r0, dInsn->vB * 4, r0,
                         kWord, INVALID_SREG);
            // get this->clazz->vtable [use rLR, set rLR]
            loadBaseDisp(cUnit, mir, rLR,
                         OFFSETOF_MEMBER(Class, vtable_), rLR, kWord,
                         INVALID_SREG);
            // Get the method index [use r0, set r12]
            loadBaseDisp(cUnit, mir, r0, OFFSETOF_MEMBER(Method, method_index_),
                         r12, kUnsignedHalf, INVALID_SREG);
            // Skip past the object header
            opRegImm(cUnit, kOpAdd, rLR, art::Array::DataOffset().Int32Value());
            // Get target Method*
            loadBaseIndexed(cUnit, rLR, r12, r0, 2, kWord);
            break;
        case 5:  // Get the target compiled code address [uses r0, sets rLR]
            loadBaseDisp(cUnit, mir, r0, art::Method::GetCodeOffset(), rLR,
                         kWord, INVALID_SREG);
            break;
        default:
            return -1;
    }
    return state + 1;
}

/* Load up to 3 arguments in r1..r3 */
static int loadArgRegs(CompilationUnit* cUnit, MIR* mir,
                       DecodedInstruction* dInsn, int callState,
                       int *args, NextCallInsn nextCallInsn)
{
    for (int i = 0; i < 3; i++) {
        if (args[i] != INVALID_REG) {
            RegLocation rlArg = oatGetSrc(cUnit, mir, i);
            // Arguments are treated as a series of untyped 32-bit values.
            rlArg.wide = false;
            loadValueDirectFixed(cUnit, rlArg, r1 + i);
            callState = nextCallInsn(cUnit, mir, dInsn, callState);
        }
    }
    return callState;
}

/*
 * Interleave launch code for INVOKE_INTERFACE.  The target is
 * identified using artFindInterfaceMethodInCache(class, ref, method, dex).
 * Note that we'll have to reload "this" following the helper call.
 *
 * FIXME: do we need to have artFindInterfaceMethodInCache return
 * a NULL if not found so we can throw exception here?  Otherwise,
 * may need to pass some additional info to allow the helper function
 * to throw on its own.
 */
static int nextInterfaceCallInsn(CompilationUnit* cUnit, MIR* mir,
                                 DecodedInstruction* dInsn, int state)
{
    UNIMPLEMENTED(FATAL) << "Need findInterfaceMethodInCache";
#if 0
    RegLocation rlArg;
    switch (state) {
        case 0:
            // Load "this" [set r12]
            rlArg = oatGetSrc(cUnit, mir, 0);
            loadValueDirectFixed(cUnit, rlArg, r12);
            // Get the current Method* [set arg2]
            loadCurrMethodDirect(cUnit, r2);
            // Is "this" null? [use r12]
            genNullCheck(cUnit, oatSSASrc(mir, 0), r12,
                         mir->offset, NULL);
            // Get curMethod->clazz [set arg3]
            loadBaseDisp(cUnit, mir, r2, OFFSETOF_MEMBER(Method, clazz),
                         r3, kWord, INVALID_SREG);
            // Load this->class [use r12, set arg0]
            loadBaseDisp(cUnit, mir, r12, OFFSETOF_MEMBER(Class, clazz),
                         r3, kWord, INVALID_SREG);
            // Load address of helper function
            loadBaseDisp(cUnit, mir, rSELF,
                         OFFSETOF_MEMBER(Thread, pArtFindInterfaceMethodInCache),
                         rLR, kWord, INVALID_SREG);
            // Get dvmDex
            loadBaseDisp(cUnit, mir, r3, OFFSETOF_MEMBER(Class, pDvmDex),
                         r3, kWord, INVALID_SREG);
            // Load ref [set arg1]
            loadConstant(cUnit, r1, dInsn->vB);
            // Call out to helper, target Method returned in ret0
            newLIR1(cUnit, kThumbBlxR, rLR);
            break;
        case 1:  // Get the target compiled code address [use r0, set rLR]
            loadBaseDisp(cUnit, mir, r0, OFFSETOF_MEMBER(Method, compiledInsns),
                         rLR, kWord, INVALID_SREG);
        default:
            return -1;
    }
#endif
    return state + 1;
}

/*
 * Interleave launch code for INVOKE_SUPER.  See comments
 * for nextVCallInsn.
 */
static int nextSuperCallInsn(CompilationUnit* cUnit, MIR* mir,
                             DecodedInstruction* dInsn, int state)
{
    UNIMPLEMENTED(FATAL) << "Need INVOKE_SUPER implementation";
#if 0
    RegLocation rlArg;
    switch (state) {
        case 0:
            // Get the current Method* [set r0]
            loadCurrMethodDirect(cUnit, r0);
            // Load "this" [set r1]
            rlArg = oatGetSrc(cUnit, mir, 0);
            loadValueDirectFixed(cUnit, rlArg, r1);
            // Get method->clazz [use r0, set r12]
            loadBaseDisp(cUnit, mir, r0, OFFSETOF_MEMBER(Method, clazz),
                         r12, kWord, INVALID_SREG);
            // Get pResMethods [use r0, set rLR]
            loadBaseDisp(cUnit, mir, r0, OFFSETOF_MEMBER(Method, pResMethods),
                         rLR, kWord, INVALID_SREG);
            // Get clazz->super [use r12, set r12]
            loadBaseDisp(cUnit, mir, r12, OFFSETOF_MEMBER(Class, super),
                         r12, kWord, INVALID_SREG);
            // Get base method [use rLR, set r0]
            loadBaseDisp(cUnit, mir, rLR, dInsn->vB * 4, r0,
                         kWord, INVALID_SREG);
            // Is "this" null? [use r1]
            genNullCheck(cUnit, oatSSASrc(mir, 0), r1,
                         mir->offset, NULL);
            // Get methodIndex [use r0, set rLR]
            loadBaseDisp(cUnit, mir, r0, OFFSETOF_MEMBER(Method, methodIndex),
                         rLR, kUnsignedHalf, INVALID_SREG);
            // Get vtableCount [use r12, set r0]
            loadBaseDisp(cUnit, mir, r12,
                         OFFSETOF_MEMBER(Class, vtableCount),
                         r0, kWord, INVALID_SREG);
            // Compare method index w/ vtable count [use r12, use rLR]
            genRegRegCheck(cUnit, kArmCondGe, rLR, r0, mir->offset, NULL);
            // get target Method* [use rLR, use r12, set r0]
            loadBaseIndexed(cUnit, r0, r12, rLR, 2, kWord);
        case 1:  // Get the target compiled code address [use r0, set rLR]
            loadBaseDisp(cUnit, mir, r0, OFFSETOF_MEMBER(Method, compiledInsns),
                         rLR, kWord, INVALID_SREG);
        default:
            return -1;
    }
#endif
    return state + 1;
}

/*
 * Load up to 5 arguments, the first three of which will be in
 * r1 .. r3.  On entry r0 contains the current method pointer,
 * and as part of the load sequence, it must be replaced with
 * the target method pointer.  Note, this may also be called
 * for "range" variants if the number of arguments is 5 or fewer.
 */
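/*
 * Out-argument frame layout used below: the outgoing Method* occupies sp+0
 * and argument slot i lives at sp + (i + 1) * 4, so args 3 and up are
 * stored starting at sp+16 while args 0-2 travel in r1-r3.
 */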
static int genDalvikArgsNoRange(CompilationUnit* cUnit, MIR* mir,
                                DecodedInstruction* dInsn, int callState,
                                ArmLIR** pcrLabel, bool isRange,
                                NextCallInsn nextCallInsn)
{
    RegLocation rlArg;
    int registerArgs[3];

    /* If no arguments, just return */
    if (dInsn->vA == 0)
        return callState;

    oatLockCallTemps(cUnit);
    callState = nextCallInsn(cUnit, mir, dInsn, callState);

    /*
     * Load the frame arguments (arg3 and up) first.  Coded a little oddly
     * to pre-schedule the method pointer target.
     */
    for (unsigned int i = 3; i < dInsn->vA; i++) {
        int reg;
        int arg = (isRange) ? dInsn->vC + i : i;
        rlArg = oatUpdateLoc(cUnit, oatGetSrc(cUnit, mir, arg));
        if (rlArg.location == kLocPhysReg) {
            reg = rlArg.lowReg;
        } else {
            reg = r1;
            loadValueDirectFixed(cUnit, rlArg, r1);
            callState = nextCallInsn(cUnit, mir, dInsn, callState);
        }
        storeBaseDisp(cUnit, rSP, (i + 1) * 4, reg, kWord);
        callState = nextCallInsn(cUnit, mir, dInsn, callState);
    }

    /* Load register arguments r1..r3 */
    for (unsigned int i = 0; i < 3; i++) {
        if (i < dInsn->vA)
            registerArgs[i] = (isRange) ? dInsn->vC + i : i;
        else
            registerArgs[i] = INVALID_REG;
    }
    callState = loadArgRegs(cUnit, mir, dInsn, callState, registerArgs,
                            nextCallInsn);

    // Load direct & need a "this" null check?
    if (pcrLabel) {
        *pcrLabel = genNullCheck(cUnit, oatSSASrc(mir, 0), r1,
                                 mir->offset, NULL);
    }
    return callState;
}

/*
 * May have 0+ arguments (also used for jumbo).  Note that
 * source virtual registers may be in physical registers, so may
 * need to be flushed to home location before copying.  This
 * applies to arg3 and above (see below).
 *
 * Two general strategies:
 *    If < 20 arguments
 *       Pass args 3-18 using vldm/vstm block copy
 *       Pass arg0, arg1 & arg2 in r1-r3
 *    If 20+ arguments
 *       Pass args arg19+ using memcpy block copy
 *       Pass arg0, arg1 & arg2 in r1-r3
 */
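/*
 * Implementation note for the block-copy path below: the vldm/vstm pair uses
 * up to 16 consecutive single-precision VFP registers (starting at fr0) as a
 * bounce buffer to move the argument words from their home frame locations
 * down into the outs area; 20+ argument calls fall back to the runtime's
 * memcpy helper instead.
 */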
static int genDalvikArgsRange(CompilationUnit* cUnit, MIR* mir,
                              DecodedInstruction* dInsn, int callState,
                              ArmLIR** pcrLabel, NextCallInsn nextCallInsn)
{
    int firstArg = dInsn->vC;
    int numArgs = dInsn->vA;

    // If we can treat it as non-range (Jumbo ops will use range form)
    if (numArgs <= 5)
        return genDalvikArgsNoRange(cUnit, mir, dInsn, callState, pcrLabel,
                                    true, nextCallInsn);
    /*
     * Make sure the range list doesn't span the break between the normal
     * Dalvik vRegs and the ins.
     */
    int highestArg = oatGetSrc(cUnit, mir, numArgs - 1).sRegLow;
    int boundaryReg = cUnit->method->num_registers_ - cUnit->method->num_ins_;
    if ((firstArg < boundaryReg) && (highestArg >= boundaryReg)) {
        LOG(FATAL) << "Argument list spanned locals & args";
    }

    /*
     * First load the non-register arguments.  Both forms expect all
     * of the source arguments to be in their home frame location, so
     * scan the sReg names and flush any that have been promoted to
     * frame backing storage.
     */
    // Scan the rest of the args - if in physReg flush to memory
    for (int i = 4; i < numArgs; i++) {
        RegLocation loc = oatGetSrc(cUnit, mir, i);
        //TODO: generic loc flushing routine
        if (loc.wide) {
            loc = oatUpdateLocWide(cUnit, loc);
            if (loc.location == kLocPhysReg) {  // TUNING: if dirty?
                storeBaseDispWide(cUnit, rSP, loc.spOffset, loc.lowReg,
                                  loc.highReg);
                callState = nextCallInsn(cUnit, mir, dInsn, callState);
            }
        } else {
            loc = oatUpdateLoc(cUnit, loc);
            if (loc.location == kLocPhysReg) {  // TUNING: if dirty?
                storeBaseDisp(cUnit, rSP, loc.spOffset, loc.lowReg, kWord);
                callState = nextCallInsn(cUnit, mir, dInsn, callState);
            }
        }
    }

    int startOffset = cUnit->regLocation[mir->ssaRep->uses[3]].spOffset;
    int outsOffset = 4 /* Method* */ + (3 * 4);
    if (numArgs >= 20) {
        // Generate memcpy; the loop above has already flushed the source
        // args to their home frame locations.
        opRegRegImm(cUnit, kOpAdd, r0, rSP, startOffset);
        opRegRegImm(cUnit, kOpAdd, r1, rSP, outsOffset);
        loadWordDisp(cUnit, rSELF, OFFSETOF_MEMBER(Thread, pMemcpy), rLR);
        loadConstant(cUnit, r2, (numArgs - 3) * 4);
        newLIR1(cUnit, kThumbBlxR, rLR);
    } else {
        // Use vldm/vstm pair using r3 as a temp
        int regsLeft = std::min(numArgs - 3, 16);
        callState = nextCallInsn(cUnit, mir, dInsn, callState);
        opRegRegImm(cUnit, kOpAdd, r3, rSP, startOffset);
        newLIR3(cUnit, kThumb2Vldms, r3, fr0, regsLeft);
        callState = nextCallInsn(cUnit, mir, dInsn, callState);
        opRegRegImm(cUnit, kOpAdd, r3, rSP, 4 /* Method* */ + (3 * 4));
        callState = nextCallInsn(cUnit, mir, dInsn, callState);
        newLIR3(cUnit, kThumb2Vstms, r3, fr0, regsLeft);
        callState = nextCallInsn(cUnit, mir, dInsn, callState);
    }

    // Handle the 1st 3 in r1, r2 & r3
    for (unsigned int i = 0; i < dInsn->vA && i < 3; i++) {
        RegLocation loc = oatGetSrc(cUnit, mir, firstArg + i);
        loadValueDirectFixed(cUnit, loc, r1 + i);
        callState = nextCallInsn(cUnit, mir, dInsn, callState);
    }

    // Finally, deal with the register arguments
    // We'll be using fixed registers here
    oatLockCallTemps(cUnit);
    callState = nextCallInsn(cUnit, mir, dInsn, callState);
    return callState;
}

static void genInvokeStatic(CompilationUnit* cUnit, MIR* mir)
{
    DecodedInstruction* dInsn = &mir->dalvikInsn;
    int callState = 0;
    bool fastPath = false;  // TODO: set based on resolution results

    NextCallInsn nextCallInsn = fastPath ? nextSDCallInsn : nextSDCallInsnSP;

    if (mir->dalvikInsn.opcode == OP_INVOKE_STATIC) {
        callState = genDalvikArgsNoRange(cUnit, mir, dInsn, callState, NULL,
                                         false, nextCallInsn);
    } else {
        callState = genDalvikArgsRange(cUnit, mir, dInsn, callState, NULL,
                                       nextCallInsn);
    }
    // Finish up any of the call sequence not interleaved in arg loading
    while (callState >= 0) {
        callState = nextCallInsn(cUnit, mir, dInsn, callState);
    }
    newLIR1(cUnit, kThumbBlxR, rLR);
}

static void genInvokeDirect(CompilationUnit* cUnit, MIR* mir)
{
    DecodedInstruction* dInsn = &mir->dalvikInsn;
    int callState = 0;
    ArmLIR* nullCk;
    bool fastPath = false;  // TODO: set based on resolution results

    NextCallInsn nextCallInsn = fastPath ? nextSDCallInsn : nextSDCallInsnSP;
    if (mir->dalvikInsn.opcode == OP_INVOKE_DIRECT)
        callState = genDalvikArgsNoRange(cUnit, mir, dInsn, callState, &nullCk,
                                         false, nextCallInsn);
    else
        callState = genDalvikArgsRange(cUnit, mir, dInsn, callState, &nullCk,
                                       nextCallInsn);
    // Finish up any of the call sequence not interleaved in arg loading
    while (callState >= 0) {
        callState = nextCallInsn(cUnit, mir, dInsn, callState);
    }
    newLIR1(cUnit, kThumbBlxR, rLR);
}

static void genInvokeInterface(CompilationUnit* cUnit, MIR* mir)
{
    DecodedInstruction* dInsn = &mir->dalvikInsn;
    int callState = 0;
    ArmLIR* nullCk;
    /* Note: must call nextInterfaceCallInsn() prior to 1st argument load */
    callState = nextInterfaceCallInsn(cUnit, mir, dInsn, callState);
    if (mir->dalvikInsn.opcode == OP_INVOKE_INTERFACE)
        callState = genDalvikArgsNoRange(cUnit, mir, dInsn, callState, &nullCk,
                                         false, nextInterfaceCallInsn);
    else
        callState = genDalvikArgsRange(cUnit, mir, dInsn, callState, &nullCk,
                                       nextInterfaceCallInsn);
    // Finish up any of the call sequence not interleaved in arg loading
    while (callState >= 0) {
        callState = nextInterfaceCallInsn(cUnit, mir, dInsn, callState);
    }
    newLIR1(cUnit, kThumbBlxR, rLR);
}

static void genInvokeSuper(CompilationUnit* cUnit, MIR* mir)
{
    DecodedInstruction* dInsn = &mir->dalvikInsn;
    int callState = 0;
    ArmLIR* nullCk;
    // FIXME - redundantly loading arg0/r1 ("this")
    if (mir->dalvikInsn.opcode == OP_INVOKE_SUPER)
        callState = genDalvikArgsNoRange(cUnit, mir, dInsn, callState, &nullCk,
                                         false, nextSuperCallInsn);
    else
        callState = genDalvikArgsRange(cUnit, mir, dInsn, callState, &nullCk,
                                       nextSuperCallInsn);
    // Finish up any of the call sequence not interleaved in arg loading
    while (callState >= 0) {
        callState = nextSuperCallInsn(cUnit, mir, dInsn, callState);
    }
    newLIR1(cUnit, kThumbBlxR, rLR);
}

static void genInvokeVirtual(CompilationUnit* cUnit, MIR* mir)
{
    DecodedInstruction* dInsn = &mir->dalvikInsn;
    int callState = 0;
    ArmLIR* nullCk;
    bool fastPath = false;  // TODO: set based on resolution results

    NextCallInsn nextCallInsn = fastPath ? nextVCallInsn : nextVCallInsnSP;
    // TODO - redundantly loading arg0/r1 ("this")
    if (mir->dalvikInsn.opcode == OP_INVOKE_VIRTUAL)
        callState = genDalvikArgsNoRange(cUnit, mir, dInsn, callState, &nullCk,
                                         false, nextCallInsn);
    else
        callState = genDalvikArgsRange(cUnit, mir, dInsn, callState, &nullCk,
                                       nextCallInsn);
    // Finish up any of the call sequence not interleaved in arg loading
    while (callState >= 0) {
        callState = nextCallInsn(cUnit, mir, dInsn, callState);
    }
    newLIR1(cUnit, kThumbBlxR, rLR);
}

// TODO: break out the case handlers.  Might make it easier to support x86
static bool compileDalvikInstruction(CompilationUnit* cUnit, MIR* mir,
                                     BasicBlock* bb, ArmLIR* labelList)
{
    bool res = false;   // Assume success
    RegLocation rlSrc[3];
    RegLocation rlDest = badLoc;
    RegLocation rlResult = badLoc;
    Opcode opcode = mir->dalvikInsn.opcode;

    /* Prep Src and Dest locations */
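    /*
     * The DF_U{A,B,C} data-flow attributes indicate which of vA/vB/vC are
     * consumed as sources (the _WIDE forms consume a register pair), and
     * DF_DA/DF_DA_WIDE indicate the destination; decode them here so the
     * big switch below can work purely in terms of rlSrc[]/rlDest.
     */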
    int nextSreg = 0;
    int nextLoc = 0;
    int attrs = oatDataFlowAttributes[opcode];
    rlSrc[0] = rlSrc[1] = rlSrc[2] = badLoc;
    if (attrs & DF_UA) {
        rlSrc[nextLoc++] = oatGetSrc(cUnit, mir, nextSreg);
        nextSreg++;
    } else if (attrs & DF_UA_WIDE) {
        rlSrc[nextLoc++] = oatGetSrcWide(cUnit, mir, nextSreg,
                                         nextSreg + 1);
        nextSreg += 2;
    }
    if (attrs & DF_UB) {
        rlSrc[nextLoc++] = oatGetSrc(cUnit, mir, nextSreg);
        nextSreg++;
    } else if (attrs & DF_UB_WIDE) {
        rlSrc[nextLoc++] = oatGetSrcWide(cUnit, mir, nextSreg,
                                         nextSreg + 1);
        nextSreg += 2;
    }
    if (attrs & DF_UC) {
        rlSrc[nextLoc++] = oatGetSrc(cUnit, mir, nextSreg);
    } else if (attrs & DF_UC_WIDE) {
        rlSrc[nextLoc++] = oatGetSrcWide(cUnit, mir, nextSreg,
                                         nextSreg + 1);
    }
    if (attrs & DF_DA) {
        rlDest = oatGetDest(cUnit, mir, 0);
    } else if (attrs & DF_DA_WIDE) {
        rlDest = oatGetDestWide(cUnit, mir, 0, 1);
    }

    switch (opcode) {
        case OP_NOP:
            break;

        case OP_MOVE_EXCEPTION:
            int exOffset;
            int resetReg;
            exOffset = Thread::ExceptionOffset().Int32Value();
            resetReg = oatAllocTemp(cUnit);
            rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
            loadWordDisp(cUnit, rSELF, exOffset, rlResult.lowReg);
            loadConstant(cUnit, resetReg, 0);
            storeWordDisp(cUnit, rSELF, exOffset, resetReg);
            storeValue(cUnit, rlDest, rlResult);
            break;

        case OP_RETURN_VOID:
            break;

        case OP_RETURN:
        case OP_RETURN_OBJECT:
            storeValue(cUnit, retLoc, rlSrc[0]);
            break;

        case OP_RETURN_WIDE:
            rlDest = retLocWide;
            rlDest.fp = rlSrc[0].fp;
            storeValueWide(cUnit, rlDest, rlSrc[0]);
            break;

        case OP_MOVE_RESULT_WIDE:
            if (mir->OptimizationFlags & MIR_INLINED)
                break;  // Nop - combined w/ previous invoke
            /*
             * Somewhat hacky here.  Because we're now passing
             * return values in registers, we have to let the
             * register allocation utilities know that the return
             * registers are live and may not be used for address
             * formation in storeValueWide.
             */
            assert(retLocWide.lowReg == r0);
            assert(retLocWide.highReg == r1);
            oatLockTemp(cUnit, retLocWide.lowReg);
            oatLockTemp(cUnit, retLocWide.highReg);
            storeValueWide(cUnit, rlDest, retLocWide);
            oatFreeTemp(cUnit, retLocWide.lowReg);
            oatFreeTemp(cUnit, retLocWide.highReg);
            break;

        case OP_MOVE_RESULT:
        case OP_MOVE_RESULT_OBJECT:
            if (mir->OptimizationFlags & MIR_INLINED)
                break;  // Nop - combined w/ previous invoke
            /* See comment for OP_MOVE_RESULT_WIDE */
            assert(retLoc.lowReg == r0);
            oatLockTemp(cUnit, retLoc.lowReg);
            storeValue(cUnit, rlDest, retLoc);
            oatFreeTemp(cUnit, retLoc.lowReg);
            break;

        case OP_MOVE:
        case OP_MOVE_OBJECT:
        case OP_MOVE_16:
        case OP_MOVE_OBJECT_16:
        case OP_MOVE_FROM16:
        case OP_MOVE_OBJECT_FROM16:
            storeValue(cUnit, rlDest, rlSrc[0]);
            break;

        case OP_MOVE_WIDE:
        case OP_MOVE_WIDE_16:
        case OP_MOVE_WIDE_FROM16:
            storeValueWide(cUnit, rlDest, rlSrc[0]);
            break;

        case OP_CONST:
        case OP_CONST_4:
        case OP_CONST_16:
            rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
            loadConstantNoClobber(cUnit, rlResult.lowReg, mir->dalvikInsn.vB);
            storeValue(cUnit, rlDest, rlResult);
            break;

        case OP_CONST_HIGH16:
            rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
            loadConstantNoClobber(cUnit, rlResult.lowReg,
                                  mir->dalvikInsn.vB << 16);
            storeValue(cUnit, rlDest, rlResult);
            break;

        case OP_CONST_WIDE_16:
        case OP_CONST_WIDE_32:
            rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
            loadConstantNoClobber(cUnit, rlResult.lowReg, mir->dalvikInsn.vB);
            //TUNING: do high separately to avoid load dependency
            opRegRegImm(cUnit, kOpAsr, rlResult.highReg, rlResult.lowReg, 31);
            storeValueWide(cUnit, rlDest, rlResult);
            break;

        case OP_CONST_WIDE:
            rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
            loadConstantValueWide(cUnit, rlResult.lowReg, rlResult.highReg,
                                  mir->dalvikInsn.vB_wide & 0xffffffff,
                                  (mir->dalvikInsn.vB_wide >> 32) & 0xffffffff);
            storeValueWide(cUnit, rlDest, rlResult);
            break;

        case OP_CONST_WIDE_HIGH16:
            rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
            loadConstantValueWide(cUnit, rlResult.lowReg, rlResult.highReg,
                                  0, mir->dalvikInsn.vB << 16);
            storeValueWide(cUnit, rlDest, rlResult);
            break;

        case OP_MONITOR_ENTER:
            genMonitorEnter(cUnit, mir, rlSrc[0]);
            break;

        case OP_MONITOR_EXIT:
            genMonitorExit(cUnit, mir, rlSrc[0]);
            break;

        case OP_CHECK_CAST:
            genCheckCast(cUnit, mir, rlSrc[0]);
            break;

        case OP_INSTANCE_OF:
            genInstanceof(cUnit, mir, rlDest, rlSrc[0]);
            break;

        case OP_NEW_INSTANCE:
            genNewInstance(cUnit, mir, rlDest);
            break;

        case OP_THROW:
            genThrow(cUnit, mir, rlSrc[0]);
            break;

        case OP_ARRAY_LENGTH:
            int lenOffset;
            lenOffset = Array::LengthOffset().Int32Value();
            rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
            genNullCheck(cUnit, rlSrc[0].sRegLow, rlSrc[0].lowReg,
                         mir->offset, NULL);
            rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
            loadWordDisp(cUnit, rlSrc[0].lowReg, lenOffset,
                         rlResult.lowReg);
            storeValue(cUnit, rlDest, rlResult);
            break;

        case OP_CONST_STRING:
        case OP_CONST_STRING_JUMBO:
            genConstString(cUnit, mir, rlDest, rlSrc[0]);
            break;

        case OP_CONST_CLASS:
            genConstClass(cUnit, mir, rlDest, rlSrc[0]);
            break;

        case OP_FILL_ARRAY_DATA:
            genFillArrayData(cUnit, mir, rlSrc[0]);
            break;

        case OP_FILLED_NEW_ARRAY:
            genFilledNewArray(cUnit, mir, false /* not range */);
            break;

        case OP_FILLED_NEW_ARRAY_RANGE:
            genFilledNewArray(cUnit, mir, true /* range */);
            break;

        case OP_NEW_ARRAY:
            genNewArray(cUnit, mir, rlDest, rlSrc[0]);
            break;

        case OP_GOTO:
        case OP_GOTO_16:
        case OP_GOTO_32:
            // TUNING: add MIR flag to disable when unnecessary
            bool backwardBranch;
            backwardBranch = (bb->taken->startOffset <= mir->offset);
            if (backwardBranch) {
                genSuspendPoll(cUnit, mir);
            }
            genUnconditionalBranch(cUnit, &labelList[bb->taken->id]);
            break;

        case OP_PACKED_SWITCH:
            genPackedSwitch(cUnit, mir, rlSrc[0]);
            break;

        case OP_SPARSE_SWITCH:
            genSparseSwitch(cUnit, mir, rlSrc[0]);
            break;

        case OP_CMPL_FLOAT:
        case OP_CMPG_FLOAT:
        case OP_CMPL_DOUBLE:
        case OP_CMPG_DOUBLE:
            res = genCmpFP(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
            break;

        case OP_CMP_LONG:
            genCmpLong(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
            break;

        case OP_IF_EQ:
        case OP_IF_NE:
        case OP_IF_LT:
        case OP_IF_GE:
        case OP_IF_GT:
        case OP_IF_LE: {
            bool backwardBranch;
            ArmConditionCode cond;
            backwardBranch = (bb->taken->startOffset <= mir->offset);
            if (backwardBranch) {
                genSuspendPoll(cUnit, mir);
            }
            rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
            rlSrc[1] = loadValue(cUnit, rlSrc[1], kCoreReg);
            opRegReg(cUnit, kOpCmp, rlSrc[0].lowReg, rlSrc[1].lowReg);
            switch (opcode) {
                case OP_IF_EQ:
                    cond = kArmCondEq;
                    break;
                case OP_IF_NE:
                    cond = kArmCondNe;
                    break;
                case OP_IF_LT:
                    cond = kArmCondLt;
                    break;
                case OP_IF_GE:
                    cond = kArmCondGe;
                    break;
                case OP_IF_GT:
                    cond = kArmCondGt;
                    break;
                case OP_IF_LE:
                    cond = kArmCondLe;
                    break;
                default:
                    cond = (ArmConditionCode)0;
                    LOG(FATAL) << "Unexpected opcode " << (int)opcode;
            }
            genConditionalBranch(cUnit, cond, &labelList[bb->taken->id]);
            genUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
            break;
        }

        case OP_IF_EQZ:
        case OP_IF_NEZ:
        case OP_IF_LTZ:
        case OP_IF_GEZ:
        case OP_IF_GTZ:
        case OP_IF_LEZ: {
            bool backwardBranch;
            ArmConditionCode cond;
            backwardBranch = (bb->taken->startOffset <= mir->offset);
            if (backwardBranch) {
                genSuspendPoll(cUnit, mir);
            }
            rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
            opRegImm(cUnit, kOpCmp, rlSrc[0].lowReg, 0);
            switch (opcode) {
                case OP_IF_EQZ:
                    cond = kArmCondEq;
                    break;
                case OP_IF_NEZ:
                    cond = kArmCondNe;
                    break;
                case OP_IF_LTZ:
                    cond = kArmCondLt;
                    break;
                case OP_IF_GEZ:
                    cond = kArmCondGe;
                    break;
                case OP_IF_GTZ:
                    cond = kArmCondGt;
                    break;
                case OP_IF_LEZ:
                    cond = kArmCondLe;
                    break;
                default:
                    cond = (ArmConditionCode)0;
                    LOG(FATAL) << "Unexpected opcode " << (int)opcode;
            }
            genConditionalBranch(cUnit, cond, &labelList[bb->taken->id]);
            genUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
            break;
        }

        case OP_AGET_WIDE:
            genArrayGet(cUnit, mir, kLong, rlSrc[0], rlSrc[1], rlDest, 3);
            break;
        case OP_AGET:
        case OP_AGET_OBJECT:
            genArrayGet(cUnit, mir, kWord, rlSrc[0], rlSrc[1], rlDest, 2);
            break;
        case OP_AGET_BOOLEAN:
            genArrayGet(cUnit, mir, kUnsignedByte, rlSrc[0], rlSrc[1],
                        rlDest, 0);
            break;
        case OP_AGET_BYTE:
            genArrayGet(cUnit, mir, kSignedByte, rlSrc[0], rlSrc[1], rlDest, 0);
            break;
        case OP_AGET_CHAR:
            genArrayGet(cUnit, mir, kUnsignedHalf, rlSrc[0], rlSrc[1],
                        rlDest, 1);
            break;
        case OP_AGET_SHORT:
            genArrayGet(cUnit, mir, kSignedHalf, rlSrc[0], rlSrc[1], rlDest, 1);
            break;
        case OP_APUT_WIDE:
            genArrayPut(cUnit, mir, kLong, rlSrc[1], rlSrc[2], rlSrc[0], 3);
            break;
        case OP_APUT:
            genArrayPut(cUnit, mir, kWord, rlSrc[1], rlSrc[2], rlSrc[0], 2);
            break;
        case OP_APUT_OBJECT:
            genArrayObjPut(cUnit, mir, rlSrc[1], rlSrc[2], rlSrc[0], 2);
            break;
        case OP_APUT_SHORT:
        case OP_APUT_CHAR:
            genArrayPut(cUnit, mir, kUnsignedHalf, rlSrc[1], rlSrc[2],
                        rlSrc[0], 1);
            break;
        case OP_APUT_BYTE:
        case OP_APUT_BOOLEAN:
            genArrayPut(cUnit, mir, kUnsignedByte, rlSrc[1], rlSrc[2],
                        rlSrc[0], 0);
            break;

        case OP_IGET_WIDE:
        case OP_IGET_WIDE_VOLATILE:
            genIGetWideX(cUnit, mir, rlDest, rlSrc[0]);
            break;

        case OP_IGET:
        case OP_IGET_VOLATILE:
        case OP_IGET_OBJECT:
        case OP_IGET_OBJECT_VOLATILE:
            genIGetX(cUnit, mir, kWord, rlDest, rlSrc[0]);
            break;

        case OP_IGET_BOOLEAN:
        case OP_IGET_BYTE:
            genIGetX(cUnit, mir, kUnsignedByte, rlDest, rlSrc[0]);
            break;

        case OP_IGET_CHAR:
            genIGetX(cUnit, mir, kUnsignedHalf, rlDest, rlSrc[0]);
            break;

        case OP_IGET_SHORT:
            genIGetX(cUnit, mir, kSignedHalf, rlDest, rlSrc[0]);
            break;

        case OP_IPUT_WIDE:
        case OP_IPUT_WIDE_VOLATILE:
            genIPutWideX(cUnit, mir, rlSrc[0], rlSrc[1]);
            break;

        case OP_IPUT_OBJECT:
        case OP_IPUT_OBJECT_VOLATILE:
            genIPutX(cUnit, mir, kWord, rlSrc[0], rlSrc[1], true);
            break;

        case OP_IPUT:
        case OP_IPUT_VOLATILE:
            genIPutX(cUnit, mir, kWord, rlSrc[0], rlSrc[1], false);
            break;

        case OP_IPUT_BOOLEAN:
        case OP_IPUT_BYTE:
            genIPutX(cUnit, mir, kUnsignedByte, rlSrc[0], rlSrc[1], false);
            break;

        case OP_IPUT_CHAR:
            genIPutX(cUnit, mir, kUnsignedHalf, rlSrc[0], rlSrc[1], false);
            break;

        case OP_IPUT_SHORT:
            genIPutX(cUnit, mir, kSignedHalf, rlSrc[0], rlSrc[1], false);
            break;

        case OP_SGET:
        case OP_SGET_OBJECT:
        case OP_SGET_BOOLEAN:
        case OP_SGET_BYTE:
        case OP_SGET_CHAR:
        case OP_SGET_SHORT:
            genSget(cUnit, mir, rlResult, rlDest);
            break;

        case OP_SGET_WIDE:
            genSgetWide(cUnit, mir, rlResult, rlDest);
            break;

        case OP_SPUT:
        case OP_SPUT_OBJECT:
        case OP_SPUT_BOOLEAN:
        case OP_SPUT_BYTE:
        case OP_SPUT_CHAR:
        case OP_SPUT_SHORT:
            genSput(cUnit, mir, rlSrc[0]);
            break;

        case OP_SPUT_WIDE:
            genSputWide(cUnit, mir, rlSrc[0]);
            break;

        case OP_INVOKE_STATIC_RANGE:
        case OP_INVOKE_STATIC:
            genInvokeStatic(cUnit, mir);
            break;

        case OP_INVOKE_DIRECT:
        case OP_INVOKE_DIRECT_RANGE:
            genInvokeDirect(cUnit, mir);
            break;

        case OP_INVOKE_VIRTUAL:
        case OP_INVOKE_VIRTUAL_RANGE:
            genInvokeVirtual(cUnit, mir);
            break;

        case OP_INVOKE_SUPER:
        case OP_INVOKE_SUPER_RANGE:
            genInvokeSuper(cUnit, mir);
            break;

        case OP_INVOKE_INTERFACE:
        case OP_INVOKE_INTERFACE_RANGE:
            genInvokeInterface(cUnit, mir);
            break;

        case OP_NEG_INT:
        case OP_NOT_INT:
            res = genArithOpInt(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
            break;

        case OP_NEG_LONG:
        case OP_NOT_LONG:
            res = genArithOpLong(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
            break;

        case OP_NEG_FLOAT:
            res = genArithOpFloat(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
            break;

        case OP_NEG_DOUBLE:
            res = genArithOpDouble(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
            break;

1437 case OP_INT_TO_LONG:
1438 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1439 if (rlSrc[0].location == kLocPhysReg) {
1440 genRegCopy(cUnit, rlResult.lowReg, rlSrc[0].lowReg);
1441 } else {
1442 loadValueDirect(cUnit, rlSrc[0], rlResult.lowReg);
1443 }
1444 opRegRegImm(cUnit, kOpAsr, rlResult.highReg,
1445 rlResult.lowReg, 31);
1446 storeValueWide(cUnit, rlDest, rlResult);
1447 break;
1448
1449 case OP_LONG_TO_INT:
1450 rlSrc[0] = oatUpdateLocWide(cUnit, rlSrc[0]);
1451 rlSrc[0] = oatWideToNarrow(cUnit, rlSrc[0]);
1452 storeValue(cUnit, rlDest, rlSrc[0]);
1453 break;
1454
1455 case OP_INT_TO_BYTE:
1456 rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
1457 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1458 opRegReg(cUnit, kOp2Byte, rlResult.lowReg, rlSrc[0].lowReg);
1459 storeValue(cUnit, rlDest, rlResult);
1460 break;
1461
1462 case OP_INT_TO_SHORT:
1463 rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
1464 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1465 opRegReg(cUnit, kOp2Short, rlResult.lowReg, rlSrc[0].lowReg);
1466 storeValue(cUnit, rlDest, rlResult);
1467 break;
1468
1469 case OP_INT_TO_CHAR:
1470 rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
1471 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1472 opRegReg(cUnit, kOp2Char, rlResult.lowReg, rlSrc[0].lowReg);
1473 storeValue(cUnit, rlDest, rlResult);
1474 break;
1475
1476 case OP_INT_TO_FLOAT:
1477 case OP_INT_TO_DOUBLE:
1478 case OP_LONG_TO_FLOAT:
1479 case OP_LONG_TO_DOUBLE:
1480 case OP_FLOAT_TO_INT:
1481 case OP_FLOAT_TO_LONG:
1482 case OP_FLOAT_TO_DOUBLE:
1483 case OP_DOUBLE_TO_INT:
1484 case OP_DOUBLE_TO_LONG:
1485 case OP_DOUBLE_TO_FLOAT:
1486 genConversion(cUnit, mir);
1487 break;
1488
1489 case OP_ADD_INT:
1490 case OP_SUB_INT:
1491 case OP_MUL_INT:
1492 case OP_DIV_INT:
1493 case OP_REM_INT:
1494 case OP_AND_INT:
1495 case OP_OR_INT:
1496 case OP_XOR_INT:
1497 case OP_SHL_INT:
1498 case OP_SHR_INT:
1499 case OP_USHR_INT:
1500 case OP_ADD_INT_2ADDR:
1501 case OP_SUB_INT_2ADDR:
1502 case OP_MUL_INT_2ADDR:
1503 case OP_DIV_INT_2ADDR:
1504 case OP_REM_INT_2ADDR:
1505 case OP_AND_INT_2ADDR:
1506 case OP_OR_INT_2ADDR:
1507 case OP_XOR_INT_2ADDR:
1508 case OP_SHL_INT_2ADDR:
1509 case OP_SHR_INT_2ADDR:
1510 case OP_USHR_INT_2ADDR:
1511 genArithOpInt(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1512 break;
1513
1514 case OP_ADD_LONG:
1515 case OP_SUB_LONG:
1516 case OP_MUL_LONG:
1517 case OP_DIV_LONG:
1518 case OP_REM_LONG:
1519 case OP_AND_LONG:
1520 case OP_OR_LONG:
1521 case OP_XOR_LONG:
1522 case OP_ADD_LONG_2ADDR:
1523 case OP_SUB_LONG_2ADDR:
1524 case OP_MUL_LONG_2ADDR:
1525 case OP_DIV_LONG_2ADDR:
1526 case OP_REM_LONG_2ADDR:
1527 case OP_AND_LONG_2ADDR:
1528 case OP_OR_LONG_2ADDR:
1529 case OP_XOR_LONG_2ADDR:
1530 genArithOpLong(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1531 break;
1532
buzbee67bf8852011-08-17 17:51:35 -07001533 case OP_SHL_LONG:
1534 case OP_SHR_LONG:
1535 case OP_USHR_LONG:
buzbeee6d61962011-08-27 11:58:19 -07001536 case OP_SHL_LONG_2ADDR:
1537 case OP_SHR_LONG_2ADDR:
1538 case OP_USHR_LONG_2ADDR:
buzbee67bf8852011-08-17 17:51:35 -07001539 genShiftOpLong(cUnit,mir, rlDest, rlSrc[0], rlSrc[1]);
1540 break;
1541
1542 case OP_ADD_FLOAT:
1543 case OP_SUB_FLOAT:
1544 case OP_MUL_FLOAT:
1545 case OP_DIV_FLOAT:
1546 case OP_REM_FLOAT:
1547 case OP_ADD_FLOAT_2ADDR:
1548 case OP_SUB_FLOAT_2ADDR:
1549 case OP_MUL_FLOAT_2ADDR:
1550 case OP_DIV_FLOAT_2ADDR:
1551 case OP_REM_FLOAT_2ADDR:
1552 genArithOpFloat(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1553 break;
1554
1555 case OP_ADD_DOUBLE:
1556 case OP_SUB_DOUBLE:
1557 case OP_MUL_DOUBLE:
1558 case OP_DIV_DOUBLE:
1559 case OP_REM_DOUBLE:
1560 case OP_ADD_DOUBLE_2ADDR:
1561 case OP_SUB_DOUBLE_2ADDR:
1562 case OP_MUL_DOUBLE_2ADDR:
1563 case OP_DIV_DOUBLE_2ADDR:
1564 case OP_REM_DOUBLE_2ADDR:
1565 genArithOpDouble(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1566 break;
1567
1568 case OP_RSUB_INT:
1569 case OP_ADD_INT_LIT16:
1570 case OP_MUL_INT_LIT16:
1571 case OP_DIV_INT_LIT16:
1572 case OP_REM_INT_LIT16:
1573 case OP_AND_INT_LIT16:
1574 case OP_OR_INT_LIT16:
1575 case OP_XOR_INT_LIT16:
1576 case OP_ADD_INT_LIT8:
1577 case OP_RSUB_INT_LIT8:
1578 case OP_MUL_INT_LIT8:
1579 case OP_DIV_INT_LIT8:
1580 case OP_REM_INT_LIT8:
1581 case OP_AND_INT_LIT8:
1582 case OP_OR_INT_LIT8:
1583 case OP_XOR_INT_LIT8:
1584 case OP_SHL_INT_LIT8:
1585 case OP_SHR_INT_LIT8:
1586 case OP_USHR_INT_LIT8:
1587 genArithOpIntLit(cUnit, mir, rlDest, rlSrc[0], mir->dalvikInsn.vC);
1588 break;
1589
1590 default:
1591 res = true;
1592 }
1593 return res;
1594}
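
/*
 * Editorial sketch (not part of the original source): the OP_INT_TO_LONG
 * case above widens by copying the 32-bit source into the result's low word
 * and filling the high word with the sign bit via an arithmetic shift right
 * by 31 (opRegRegImm(kOpAsr, high, low, 31)).  Worked example:
 *
 *     src == -5          ->  low == 0xFFFFFFFB, high == 0xFFFFFFFF  (-5LL)
 *     src == 0x12345678  ->  low == 0x12345678, high == 0x00000000
 */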

static const char *extendedMIROpNames[kMirOpLast - kMirOpFirst] = {
    "kMirOpPhi",
    "kMirOpNullNRangeUpCheck",
    "kMirOpNullNRangeDownCheck",
    "kMirOpLowerBound",
    "kMirOpPunt",
    "kMirOpCheckInlinePrediction",
};

/* Extended MIR instructions like PHI */
static void handleExtendedMethodMIR(CompilationUnit* cUnit, MIR* mir)
{
    int opOffset = mir->dalvikInsn.opcode - kMirOpFirst;
    char* msg = (char*)oatNew(strlen(extendedMIROpNames[opOffset]) + 1, false);
    strcpy(msg, extendedMIROpNames[opOffset]);
    ArmLIR* op = newLIR1(cUnit, kArmPseudoExtended, (int) msg);

    switch ((ExtendedMIROpcode)mir->dalvikInsn.opcode) {
        case kMirOpPhi: {
            char* ssaString = oatGetSSAString(cUnit, mir->ssaRep);
            op->flags.isNop = true;
            newLIR1(cUnit, kArmPseudoSSARep, (int) ssaString);
            break;
        }
        default:
            break;
    }
}
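
/*
 * Editorial note (not part of the original source): for a Phi, the code
 * above computes opOffset = kMirOpPhi - kMirOpFirst = 0 (kMirOpPhi is the
 * first entry in extendedMIROpNames), so the pseudo LIR is labeled
 * "kMirOpPhi", marked isNop so it emits no machine code, and is followed by
 * a kArmPseudoSSARep LIR carrying the SSA string -- apparently so the Phi
 * still shows up in LIR listings.
 */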

/*
 * If there are any ins passed in registers that have not been promoted
 * to a callee-save register, flush them to the frame: copy any ins that
 * are passed in register to their home location.
 */
static void flushIns(CompilationUnit* cUnit)
{
    if (cUnit->method->num_ins_ == 0)
        return;
    int inRegs = (cUnit->method->num_ins_ > 2) ? 3 : cUnit->method->num_ins_;
    int startReg = r1;
    int startLoc = cUnit->method->num_registers_ - cUnit->method->num_ins_;
    for (int i = 0; i < inRegs; i++) {
        RegLocation loc = cUnit->regLocation[startLoc + i];
        //TUNING: be smarter about flushing ins to frame
        storeBaseDisp(cUnit, rSP, loc.spOffset, startReg + i, kWord);
        if (loc.location == kLocPhysReg) {
            genRegCopy(cUnit, loc.lowReg, startReg + i);
        }
    }

    // Handle special case of wide argument half in regs, half in frame
    if (inRegs == 3) {
        RegLocation loc = cUnit->regLocation[startLoc + 2];
        if (loc.wide && loc.location == kLocPhysReg) {
            // Load the other half of the arg into the promoted pair
            loadBaseDisp(cUnit, NULL, rSP, loc.spOffset + 4,
                         loc.highReg, kWord, INVALID_SREG);
            inRegs++;
        }
    }

    // Now, do initial assignment of all promoted arguments passed in frame
    for (int i = inRegs; i < cUnit->method->num_ins_;) {
        RegLocation loc = cUnit->regLocation[startLoc + i];
        if (loc.fpLocation == kLocPhysReg) {
            loc.location = kLocPhysReg;
            loc.fp = true;
            loc.lowReg = loc.fpLowReg;
            loc.highReg = loc.fpHighReg;
        }
        if (loc.location == kLocPhysReg) {
            if (loc.wide) {
                loadBaseDispWide(cUnit, NULL, rSP, loc.spOffset,
                                 loc.lowReg, loc.highReg, INVALID_SREG);
                i++;
            } else {
                loadBaseDisp(cUnit, NULL, rSP, loc.spOffset,
                             loc.lowReg, kWord, INVALID_SREG);
            }
        }
        i++;
    }
}
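
/*
 * Editorial sketch (not part of the original source): flushIns() above
 * assumes the calling convention implied elsewhere in this file -- the first
 * three ins arrive in r1..r3 (startReg == r1, inRegs capped at 3; r0
 * presumably carries the incoming Method*, which the entry block stores at
 * [sp]), while any remaining ins are passed on the frame in their home
 * slots, which is why the second loop only has to load the promoted ones.
 * A hypothetical method with num_registers_ == 5 and num_ins_ == 3:
 *
 *     startLoc = 5 - 3 = 2           // the ins occupy vregs v2..v4
 *     v2 <- r1, v3 <- r2, v4 <- r3   // each stored to its spOffset home,
 *                                    // and copied to a promoted reg if any
 */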

/* Handle the content in each basic block */
static bool methodBlockCodeGen(CompilationUnit* cUnit, BasicBlock* bb)
{
    MIR* mir;
    ArmLIR* labelList = (ArmLIR*) cUnit->blockLabelList;
    int blockId = bb->id;

    cUnit->curBlock = bb;
    labelList[blockId].operands[0] = bb->startOffset;

    /* Insert the block label */
    labelList[blockId].opcode = kArmPseudoNormalBlockLabel;
    oatAppendLIR(cUnit, (LIR*) &labelList[blockId]);

    oatClobberAllRegs(cUnit);
    oatResetNullCheck(cUnit);

    ArmLIR* headLIR = NULL;

    if (bb->blockType == kEntryBlock) {
        /*
         * On entry, r0, r1, r2 & r3 are live.  Let the register allocation
         * mechanism know so it doesn't try to use any of them when
         * expanding the frame or flushing.  This leaves the utility
         * code with a single temp: r12.  This should be enough.
         * (A worked example of the frame arithmetic follows this function.)
         */
        oatLockTemp(cUnit, r0);
        oatLockTemp(cUnit, r1);
        oatLockTemp(cUnit, r2);
        oatLockTemp(cUnit, r3);
        newLIR0(cUnit, kArmPseudoMethodEntry);
        /* Spill core callee saves */
        newLIR1(cUnit, kThumb2Push, cUnit->coreSpillMask);
        /* Need to spill any FP regs? */
        if (cUnit->numFPSpills) {
            newLIR1(cUnit, kThumb2VPushCS, cUnit->numFPSpills);
        }
        opRegImm(cUnit, kOpSub, rSP, cUnit->frameSize - (cUnit->numSpills * 4));
        storeBaseDisp(cUnit, rSP, 0, r0, kWord);
        flushIns(cUnit);
        oatFreeTemp(cUnit, r0);
        oatFreeTemp(cUnit, r1);
        oatFreeTemp(cUnit, r2);
        oatFreeTemp(cUnit, r3);
    } else if (bb->blockType == kExitBlock) {
        newLIR0(cUnit, kArmPseudoMethodExit);
        opRegImm(cUnit, kOpAdd, rSP, cUnit->frameSize - (cUnit->numSpills * 4));
        /* Need to restore any FP callee saves? */
        if (cUnit->numFPSpills) {
            newLIR1(cUnit, kThumb2VPopCS, cUnit->numFPSpills);
        }
        if (cUnit->coreSpillMask & (1 << rLR)) {
            /* Unspill rLR to rPC */
            cUnit->coreSpillMask &= ~(1 << rLR);
            cUnit->coreSpillMask |= (1 << rPC);
        }
        newLIR1(cUnit, kThumb2Pop, cUnit->coreSpillMask);
        if (!(cUnit->coreSpillMask & (1 << rPC))) {
            /* We didn't pop to rPC, so must do a bx rLR */
            newLIR1(cUnit, kThumbBx, rLR);
        }
    }

    for (mir = bb->firstMIRInsn; mir; mir = mir->next) {

        oatResetRegPool(cUnit);
        if (cUnit->disableOpt & (1 << kTrackLiveTemps)) {
            oatClobberAllRegs(cUnit);
        }

        if (cUnit->disableOpt & (1 << kSuppressLoads)) {
            oatResetDefTracking(cUnit);
        }

        if ((int)mir->dalvikInsn.opcode >= (int)kMirOpFirst) {
            handleExtendedMethodMIR(cUnit, mir);
            continue;
        }

        cUnit->currentDalvikOffset = mir->offset;

        Opcode dalvikOpcode = mir->dalvikInsn.opcode;
        InstructionFormat dalvikFormat =
            dexGetFormatFromOpcode(dalvikOpcode);

        ArmLIR* boundaryLIR;

        /* Mark the beginning of a Dalvik instruction for line tracking */
        boundaryLIR = newLIR1(cUnit, kArmPseudoDalvikByteCodeBoundary,
                              (int) oatGetDalvikDisassembly(
                              &mir->dalvikInsn, ""));
        /* Remember the first LIR for this block */
        if (headLIR == NULL) {
            headLIR = boundaryLIR;
            /* Set the first boundaryLIR as a scheduling barrier */
            headLIR->defMask = ENCODE_ALL;
        }

        /* Don't generate the SSA annotation unless verbose mode is on */
        if (cUnit->printMe && mir->ssaRep) {
            char* ssaString = oatGetSSAString(cUnit, mir->ssaRep);
            newLIR1(cUnit, kArmPseudoSSARep, (int) ssaString);
        }

        bool notHandled = compileDalvikInstruction(cUnit, mir, bb, labelList);

        if (notHandled) {
            char buf[100];
            snprintf(buf, 100, "%#06x: Opcode %#x (%s) / Fmt %d not handled",
                     mir->offset,
                     dalvikOpcode, dexGetOpcodeName(dalvikOpcode),
                     dalvikFormat);
            LOG(FATAL) << buf;
        }
    }

    if (headLIR) {
        /*
         * Eliminate redundant loads/stores and delay stores into later
         * slots.
         */
        oatApplyLocalOptimizations(cUnit, (LIR*) headLIR,
                                   cUnit->lastLIRInsn);

        /* Generate an unconditional branch to the fallthrough block. */
        if (bb->fallThrough) {
            genUnconditionalBranch(cUnit,
                                   &labelList[bb->fallThrough->id]);
        }
    }
    return false;
}
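
/*
 * Editorial sketch (not part of the original source): worked example of the
 * prologue/epilogue frame arithmetic in methodBlockCodeGen() above.  The
 * spill pushes (kThumb2Push of the core spill mask, plus kThumb2VPushCS when
 * there are FP spills) are assumed here to lower sp by 4 * numSpills in
 * total, so only the remainder of the frame is subtracted explicitly.  With
 * hypothetical values frameSize == 48 and numSpills == 5:
 *
 *     push  {core spill regs}     ; part of the 4 * 5 = 20 spill bytes
 *     vpush {fp spill regs}       ; rest of the 20 bytes, if FP spills exist
 *     sub   sp, sp, #28           ; frameSize - numSpills*4 = 48 - 20
 *                                 ; total frame = 48 bytes; r0 stored at [sp]
 *
 * The exit block mirrors this with an add of the same amount, then pops the
 * core spill mask, folding lr into pc when lr was spilled and otherwise
 * ending with a bx lr.
 */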

/*
 * Nop any unconditional branches that go to the next instruction.
 * Note: new redundant branches may be inserted later, and we'll
 * use a check in final instruction assembly to nop those out.
 */
void removeRedundantBranches(CompilationUnit* cUnit)
{
    ArmLIR* thisLIR;

    for (thisLIR = (ArmLIR*) cUnit->firstLIRInsn;
         thisLIR != (ArmLIR*) cUnit->lastLIRInsn;
         thisLIR = NEXT_LIR(thisLIR)) {

        /* Branch to the next instruction */
        if ((thisLIR->opcode == kThumbBUncond) ||
            (thisLIR->opcode == kThumb2BUncond)) {
            ArmLIR* nextLIR = thisLIR;

            while (true) {
                nextLIR = NEXT_LIR(nextLIR);

                /*
                 * Is the branch target the next instruction?
                 */
                if (nextLIR == (ArmLIR*) thisLIR->generic.target) {
                    thisLIR->flags.isNop = true;
                    break;
                }

                /*
                 * Found real useful stuff between the branch and the target.
                 * Need to explicitly check the lastLIRInsn here because it
                 * might be the last real instruction.
                 */
                if (!isPseudoOpcode(nextLIR->opcode) ||
                    (nextLIR == (ArmLIR*) cUnit->lastLIRInsn))
                    break;
            }
        }
    }
}
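
/*
 * Editorial sketch (not part of the original source): the pattern
 * removeRedundantBranches() looks for, in schematic LIR form:
 *
 *     b        .Ltarget       ; kThumbBUncond / kThumb2BUncond  <- thisLIR
 *     (pseudo LIRs only: byte-code boundary markers, SSA annotations, ...)
 *     .Ltarget:               ; thisLIR->generic.target
 *
 * Because only pseudo ops sit between the branch and its target, execution
 * would fall through to the same place anyway, so the branch is marked
 * isNop and dropped during final assembly.
 */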

void oatMethodMIR2LIR(CompilationUnit* cUnit)
{
    /* Used to hold the labels of each block */
    cUnit->blockLabelList =
        (void *) oatNew(sizeof(ArmLIR) * cUnit->numBlocks, true);

    oatDataFlowAnalysisDispatcher(cUnit, methodBlockCodeGen,
                                  kPreOrderDFSTraversal, false /* Iterative */);
    removeRedundantBranches(cUnit);
}

/* Common initialization routine for an architecture family */
bool oatArchInit()
{
    int i;

    for (i = 0; i < kArmLast; i++) {
        if (EncodingMap[i].opcode != i) {
            LOG(FATAL) << "Encoding order for " << EncodingMap[i].name <<
                " is wrong: expecting " << i << ", seeing " <<
                (int)EncodingMap[i].opcode;
        }
    }

    return oatArchVariantInit();
}

/* Needed by the Assembler */
void oatSetupResourceMasks(ArmLIR* lir)
{
    setupResourceMasks(lir);
}

/* Needed by the ld/st optimizations */
ArmLIR* oatRegCopyNoInsert(CompilationUnit* cUnit, int rDest, int rSrc)
{
    return genRegCopyNoInsert(cUnit, rDest, rSrc);
}

/* Needed by the register allocator */
ArmLIR* oatRegCopy(CompilationUnit* cUnit, int rDest, int rSrc)
{
    return genRegCopy(cUnit, rDest, rSrc);
}

/* Needed by the register allocator */
void oatRegCopyWide(CompilationUnit* cUnit, int destLo, int destHi,
                    int srcLo, int srcHi)
{
    genRegCopyWide(cUnit, destLo, destHi, srcLo, srcHi);
}

void oatFlushRegImpl(CompilationUnit* cUnit, int rBase,
                     int displacement, int rSrc, OpSize size)
{
    storeBaseDisp(cUnit, rBase, displacement, rSrc, size);
}

void oatFlushRegWideImpl(CompilationUnit* cUnit, int rBase,
                         int displacement, int rSrcLo, int rSrcHi)
{
    storeBaseDispWide(cUnit, rBase, displacement, rSrcLo, rSrcHi);
}