/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define DISPLAY_MISSING_TARGETS 1

static const RegLocation badLoc = {kLocDalvikFrame, 0, 0, INVALID_REG,
                                   INVALID_REG, INVALID_SREG, 0,
                                   kLocDalvikFrame, INVALID_REG, INVALID_REG,
                                   INVALID_OFFSET};
static const RegLocation retLoc = LOC_DALVIK_RETURN_VAL;
static const RegLocation retLocWide = LOC_DALVIK_RETURN_VAL_WIDE;

/*
 * Let helper function take care of everything.  Will call
 * Array::AllocFromCode(type_idx, method, count);
 * Note: AllocFromCode will handle checks for errNegativeArraySize.
 */
static void genNewArray(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
                        RegLocation rlSrc)
{
    oatFlushAllRegs(cUnit);    /* Everything to home location */
    loadWordDisp(cUnit, rSELF,
                 OFFSETOF_MEMBER(Thread, pAllocFromCode), rLR);
    loadCurrMethodDirect(cUnit, r1);              // arg1 <- Method*
    loadConstant(cUnit, r0, mir->dalvikInsn.vC);  // arg0 <- type_id
    loadValueDirectFixed(cUnit, rlSrc, r2);       // arg2 <- count
    callUnwindableHelper(cUnit, rLR);
    oatClobberCallRegs(cUnit);
    RegLocation rlResult = oatGetReturn(cUnit);
    storeValue(cUnit, rlDest, rlResult);
}
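
/*
 * Summary of the helper-call pattern used above (and in the routines below):
 * arguments are marshalled into r0..r2 (r3 when a wide value is passed), the
 * helper entry point is loaded from a Thread-relative slot into rLR, and
 * callUnwindableHelper() is used for helpers that may throw.  A 32-bit result
 * comes back in r0 and is picked up via oatGetReturn()/oatGetReturnWide().
 */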

/*
 * Similar to genNewArray, but with post-allocation initialization.
 * The verifier guarantees we're dealing with an array class.  The current
 * code throws a runtime exception "bad Filled array req" for 'D' and 'J'
 * element types, and an internal unimplemented error if the type is not
 * 'L', '[' or 'I'.
 */
static void genFilledNewArray(CompilationUnit* cUnit, MIR* mir, bool isRange)
{
    DecodedInstruction* dInsn = &mir->dalvikInsn;
    int elems = dInsn->vA;
    int typeId = dInsn->vB;
    oatFlushAllRegs(cUnit);    /* Everything to home location */
    loadWordDisp(cUnit, rSELF,
                 OFFSETOF_MEMBER(Thread, pCheckAndAllocFromCode), rLR);
    loadCurrMethodDirect(cUnit, r1);    // arg1 <- Method*
    loadConstant(cUnit, r0, typeId);    // arg0 <- type_id
    loadConstant(cUnit, r2, elems);     // arg2 <- count
    callUnwindableHelper(cUnit, rLR);
    /*
     * NOTE: the implicit target for OP_FILLED_NEW_ARRAY is the
     * return region.  Because AllocFromCode placed the new array
     * in r0, we'll just lock it into place.  When debugger support is
     * added, it may be necessary to additionally copy all return
     * values to a home location in thread-local storage.
     */
    oatLockTemp(cUnit, r0);

    // Having a range of 0 is legal
    if (isRange && (dInsn->vA > 0)) {
        /*
         * Bit of ugliness here.  We're going to generate a memory copy loop
         * on the register range, but it is possible that some regs
         * in the range have been promoted.  This is unlikely, but
         * before generating the copy, we'll just force a flush
         * of any regs in the source range that have been promoted to
         * home location.
         */
        for (unsigned int i = 0; i < dInsn->vA; i++) {
            RegLocation loc = oatUpdateLoc(cUnit,
                                           oatGetSrc(cUnit, mir, i));
            if (loc.location == kLocPhysReg) {
                storeBaseDisp(cUnit, rSP, loc.spOffset, loc.lowReg, kWord);
            }
        }
        /*
         * TUNING note: generated code here could be much improved, but
         * this is an uncommon operation and isn't especially performance
         * critical.
         */
        int rSrc = oatAllocTemp(cUnit);
        int rDst = oatAllocTemp(cUnit);
        int rIdx = oatAllocTemp(cUnit);
        int rVal = rLR;  // Using a lot of temps, rLR is known free here
        // Set up source pointer
        RegLocation rlFirst = oatGetSrc(cUnit, mir, 0);
        opRegRegImm(cUnit, kOpAdd, rSrc, rSP, rlFirst.spOffset);
        // Set up the target pointer
        opRegRegImm(cUnit, kOpAdd, rDst, r0,
                    Array::DataOffset().Int32Value());
        // Set up the loop counter (known to be > 0)
        loadConstant(cUnit, rIdx, dInsn->vA);
        // Generate the copy loop.  Going backwards for convenience
        ArmLIR* target = newLIR0(cUnit, kArmPseudoTargetLabel);
        target->defMask = ENCODE_ALL;
        // Copy next element
        loadBaseIndexed(cUnit, rSrc, rIdx, rVal, 2, kWord);
        storeBaseIndexed(cUnit, rDst, rIdx, rVal, 2, kWord);
        // Use setflags encoding here
        newLIR3(cUnit, kThumb2SubsRRI12, rIdx, rIdx, 1);
        ArmLIR* branch = opCondBranch(cUnit, kArmCondNe);
        branch->generic.target = (LIR*)target;
    } else if (!isRange) {
        // TUNING: interleave
        for (unsigned int i = 0; i < dInsn->vA; i++) {
            RegLocation rlArg = loadValue(cUnit,
                                          oatGetSrc(cUnit, mir, i), kCoreReg);
            storeBaseDisp(cUnit, r0,
                          Array::DataOffset().Int32Value() +
                          i * 4, rlArg.lowReg, kWord);
            // If the loadValue caused a temp to be allocated, free it
            if (oatIsTemp(cUnit, rlArg.lowReg)) {
                oatFreeTemp(cUnit, rlArg.lowReg);
            }
        }
    }
}

Field* FindFieldWithResolvedStaticStorage(const Method* method,
                                          const uint32_t fieldIdx,
                                          uint32_t& resolvedTypeIdx) {
    art::ClassLinker* class_linker = art::Runtime::Current()->GetClassLinker();
    Field* field = class_linker->ResolveField(fieldIdx, method, true);
    if (field == NULL) {
        return NULL;
    }
    const art::DexFile& dex_file = class_linker->
        FindDexFile(method->GetDeclaringClass()->GetDexCache());
    const art::DexFile::FieldId& field_id = dex_file.GetFieldId(fieldIdx);
    int type_idx = field_id.class_idx_;
    Class* klass = method->GetDexCacheResolvedTypes()->Get(type_idx);
    // Check if the storage class is the same as the class referred to by the
    // type idx.  They may differ if the FieldId refers to a subclass but the
    // storage is in a superclass.
    if (field->GetDeclaringClass() == klass) {
        resolvedTypeIdx = type_idx;
        return field;
    }
    // See if we can find a dex reference for the storage class.  We may not
    // if the dex file never references the super class, but usually it will.
    std::string descriptor = field->GetDeclaringClass()->GetDescriptor()->ToModifiedUtf8();
    for (size_t type_idx = 0; type_idx < dex_file.NumTypeIds(); type_idx++) {
        const art::DexFile::TypeId& type_id = dex_file.GetTypeId(type_idx);
        if (descriptor == dex_file.GetTypeDescriptor(type_id)) {
            resolvedTypeIdx = type_idx;
            return field;
        }
    }
    return NULL;  // resort to slow path
}
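
/*
 * Note: the resolvedTypeIdx produced above is what the static field fast
 * paths below use to index the method's dex_cache_initialized_static_storage_
 * array, so it must name the class that actually owns the storage, which may
 * be a superclass of the class mentioned in the field reference.
 */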

static void genSput(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
{
    bool isObject = ((mir->dalvikInsn.opcode == OP_SPUT_OBJECT) ||
                     (mir->dalvikInsn.opcode == OP_SPUT_OBJECT_VOLATILE));
    int fieldIdx = mir->dalvikInsn.vB;
    uint32_t typeIdx;
    Field* field = FindFieldWithResolvedStaticStorage(cUnit->method, fieldIdx, typeIdx);
    if (SLOW_FIELD_PATH || field == NULL) {
        // Slow path
        LOG(INFO) << "Field " << fieldNameFromIndex(cUnit->method, fieldIdx)
                  << " unresolved at compile time";
        int funcOffset = isObject ? OFFSETOF_MEMBER(Thread, pSetObjStatic)
                                  : OFFSETOF_MEMBER(Thread, pSet32Static);
        oatFlushAllRegs(cUnit);
        loadWordDisp(cUnit, rSELF, funcOffset, rLR);
        loadConstant(cUnit, r0, mir->dalvikInsn.vB);
        loadCurrMethodDirect(cUnit, r1);
        loadValueDirect(cUnit, rlSrc, r2);
        callUnwindableHelper(cUnit, rLR);
        oatClobberCallRegs(cUnit);
    } else {
        // fast path
        int fieldOffset = field->GetOffset().Int32Value();
        // Using fixed register to sync with slow path
        int rMethod = r1;
        oatLockTemp(cUnit, rMethod);
        loadCurrMethodDirect(cUnit, rMethod);
        int rBase = r0;
        oatLockTemp(cUnit, rBase);
        loadWordDisp(cUnit, rMethod,
                     Method::DexCacheInitializedStaticStorageOffset().Int32Value(),
                     rBase);
        loadWordDisp(cUnit, rBase, art::Array::DataOffset().Int32Value() +
                     sizeof(int32_t*) * typeIdx, rBase);
        // TUNING: fast path should fall through
        ArmLIR* branchOver = genCmpImmBranch(cUnit, kArmCondNe, rBase, 0);
        loadWordDisp(cUnit, rSELF,
                     OFFSETOF_MEMBER(Thread, pInitializeStaticStorage), rLR);
        loadConstant(cUnit, r0, typeIdx);
        callUnwindableHelper(cUnit, rLR);
        ArmLIR* skipTarget = newLIR0(cUnit, kArmPseudoTargetLabel);
        skipTarget->defMask = ENCODE_ALL;
        branchOver->generic.target = (LIR*)skipTarget;
        rlSrc = oatGetSrc(cUnit, mir, 0);
        rlSrc = loadValue(cUnit, rlSrc, kAnyReg);
        storeWordDisp(cUnit, rBase, fieldOffset, rlSrc.lowReg);
#if ANDROID_SMP != 0
        if (field->IsVolatile()) {
            oatGenMemBarrier(cUnit, kSY);
        }
#endif
        if (isObject) {
            markGCCard(cUnit, rlSrc.lowReg, rBase);
        }
        oatFreeTemp(cUnit, rBase);
    }
}
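
/*
 * The fast path above is shared in shape by genSputWide/genSgetWide/genSget
 * below.  Roughly (illustrative sketch only):
 *
 *     rBase = method->dex_cache_initialized_static_storage_[typeIdx];
 *     if (rBase == NULL)
 *         rBase = pInitializeStaticStorage(typeIdx);  // may throw/unwind
 *     ...load or store at rBase + fieldOffset...
 *
 * with rMethod/rBase pinned to r1/r0 so the register state matches what the
 * slow path and the initialization helper expect (the helper presumably
 * leaves the resolved storage pointer in r0 == rBase).
 */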

static void genSputWide(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
{
    int fieldIdx = mir->dalvikInsn.vB;
    uint32_t typeIdx;
    Field* field = FindFieldWithResolvedStaticStorage(cUnit->method, fieldIdx, typeIdx);
    if (SLOW_FIELD_PATH || field == NULL) {
        LOG(INFO) << "Field " << fieldNameFromIndex(cUnit->method, fieldIdx)
                  << " unresolved at compile time";
        oatFlushAllRegs(cUnit);
        loadWordDisp(cUnit, rSELF, OFFSETOF_MEMBER(Thread, pSet64Static), rLR);
        loadConstant(cUnit, r0, mir->dalvikInsn.vB);
        loadCurrMethodDirect(cUnit, r1);
        loadValueDirectWideFixed(cUnit, rlSrc, r2, r3);
        callUnwindableHelper(cUnit, rLR);
        oatClobberCallRegs(cUnit);
    } else {
        // fast path
        int fieldOffset = field->GetOffset().Int32Value();
        // Using fixed register to sync with slow path
        int rMethod = r1;
        oatLockTemp(cUnit, rMethod);
        loadCurrMethodDirect(cUnit, r1);
        int rBase = r0;
        oatLockTemp(cUnit, rBase);
        loadWordDisp(cUnit, rMethod,
                     Method::DexCacheInitializedStaticStorageOffset().Int32Value(),
                     rBase);
        loadWordDisp(cUnit, rBase, art::Array::DataOffset().Int32Value() +
                     sizeof(int32_t*) * typeIdx, rBase);
        // TUNING: fast path should fall through
        ArmLIR* branchOver = genCmpImmBranch(cUnit, kArmCondNe, rBase, 0);
        loadWordDisp(cUnit, rSELF,
                     OFFSETOF_MEMBER(Thread, pInitializeStaticStorage), rLR);
        loadConstant(cUnit, r0, typeIdx);
        callUnwindableHelper(cUnit, rLR);
        ArmLIR* skipTarget = newLIR0(cUnit, kArmPseudoTargetLabel);
        skipTarget->defMask = ENCODE_ALL;
        branchOver->generic.target = (LIR*)skipTarget;
        rlSrc = oatGetSrcWide(cUnit, mir, 0, 1);
        rlSrc = loadValueWide(cUnit, rlSrc, kAnyReg);
        storeBaseDispWide(cUnit, rBase, fieldOffset, rlSrc.lowReg,
                          rlSrc.highReg);
#if ANDROID_SMP != 0
        if (field->IsVolatile()) {
            oatGenMemBarrier(cUnit, kSY);
        }
#endif
        oatFreeTemp(cUnit, rBase);
    }
}


static void genSgetWide(CompilationUnit* cUnit, MIR* mir,
                        RegLocation rlResult, RegLocation rlDest)
{
    int fieldIdx = mir->dalvikInsn.vB;
    uint32_t typeIdx;
    Field* field = FindFieldWithResolvedStaticStorage(cUnit->method, fieldIdx, typeIdx);
    if (SLOW_FIELD_PATH || field == NULL) {
        LOG(INFO) << "Field " << fieldNameFromIndex(cUnit->method, fieldIdx)
                  << " unresolved at compile time";
        oatFlushAllRegs(cUnit);
        loadWordDisp(cUnit, rSELF, OFFSETOF_MEMBER(Thread, pGet64Static), rLR);
        loadConstant(cUnit, r0, mir->dalvikInsn.vB);
        loadCurrMethodDirect(cUnit, r1);
        callUnwindableHelper(cUnit, rLR);
        RegLocation rlResult = oatGetReturnWide(cUnit);
        storeValueWide(cUnit, rlDest, rlResult);
    } else {
        // Fast path
        int fieldOffset = field->GetOffset().Int32Value();
        // Using fixed register to sync with slow path
        int rMethod = r1;
        oatLockTemp(cUnit, rMethod);
        loadCurrMethodDirect(cUnit, rMethod);
        int rBase = r0;
        oatLockTemp(cUnit, rBase);
        loadWordDisp(cUnit, rMethod,
                     Method::DexCacheInitializedStaticStorageOffset().Int32Value(),
                     rBase);
        loadWordDisp(cUnit, rBase, art::Array::DataOffset().Int32Value() +
                     sizeof(int32_t*) * typeIdx, rBase);
        // TUNING: fast path should fall through
        ArmLIR* branchOver = genCmpImmBranch(cUnit, kArmCondNe, rBase, 0);
        loadWordDisp(cUnit, rSELF,
                     OFFSETOF_MEMBER(Thread, pInitializeStaticStorage), rLR);
        loadConstant(cUnit, r0, typeIdx);
        callUnwindableHelper(cUnit, rLR);
        ArmLIR* skipTarget = newLIR0(cUnit, kArmPseudoTargetLabel);
        skipTarget->defMask = ENCODE_ALL;
        branchOver->generic.target = (LIR*)skipTarget;
        rlDest = oatGetDestWide(cUnit, mir, 0, 1);
        RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
#if ANDROID_SMP != 0
        if (field->IsVolatile()) {
            oatGenMemBarrier(cUnit, kSY);
        }
#endif
        loadBaseDispWide(cUnit, NULL, rBase, fieldOffset, rlResult.lowReg,
                         rlResult.highReg, INVALID_SREG);
        oatFreeTemp(cUnit, rBase);
        storeValueWide(cUnit, rlDest, rlResult);
    }
}

static void genSget(CompilationUnit* cUnit, MIR* mir,
                    RegLocation rlResult, RegLocation rlDest)
{
    int fieldIdx = mir->dalvikInsn.vB;
    uint32_t typeIdx;
    Field* field = FindFieldWithResolvedStaticStorage(cUnit->method, fieldIdx, typeIdx);
    bool isObject = ((mir->dalvikInsn.opcode == OP_SGET_OBJECT) ||
                     (mir->dalvikInsn.opcode == OP_SGET_OBJECT_VOLATILE));
    if (SLOW_FIELD_PATH || field == NULL) {
        LOG(INFO) << "Field " << fieldNameFromIndex(cUnit->method, fieldIdx)
                  << " unresolved at compile time";
        // Slow path
        int funcOffset = isObject ? OFFSETOF_MEMBER(Thread, pGetObjStatic)
                                  : OFFSETOF_MEMBER(Thread, pGet32Static);
        oatFlushAllRegs(cUnit);
        loadWordDisp(cUnit, rSELF, funcOffset, rLR);
        loadConstant(cUnit, r0, mir->dalvikInsn.vB);
        loadCurrMethodDirect(cUnit, r1);
        callUnwindableHelper(cUnit, rLR);
        RegLocation rlResult = oatGetReturn(cUnit);
        storeValue(cUnit, rlDest, rlResult);
    } else {
        // Fast path
        int fieldOffset = field->GetOffset().Int32Value();
        // Using fixed register to sync with slow path
        int rMethod = r1;
        oatLockTemp(cUnit, rMethod);
        loadCurrMethodDirect(cUnit, rMethod);
        int rBase = r0;
        oatLockTemp(cUnit, rBase);
        loadWordDisp(cUnit, rMethod,
                     Method::DexCacheInitializedStaticStorageOffset().Int32Value(),
                     rBase);
        loadWordDisp(cUnit, rBase, art::Array::DataOffset().Int32Value() +
                     sizeof(int32_t*) * typeIdx, rBase);
        // TUNING: fast path should fall through
        ArmLIR* branchOver = genCmpImmBranch(cUnit, kArmCondNe, rBase, 0);
        loadWordDisp(cUnit, rSELF,
                     OFFSETOF_MEMBER(Thread, pInitializeStaticStorage), rLR);
        loadConstant(cUnit, r0, typeIdx);
        callUnwindableHelper(cUnit, rLR);
        ArmLIR* skipTarget = newLIR0(cUnit, kArmPseudoTargetLabel);
        skipTarget->defMask = ENCODE_ALL;
        branchOver->generic.target = (LIR*)skipTarget;
        rlDest = oatGetDest(cUnit, mir, 0);
        rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
#if ANDROID_SMP != 0
        if (field->IsVolatile()) {
            oatGenMemBarrier(cUnit, kSY);
        }
#endif
        loadWordDisp(cUnit, rBase, fieldOffset, rlResult.lowReg);
        oatFreeTemp(cUnit, rBase);
        storeValue(cUnit, rlDest, rlResult);
    }
}

typedef int (*NextCallInsn)(CompilationUnit*, MIR*, DecodedInstruction*, int,
                            ArmLIR*);

/*
 * Bit of a hack here - in lieu of a real scheduling pass,
 * emit the next instruction in static & direct invoke sequences.
 */
static int nextSDCallInsn(CompilationUnit* cUnit, MIR* mir,
                          DecodedInstruction* dInsn, int state,
                          ArmLIR* rollback)
{
    DCHECK(rollback == NULL);
    uint32_t idx = dInsn->vB;
    switch(state) {
        case 0: // Get the current Method* [sets r0]
            loadCurrMethodDirect(cUnit, r0);
            break;
        case 1: // Get method->code_and_direct_methods_
            loadWordDisp(cUnit, r0,
                         Method::GetDexCacheCodeAndDirectMethodsOffset().Int32Value(),
                         r0);
            break;
        case 2: // Grab target method* and target code_
            loadWordDisp(cUnit, r0,
                         art::CodeAndDirectMethods::CodeOffsetInBytes(idx), rLR);
            loadWordDisp(cUnit, r0,
                         art::CodeAndDirectMethods::MethodOffsetInBytes(idx), r0);
            break;
        default:
            return -1;
    }
    return state + 1;
}
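
/*
 * Illustrative note on how these NextCallInsn state machines are driven: the
 * argument loaders (loadArgRegs, genDalvikArgsNoRange, genDalvikArgsRange)
 * invoke nextCallInsn() between individual argument loads, so the
 * target-resolution loads above get interleaved with argument setup.  Any
 * states left over after argument loading are flushed by the
 * "while (callState >= 0)" loops in the genInvoke* routines, and the call
 * itself is finally made with opReg(cUnit, kOpBlx, rLR).
 */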

/*
 * Bit of a hack here - in lieu of a real scheduling pass,
 * emit the next instruction in a virtual invoke sequence.
 * We can use rLR as a temp prior to target address loading.
 * Note also that we'll load the first argument ("this") into
 * r1 here rather than the standard loadArgRegs.
 */
static int nextVCallInsn(CompilationUnit* cUnit, MIR* mir,
                         DecodedInstruction* dInsn, int state,
                         ArmLIR* rollback)
{
    DCHECK(rollback == NULL);
    RegLocation rlArg;
    /*
     * This is the fast path in which the target virtual method is
     * fully resolved at compile time.
     */
    art::ClassLinker* class_linker = art::Runtime::Current()->GetClassLinker();
    Method* baseMethod = class_linker->ResolveMethod(dInsn->vB, cUnit->method, false);
    CHECK(baseMethod != NULL);
    uint32_t target_idx = baseMethod->GetMethodIndex();
    switch(state) {
        case 0: // Get "this" [set r1]
            rlArg = oatGetSrc(cUnit, mir, 0);
            loadValueDirectFixed(cUnit, rlArg, r1);
            break;
        case 1: // Is "this" null? [use r1]
            genNullCheck(cUnit, oatSSASrc(mir,0), r1, mir);
            // get this->klass_ [use r1, set rLR]
            loadWordDisp(cUnit, r1, Object::ClassOffset().Int32Value(), rLR);
            break;
        case 2: // Get this->klass_->vtable [use rLR, set rLR]
            loadWordDisp(cUnit, rLR, Class::VTableOffset().Int32Value(), rLR);
            break;
        case 3: // Get target method [use rLR, set r0]
            loadWordDisp(cUnit, rLR, (target_idx * 4) +
                         art::Array::DataOffset().Int32Value(), r0);
            break;
        case 4: // Get the target compiled code address [uses r0, sets rLR]
            loadWordDisp(cUnit, r0, Method::GetCodeOffset().Int32Value(), rLR);
            break;
        default:
            return -1;
    }
    return state + 1;
}

static int nextVCallInsnSP(CompilationUnit* cUnit, MIR* mir,
                           DecodedInstruction* dInsn, int state,
                           ArmLIR* rollback)
{
    DCHECK(rollback != NULL);
    RegLocation rlArg;
    ArmLIR* skipBranch;
    ArmLIR* skipTarget;
    /*
     * This handles the case in which the base method is not fully
     * resolved at compile time.  We must generate code to test for
     * resolution at run time and bail to the slow path to fill in all
     * the tables if it fails.  In that case, we'll restart at the
     * beginning of the sequence.
     */
    switch(state) {
        case 0: // Get the current Method* [sets r0]
            loadCurrMethodDirect(cUnit, r0);
            break;
        case 1: // Get method->dex_cache_resolved_methods_
            loadWordDisp(cUnit, r0,
                         Method::GetDexCacheResolvedMethodsOffset().Int32Value(), rLR);
            break;
        case 2: // method->dex_cache_resolved_methods_->Get(method_idx)
            loadWordDisp(cUnit, rLR, (dInsn->vB * 4) +
                         art::Array::DataOffset().Int32Value(), rLR);
            break;
        case 3: // Resolved?
            skipBranch = genCmpImmBranch(cUnit, kArmCondNe, rLR, 0);
            // Slowest path, bail to helper, rollback and retry
            loadWordDisp(cUnit, rSELF,
                         OFFSETOF_MEMBER(Thread, pResolveMethodFromCode), rLR);
            loadConstant(cUnit, r1, dInsn->vB);
            callUnwindableHelper(cUnit, rLR);
            genUnconditionalBranch(cUnit, rollback);
            // Resume normal slow path
            skipTarget = newLIR0(cUnit, kArmPseudoTargetLabel);
            skipTarget->defMask = ENCODE_ALL;
            skipBranch->generic.target = (LIR*)skipTarget;
            // Get base_method->method_index [use rLR, set r0]
            loadBaseDisp(cUnit, mir, rLR,
                         Method::GetMethodIndexOffset().Int32Value(), r0,
                         kUnsignedHalf, INVALID_SREG);
            // Load "this" [set r1]
            rlArg = oatGetSrc(cUnit, mir, 0);
            loadValueDirectFixed(cUnit, rlArg, r1);
            break;
        case 4:
            // Is "this" null? [use r1]
            genNullCheck(cUnit, oatSSASrc(mir,0), r1, mir);
            // get this->klass_ [use r1, set rLR]
            loadWordDisp(cUnit, r1, Object::ClassOffset().Int32Value(), rLR);
            break;
        case 5:
            // get this->klass_->vtable_ [use rLR, set rLR]
            loadWordDisp(cUnit, rLR, Class::VTableOffset().Int32Value(), rLR);
            DCHECK((art::Array::DataOffset().Int32Value() & 0x3) == 0);
            // In the shadow of the load, fold the vtable_ object header size
            // into method_index_
            opRegImm(cUnit, kOpAdd, r0,
                     art::Array::DataOffset().Int32Value() / 4);
            // Get target Method*
            loadBaseIndexed(cUnit, rLR, r0, r0, 2, kWord);
            break;
        case 6: // Get the target compiled code address [uses r0, sets rLR]
            loadWordDisp(cUnit, r0, Method::GetCodeOffset().Int32Value(), rLR);
            break;
        default:
            return -1;
    }
    return state + 1;
}

/* Load up to 3 arguments in r1..r3 */
static int loadArgRegs(CompilationUnit* cUnit, MIR* mir,
                       DecodedInstruction* dInsn, int callState,
                       int *args, NextCallInsn nextCallInsn, ArmLIR* rollback)
{
    for (int i = 0; i < 3; i++) {
        if (args[i] != INVALID_REG) {
            // Arguments are treated as a series of untyped 32-bit values.
            RegLocation rlArg = oatGetRawSrc(cUnit, mir, i);
            rlArg.wide = false;
            loadValueDirectFixed(cUnit, rlArg, r1 + i);
            callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
        }
    }
    return callState;
}

// Interleave launch code for INVOKE_INTERFACE.
static int nextInterfaceCallInsn(CompilationUnit* cUnit, MIR* mir,
                                 DecodedInstruction* dInsn, int state,
                                 ArmLIR* rollback)
{
    switch(state) {
        case 0: // Load trampoline target
            loadWordDisp(cUnit, rSELF,
                         OFFSETOF_MEMBER(Thread, pInvokeInterfaceTrampoline),
                         rLR);
            // Load r0 with method index
            loadConstant(cUnit, r0, dInsn->vB);
            break;
        default:
            return -1;
    }
    return state + 1;
}

/*
 * Interleave launch code for INVOKE_SUPER.  See comments
 * for nextVCallInsn.
 */
static int nextSuperCallInsn(CompilationUnit* cUnit, MIR* mir,
                             DecodedInstruction* dInsn, int state,
                             ArmLIR* rollback)
{
    DCHECK(rollback == NULL);
    RegLocation rlArg;
    /*
     * This is the fast path in which the target virtual method is
     * fully resolved at compile time.  Note also that this path assumes
     * that the check to verify that the target method index falls
     * within the size of the super's vtable has been done at compile-time.
     */
    art::ClassLinker* class_linker = art::Runtime::Current()->GetClassLinker();
    Method* baseMethod = class_linker->ResolveMethod(dInsn->vB, cUnit->method, false);
    CHECK(baseMethod != NULL);
    Class* superClass = cUnit->method->GetDeclaringClass()->GetSuperClass();
    CHECK(superClass != NULL);
    int32_t target_idx = baseMethod->GetMethodIndex();
    CHECK(superClass->GetVTable()->GetLength() > target_idx);
    Method* targetMethod = superClass->GetVTable()->Get(target_idx);
    CHECK(targetMethod != NULL);
    switch(state) {
        case 0: // Get current Method* [set r0]
            loadCurrMethodDirect(cUnit, r0);
            // Load "this" [set r1]
            rlArg = oatGetSrc(cUnit, mir, 0);
            loadValueDirectFixed(cUnit, rlArg, r1);
            // Get method->declaring_class_ [use r0, set rLR]
            loadWordDisp(cUnit, r0, Method::DeclaringClassOffset().Int32Value(),
                         rLR);
            // Is "this" null? [use r1]
            genNullCheck(cUnit, oatSSASrc(mir,0), r1, mir);
            break;
        case 1: // Get method->declaring_class_->super_class [use rLR, set rLR]
            loadWordDisp(cUnit, rLR, Class::SuperClassOffset().Int32Value(),
                         rLR);
            break;
        case 2: // Get ...->super_class_->vtable [u/s rLR]
            loadWordDisp(cUnit, rLR, Class::VTableOffset().Int32Value(), rLR);
            break;
        case 3: // Get target method [use rLR, set r0]
            loadWordDisp(cUnit, rLR, (target_idx * 4) +
                         art::Array::DataOffset().Int32Value(), r0);
            break;
        case 4: // Get the target compiled code address [uses r0, sets rLR]
            loadWordDisp(cUnit, r0, Method::GetCodeOffset().Int32Value(), rLR);
            break;
        default:
            return -1;
    }
    return state + 1;
}

/* Slow-path version of nextSuperCallInsn */
static int nextSuperCallInsnSP(CompilationUnit* cUnit, MIR* mir,
                               DecodedInstruction* dInsn, int state,
                               ArmLIR* rollback)
{
    DCHECK(rollback != NULL);
    RegLocation rlArg;
    ArmLIR* skipBranch;
    ArmLIR* skipTarget;
    int tReg;
    /*
     * This handles the case in which the base method is not fully
     * resolved at compile time.  We must generate code to test for
     * resolution at run time and bail to the slow path to fill in all
     * the tables if it fails.  In that case, we'll restart at the
     * beginning of the sequence.
     */
    switch(state) {
        case 0: // Get the current Method* [sets r0]
            loadCurrMethodDirect(cUnit, r0);
            break;
        case 1: // Get method->dex_cache_resolved_methods_ [use r0, set rLR]
            loadWordDisp(cUnit, r0,
                         Method::GetDexCacheResolvedMethodsOffset().Int32Value(), rLR);
            break;
        case 2: // method->dex_cache_resolved_methods_->Get(meth_idx) [u/s rLR]
            loadWordDisp(cUnit, rLR, (dInsn->vB * 4) +
                         art::Array::DataOffset().Int32Value(), rLR);
            break;
        case 3: // Resolved?
            skipBranch = genCmpImmBranch(cUnit, kArmCondNe, rLR, 0);
            // Slowest path, bail to helper, rollback and retry
            loadWordDisp(cUnit, rSELF,
                         OFFSETOF_MEMBER(Thread, pResolveMethodFromCode), rLR);
            loadConstant(cUnit, r1, dInsn->vB);
            callUnwindableHelper(cUnit, rLR);
            genUnconditionalBranch(cUnit, rollback);
            // Resume normal slow path
            skipTarget = newLIR0(cUnit, kArmPseudoTargetLabel);
            skipTarget->defMask = ENCODE_ALL;
            skipBranch->generic.target = (LIR*)skipTarget;
            // Get base_method->method_index [use rLR, set rLR]
            loadBaseDisp(cUnit, mir, rLR,
                         Method::GetMethodIndexOffset().Int32Value(), rLR,
                         kUnsignedHalf, INVALID_SREG);
            // Load "this" [set r1]
            rlArg = oatGetSrc(cUnit, mir, 0);
            loadValueDirectFixed(cUnit, rlArg, r1);
            // Load curMethod->declaring_class_ [uses r0, sets r0]
            loadWordDisp(cUnit, r0, Method::DeclaringClassOffset().Int32Value(),
                         r0);
            // Is "this" null? [use r1]
            genNullCheck(cUnit, oatSSASrc(mir,0), r1, mir);
            // Get method->declaring_class_->super_class [use r0, set r0]
            loadWordDisp(cUnit, r0, Class::SuperClassOffset().Int32Value(), r0);
            break;
        case 4: // Get ...->super_class_->vtable [u/s r0]
            loadWordDisp(cUnit, r0, Class::VTableOffset().Int32Value(), r0);
            if (!(mir->optimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
                // Range check, throw NSM on failure
                tReg = oatAllocTemp(cUnit);
                loadWordDisp(cUnit, r0, art::Array::LengthOffset().Int32Value(),
                             tReg);
                genRegRegCheck(cUnit, kArmCondCs, tReg, rLR, mir,
                               kArmThrowNoSuchMethod);
                oatFreeTemp(cUnit, tReg);
            }
            // Adjust vtable_ base past object header
            opRegImm(cUnit, kOpAdd, r0, art::Array::DataOffset().Int32Value());
            // Get target Method*
            loadBaseIndexed(cUnit, r0, rLR, r0, 2, kWord);
            break;
        case 5: // Get the target compiled code address [uses r0, sets rLR]
            loadWordDisp(cUnit, r0, Method::GetCodeOffset().Int32Value(), rLR);
            break;
        default:
            return -1;
    }
    return state + 1;
}

/*
 * Load up to 5 arguments, the first three of which will be in
 * r1 .. r3.  On entry r0 contains the current method pointer,
 * and as part of the load sequence, it must be replaced with
 * the target method pointer.  Note, this may also be called
 * for "range" variants if the number of arguments is 5 or fewer.
 */
static int genDalvikArgsNoRange(CompilationUnit* cUnit, MIR* mir,
                                DecodedInstruction* dInsn, int callState,
                                ArmLIR** pcrLabel, bool isRange,
                                NextCallInsn nextCallInsn, ArmLIR* rollback,
                                bool skipThis)
{
    RegLocation rlArg;
    int registerArgs[3];

    /* If no arguments, just return */
    if (dInsn->vA == 0)
        return callState;

    callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);

    /*
     * Load frame arguments arg4 & arg5 first.  Coded a little oddly to
     * pre-schedule the method pointer target.
     */
    for (unsigned int i = 3; i < dInsn->vA; i++) {
        int reg;
        // Treating args as untyped 32-bit chunks
        rlArg = oatGetRawSrc(cUnit, mir, i);
        rlArg.wide = false;
        rlArg = oatUpdateLoc(cUnit, rlArg);
        if (rlArg.location == kLocPhysReg) {
            reg = rlArg.lowReg;
        } else {
            // r3 is the last arg register loaded, so can safely be used here
            reg = r3;
            loadValueDirectFixed(cUnit, rlArg, reg);
            callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
        }
        storeBaseDisp(cUnit, rSP, (i + 1) * 4, reg, kWord);
        callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
    }

    /* Load register arguments r1..r3 */
    for (unsigned int i = 0; i < 3; i++) {
        if (i < dInsn->vA)
            registerArgs[i] = (isRange) ? dInsn->vC + i : i;
        else
            registerArgs[i] = INVALID_REG;
    }
    if (skipThis) {
        registerArgs[0] = INVALID_REG;
    }
    callState = loadArgRegs(cUnit, mir, dInsn, callState, registerArgs,
                            nextCallInsn, rollback);

    //TODO: better to move this into CallInsn lists
    // Load direct & need a "this" null check?
    if (pcrLabel) {
        *pcrLabel = genNullCheck(cUnit, oatSSASrc(mir,0), r1, mir);
    }
    return callState;
}
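
/*
 * Note on the out-arg layout assumed above: outgoing argument i (for i >= 3)
 * is stored at [sp, (i + 1) * 4], which appears to leave slot 0 for the
 * callee Method* and slots 1..3 shadowing the register arguments r1..r3
 * (compare outsOffset = 4 + 3 * 4 in genDalvikArgsRange below).
 */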

/*
 * May have 0+ arguments (also used for jumbo).  Note that
 * source virtual registers may be in physical registers, so may
 * need to be flushed to home location before copying.  This
 * applies to arg3 and above (see below).
 *
 * Two general strategies:
 *    If < 20 arguments
 *       Pass args 3-18 using vldm/vstm block copy
 *       Pass arg0, arg1 & arg2 in r1-r3
 *    If 20+ arguments
 *       Pass args arg19+ using memcpy block copy
 *       Pass arg0, arg1 & arg2 in r1-r3
 *
 */
static int genDalvikArgsRange(CompilationUnit* cUnit, MIR* mir,
                              DecodedInstruction* dInsn, int callState,
                              ArmLIR** pcrLabel, NextCallInsn nextCallInsn,
                              ArmLIR* rollback, bool skipThis)
{
    int firstArg = dInsn->vC;
    int numArgs = dInsn->vA;
    int registerArgs[3];

    // If we can treat it as non-range (Jumbo ops will use range form)
    if (numArgs <= 5)
        return genDalvikArgsNoRange(cUnit, mir, dInsn, callState, pcrLabel,
                                    true, nextCallInsn, rollback, skipThis);
    /*
     * Make sure the range list doesn't span the break between the normal
     * Dalvik vRegs and the ins.
     */
    int highestArg = oatGetSrc(cUnit, mir, numArgs-1).sRegLow;
    int boundaryReg = cUnit->method->NumRegisters() - cUnit->method->NumIns();
    if ((firstArg < boundaryReg) && (highestArg >= boundaryReg)) {
        LOG(FATAL) << "Argument list spanned locals & args";
    }

    /*
     * First load the non-register arguments.  Both forms expect all
     * of the source arguments to be in their home frame location, so
     * scan the sReg names and flush any that have been promoted back
     * to their frame backing storage.
     */
    // Scan the rest of the args - if in physReg flush to memory
    for (int i = 3; i < numArgs; i++) {
        RegLocation loc = oatGetRawSrc(cUnit, mir, i);
        if (loc.wide) {
            loc = oatUpdateLocWide(cUnit, loc);
            if (loc.location == kLocPhysReg) {  // TUNING: if dirty?
                storeBaseDispWide(cUnit, rSP, loc.spOffset, loc.lowReg,
                                  loc.highReg);
                callState = nextCallInsn(cUnit, mir, dInsn, callState,
                                         rollback);
            }
        } else {
            loc = oatUpdateLoc(cUnit, loc);
            if (loc.location == kLocPhysReg) {  // TUNING: if dirty?
                storeBaseDisp(cUnit, rSP, loc.spOffset, loc.lowReg, kWord);
                callState = nextCallInsn(cUnit, mir, dInsn, callState,
                                         rollback);
            }
        }
    }

    int startOffset = cUnit->regLocation[mir->ssaRep->uses[3]].spOffset;
    int outsOffset = 4 /* Method* */ + (3 * 4);
    if (numArgs >= 20) {
        // Generate memcpy
        opRegRegImm(cUnit, kOpAdd, r0, rSP, outsOffset);
        opRegRegImm(cUnit, kOpAdd, r1, rSP, startOffset);
        loadWordDisp(cUnit, rSELF, OFFSETOF_MEMBER(Thread, pMemcpy), rLR);
        loadConstant(cUnit, r2, (numArgs - 3) * 4);
        callNoUnwindHelper(cUnit, rLR);
    } else {
        // Use vldm/vstm pair using r3 as a temp
        int regsLeft = std::min(numArgs - 3, 16);
        callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
        opRegRegImm(cUnit, kOpAdd, r3, rSP, startOffset);
        ArmLIR* ld = newLIR3(cUnit, kThumb2Vldms, r3, fr0, regsLeft);
        //TUNING: loosen barrier
        ld->defMask = ENCODE_ALL;
        setMemRefType(ld, true /* isLoad */, kDalvikReg);
        callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
        opRegRegImm(cUnit, kOpAdd, r3, rSP, 4 /* Method* */ + (3 * 4));
        callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
        ArmLIR* st = newLIR3(cUnit, kThumb2Vstms, r3, fr0, regsLeft);
        setMemRefType(st, false /* isLoad */, kDalvikReg);
        st->defMask = ENCODE_ALL;
        callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
    }

    // Handle the 1st 3 in r1, r2 & r3
    for (unsigned int i = 0; i < 3; i++) {
        if (i < dInsn->vA)
            registerArgs[i] = dInsn->vC + i;
        else
            registerArgs[i] = INVALID_REG;
    }
    if (skipThis) {
        registerArgs[0] = INVALID_REG;
    }
    callState = loadArgRegs(cUnit, mir, dInsn, callState, registerArgs,
                            nextCallInsn, rollback);

    callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
    return callState;
}
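
/*
 * Worked example of the two copy strategies above (illustrative): an
 * 18-argument range invoke copies min(18 - 3, 16) = 15 words with a single
 * vldm/vstm pair, while a 25-argument invoke hands (25 - 3) * 4 = 88 bytes
 * to the pMemcpy helper instead.
 */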

#ifdef DISPLAY_MISSING_TARGETS
// Debugging routine - if null target, branch to DebugMe
static void genShowTarget(CompilationUnit* cUnit)
{
    ArmLIR* branchOver = genCmpImmBranch(cUnit, kArmCondNe, rLR, 0);
    loadWordDisp(cUnit, rSELF,
                 OFFSETOF_MEMBER(Thread, pDebugMe), rLR);
    ArmLIR* target = newLIR0(cUnit, kArmPseudoTargetLabel);
    target->defMask = -1;
    branchOver->generic.target = (LIR*)target;
}
#endif

static void genInvokeStaticDirect(CompilationUnit* cUnit, MIR* mir,
                                  bool direct, bool range)
{
    DecodedInstruction* dInsn = &mir->dalvikInsn;
    int callState = 0;
    ArmLIR* nullCk;
    ArmLIR** pNullCk = direct ? &nullCk : NULL;
    NextCallInsn nextCallInsn = nextSDCallInsn;

    // Explicit register usage
    oatLockCallTemps(cUnit);

    if (range) {
        callState = genDalvikArgsRange(cUnit, mir, dInsn, callState, pNullCk,
                                       nextCallInsn, NULL, false);
    } else {
        callState = genDalvikArgsNoRange(cUnit, mir, dInsn, callState, pNullCk,
                                         false, nextCallInsn, NULL, false);
    }
    // Finish up any of the call sequence not interleaved in arg loading
    while (callState >= 0) {
        callState = nextCallInsn(cUnit, mir, dInsn, callState, NULL);
    }
#ifdef DISPLAY_MISSING_TARGETS
    genShowTarget(cUnit);
#endif
    opReg(cUnit, kOpBlx, rLR);
}

/*
 * All invoke-interface calls bounce off of art_invoke_interface_trampoline,
 * which will locate the target and continue on via a tail call.
 */
static void genInvokeInterface(CompilationUnit* cUnit, MIR* mir)
{
    DecodedInstruction* dInsn = &mir->dalvikInsn;
    int callState = 0;
    ArmLIR* nullCk;

    // Explicit register usage
    oatLockCallTemps(cUnit);
    /* Note: must call nextInterfaceCallInsn() prior to 1st argument load */
    callState = nextInterfaceCallInsn(cUnit, mir, dInsn, callState, NULL);
    if (mir->dalvikInsn.opcode == OP_INVOKE_INTERFACE)
        callState = genDalvikArgsNoRange(cUnit, mir, dInsn, callState, &nullCk,
                                         false, nextInterfaceCallInsn, NULL,
                                         false);
    else
        callState = genDalvikArgsRange(cUnit, mir, dInsn, callState, &nullCk,
                                       nextInterfaceCallInsn, NULL, false);
    // Finish up any of the call sequence not interleaved in arg loading
    while (callState >= 0) {
        callState = nextInterfaceCallInsn(cUnit, mir, dInsn, callState, NULL);
    }
#ifdef DISPLAY_MISSING_TARGETS
    genShowTarget(cUnit);
#endif
    opReg(cUnit, kOpBlx, rLR);
}

static void genInvokeSuper(CompilationUnit* cUnit, MIR* mir)
{
    DecodedInstruction* dInsn = &mir->dalvikInsn;
    int callState = 0;
    ArmLIR* nullCk;
    ArmLIR* rollback;
    art::ClassLinker* class_linker = art::Runtime::Current()->GetClassLinker();
    Method* baseMethod = class_linker->ResolveMethod(dInsn->vB, cUnit->method, false);
    NextCallInsn nextCallInsn;
    bool fastPath = true;

    // Explicit register usage
    oatLockCallTemps(cUnit);
    if (SLOW_INVOKE_PATH || baseMethod == NULL) {
        fastPath = false;
    } else {
        Class* superClass = cUnit->method->GetDeclaringClass()->GetSuperClass();
        if (superClass == NULL) {
            fastPath = false;
        } else {
            int32_t target_idx = baseMethod->GetMethodIndex();
            if (superClass->GetVTable()->GetLength() <= target_idx) {
                fastPath = false;
            } else {
                fastPath = (superClass->GetVTable()->Get(target_idx) != NULL);
            }
        }
    }
    if (fastPath) {
        nextCallInsn = nextSuperCallInsn;
        rollback = NULL;
    } else {
        nextCallInsn = nextSuperCallInsnSP;
        rollback = newLIR0(cUnit, kArmPseudoTargetLabel);
        rollback->defMask = -1;
    }
    if (mir->dalvikInsn.opcode == OP_INVOKE_SUPER)
        callState = genDalvikArgsNoRange(cUnit, mir, dInsn, callState, &nullCk,
                                         false, nextCallInsn, rollback, true);
    else
        callState = genDalvikArgsRange(cUnit, mir, dInsn, callState, &nullCk,
                                       nextCallInsn, rollback, true);
    // Finish up any of the call sequence not interleaved in arg loading
    while (callState >= 0) {
        callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
    }
#ifdef DISPLAY_MISSING_TARGETS
    genShowTarget(cUnit);
#endif
    opReg(cUnit, kOpBlx, rLR);
}

static void genInvokeVirtual(CompilationUnit* cUnit, MIR* mir)
{
    DecodedInstruction* dInsn = &mir->dalvikInsn;
    int callState = 0;
    ArmLIR* nullCk;
    ArmLIR* rollback;
    art::ClassLinker* class_linker = art::Runtime::Current()->GetClassLinker();
    Method* method = class_linker->ResolveMethod(dInsn->vB, cUnit->method, false);
    NextCallInsn nextCallInsn;

    // Explicit register usage
    oatLockCallTemps(cUnit);
    if (SLOW_INVOKE_PATH || method == NULL) {
        // Slow path
        nextCallInsn = nextVCallInsnSP;
        // If we need a slow-path callout, we'll restart here
        rollback = newLIR0(cUnit, kArmPseudoTargetLabel);
        rollback->defMask = -1;
    } else {
        // Fast path
        nextCallInsn = nextVCallInsn;
        rollback = NULL;
    }
    if (mir->dalvikInsn.opcode == OP_INVOKE_VIRTUAL)
        callState = genDalvikArgsNoRange(cUnit, mir, dInsn, callState, &nullCk,
                                         false, nextCallInsn, rollback, true);
    else
        callState = genDalvikArgsRange(cUnit, mir, dInsn, callState, &nullCk,
                                       nextCallInsn, rollback, true);
    // Finish up any of the call sequence not interleaved in arg loading
    while (callState >= 0) {
        callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
    }
#ifdef DISPLAY_MISSING_TARGETS
    genShowTarget(cUnit);
#endif
    opReg(cUnit, kOpBlx, rLR);
}

buzbee67bf8852011-08-17 17:51:35 -07001051static bool compileDalvikInstruction(CompilationUnit* cUnit, MIR* mir,
1052 BasicBlock* bb, ArmLIR* labelList)
1053{
1054 bool res = false; // Assume success
1055 RegLocation rlSrc[3];
1056 RegLocation rlDest = badLoc;
1057 RegLocation rlResult = badLoc;
1058 Opcode opcode = mir->dalvikInsn.opcode;
1059
1060 /* Prep Src and Dest locations */
1061 int nextSreg = 0;
1062 int nextLoc = 0;
1063 int attrs = oatDataFlowAttributes[opcode];
1064 rlSrc[0] = rlSrc[1] = rlSrc[2] = badLoc;
1065 if (attrs & DF_UA) {
1066 rlSrc[nextLoc++] = oatGetSrc(cUnit, mir, nextSreg);
1067 nextSreg++;
1068 } else if (attrs & DF_UA_WIDE) {
1069 rlSrc[nextLoc++] = oatGetSrcWide(cUnit, mir, nextSreg,
1070 nextSreg + 1);
1071 nextSreg+= 2;
1072 }
1073 if (attrs & DF_UB) {
1074 rlSrc[nextLoc++] = oatGetSrc(cUnit, mir, nextSreg);
1075 nextSreg++;
1076 } else if (attrs & DF_UB_WIDE) {
1077 rlSrc[nextLoc++] = oatGetSrcWide(cUnit, mir, nextSreg,
1078 nextSreg + 1);
1079 nextSreg+= 2;
1080 }
1081 if (attrs & DF_UC) {
1082 rlSrc[nextLoc++] = oatGetSrc(cUnit, mir, nextSreg);
1083 } else if (attrs & DF_UC_WIDE) {
1084 rlSrc[nextLoc++] = oatGetSrcWide(cUnit, mir, nextSreg,
1085 nextSreg + 1);
1086 }
1087 if (attrs & DF_DA) {
1088 rlDest = oatGetDest(cUnit, mir, 0);
1089 } else if (attrs & DF_DA_WIDE) {
1090 rlDest = oatGetDestWide(cUnit, mir, 0, 1);
1091 }
1092
1093 switch(opcode) {
1094 case OP_NOP:
1095 break;
1096
1097 case OP_MOVE_EXCEPTION:
1098 int exOffset;
1099 int resetReg;
buzbeec143c552011-08-20 17:38:58 -07001100 exOffset = Thread::ExceptionOffset().Int32Value();
buzbee67bf8852011-08-17 17:51:35 -07001101 resetReg = oatAllocTemp(cUnit);
1102 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1103 loadWordDisp(cUnit, rSELF, exOffset, rlResult.lowReg);
1104 loadConstant(cUnit, resetReg, 0);
1105 storeWordDisp(cUnit, rSELF, exOffset, resetReg);
1106 storeValue(cUnit, rlDest, rlResult);
1107 break;
1108
1109 case OP_RETURN_VOID:
1110 break;
1111
1112 case OP_RETURN:
1113 case OP_RETURN_OBJECT:
1114 storeValue(cUnit, retLoc, rlSrc[0]);
1115 break;
1116
1117 case OP_RETURN_WIDE:
1118 rlDest = retLocWide;
1119 rlDest.fp = rlSrc[0].fp;
1120 storeValueWide(cUnit, rlDest, rlSrc[0]);
1121 break;
1122
1123 case OP_MOVE_RESULT_WIDE:
buzbee43a36422011-09-14 14:00:13 -07001124 if (mir->optimizationFlags & MIR_INLINED)
buzbee67bf8852011-08-17 17:51:35 -07001125 break; // Nop - combined w/ previous invoke
1126 /*
1127 * Somewhat hacky here. Because we're now passing
1128 * return values in registers, we have to let the
1129 * register allocation utilities know that the return
1130 * registers are live and may not be used for address
1131 * formation in storeValueWide.
1132 */
1133 assert(retLocWide.lowReg == r0);
buzbee1da522d2011-09-04 11:22:20 -07001134 assert(retLocWide.highReg == r1);
buzbee67bf8852011-08-17 17:51:35 -07001135 oatLockTemp(cUnit, retLocWide.lowReg);
1136 oatLockTemp(cUnit, retLocWide.highReg);
1137 storeValueWide(cUnit, rlDest, retLocWide);
1138 oatFreeTemp(cUnit, retLocWide.lowReg);
1139 oatFreeTemp(cUnit, retLocWide.highReg);
1140 break;
1141
1142 case OP_MOVE_RESULT:
1143 case OP_MOVE_RESULT_OBJECT:
buzbee43a36422011-09-14 14:00:13 -07001144 if (mir->optimizationFlags & MIR_INLINED)
buzbee67bf8852011-08-17 17:51:35 -07001145 break; // Nop - combined w/ previous invoke
1146 /* See comment for OP_MOVE_RESULT_WIDE */
1147 assert(retLoc.lowReg == r0);
1148 oatLockTemp(cUnit, retLoc.lowReg);
1149 storeValue(cUnit, rlDest, retLoc);
1150 oatFreeTemp(cUnit, retLoc.lowReg);
1151 break;
1152
1153 case OP_MOVE:
1154 case OP_MOVE_OBJECT:
1155 case OP_MOVE_16:
1156 case OP_MOVE_OBJECT_16:
1157 case OP_MOVE_FROM16:
1158 case OP_MOVE_OBJECT_FROM16:
1159 storeValue(cUnit, rlDest, rlSrc[0]);
1160 break;
1161
1162 case OP_MOVE_WIDE:
1163 case OP_MOVE_WIDE_16:
1164 case OP_MOVE_WIDE_FROM16:
1165 storeValueWide(cUnit, rlDest, rlSrc[0]);
1166 break;
1167
1168 case OP_CONST:
1169 case OP_CONST_4:
1170 case OP_CONST_16:
1171 rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
1172 loadConstantNoClobber(cUnit, rlResult.lowReg, mir->dalvikInsn.vB);
1173 storeValue(cUnit, rlDest, rlResult);
1174 break;
1175
1176 case OP_CONST_HIGH16:
1177 rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
1178 loadConstantNoClobber(cUnit, rlResult.lowReg,
1179 mir->dalvikInsn.vB << 16);
1180 storeValue(cUnit, rlDest, rlResult);
1181 break;
1182
1183 case OP_CONST_WIDE_16:
1184 case OP_CONST_WIDE_32:
1185 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1186 loadConstantNoClobber(cUnit, rlResult.lowReg, mir->dalvikInsn.vB);
1187 //TUNING: do high separately to avoid load dependency
1188 opRegRegImm(cUnit, kOpAsr, rlResult.highReg, rlResult.lowReg, 31);
1189 storeValueWide(cUnit, rlDest, rlResult);
1190 break;
1191
1192 case OP_CONST_WIDE:
1193 rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
1194 loadConstantValueWide(cUnit, rlResult.lowReg, rlResult.highReg,
buzbee54330722011-08-23 16:46:55 -07001195 mir->dalvikInsn.vB_wide & 0xffffffff,
1196 (mir->dalvikInsn.vB_wide >> 32) & 0xffffffff);
buzbee3ea4ec52011-08-22 17:37:19 -07001197 storeValueWide(cUnit, rlDest, rlResult);
buzbee67bf8852011-08-17 17:51:35 -07001198 break;
1199
1200 case OP_CONST_WIDE_HIGH16:
1201 rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
1202 loadConstantValueWide(cUnit, rlResult.lowReg, rlResult.highReg,
1203 0, mir->dalvikInsn.vB << 16);
buzbee7b1b86d2011-08-26 18:59:10 -07001204 storeValueWide(cUnit, rlDest, rlResult);
buzbee67bf8852011-08-17 17:51:35 -07001205 break;
1206
1207 case OP_MONITOR_ENTER:
1208 genMonitorEnter(cUnit, mir, rlSrc[0]);
1209 break;
1210
1211 case OP_MONITOR_EXIT:
1212 genMonitorExit(cUnit, mir, rlSrc[0]);
1213 break;
1214
1215 case OP_CHECK_CAST:
1216 genCheckCast(cUnit, mir, rlSrc[0]);
1217 break;
1218
1219 case OP_INSTANCE_OF:
1220 genInstanceof(cUnit, mir, rlDest, rlSrc[0]);
1221 break;
1222
1223 case OP_NEW_INSTANCE:
1224 genNewInstance(cUnit, mir, rlDest);
1225 break;
1226
1227 case OP_THROW:
1228 genThrow(cUnit, mir, rlSrc[0]);
1229 break;
1230
buzbee5ade1d22011-09-09 14:44:52 -07001231 case OP_THROW_VERIFICATION_ERROR:
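            // Call the pThrowVerificationErrorFromCode helper through the
            // unwindable-call path, passing vA and vB (presumably the error
            // kind and the offending reference) in r0/r1.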
1232 loadWordDisp(cUnit, rSELF,
1233 OFFSETOF_MEMBER(Thread, pThrowVerificationErrorFromCode), rLR);
1234 loadConstant(cUnit, r0, mir->dalvikInsn.vA);
1235 loadConstant(cUnit, r1, mir->dalvikInsn.vB);
buzbeeec5adf32011-09-11 15:25:43 -07001236 callUnwindableHelper(cUnit, rLR);
buzbee5ade1d22011-09-09 14:44:52 -07001237 break;
1238
buzbee67bf8852011-08-17 17:51:35 -07001239 case OP_ARRAY_LENGTH:
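            // Null-check the array reference, then load its length field.
            // Roughly: ldr rDest, [rArray, #Array::length_offset]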
1240 int lenOffset;
buzbeec143c552011-08-20 17:38:58 -07001241 lenOffset = Array::LengthOffset().Int32Value();
buzbee7b1b86d2011-08-26 18:59:10 -07001242 rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
buzbee5ade1d22011-09-09 14:44:52 -07001243 genNullCheck(cUnit, rlSrc[0].sRegLow, rlSrc[0].lowReg, mir);
buzbee67bf8852011-08-17 17:51:35 -07001244 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1245 loadWordDisp(cUnit, rlSrc[0].lowReg, lenOffset,
1246 rlResult.lowReg);
1247 storeValue(cUnit, rlDest, rlResult);
1248 break;
1249
1250 case OP_CONST_STRING:
1251 case OP_CONST_STRING_JUMBO:
1252 genConstString(cUnit, mir, rlDest, rlSrc[0]);
1253 break;
1254
1255 case OP_CONST_CLASS:
1256 genConstClass(cUnit, mir, rlDest, rlSrc[0]);
1257 break;
1258
1259 case OP_FILL_ARRAY_DATA:
1260 genFillArrayData(cUnit, mir, rlSrc[0]);
1261 break;
1262
1263 case OP_FILLED_NEW_ARRAY:
1264 genFilledNewArray(cUnit, mir, false /* not range */);
1265 break;
1266
1267 case OP_FILLED_NEW_ARRAY_RANGE:
1268 genFilledNewArray(cUnit, mir, true /* range */);
1269 break;
1270
1271 case OP_NEW_ARRAY:
1272 genNewArray(cUnit, mir, rlDest, rlSrc[0]);
1273 break;
1274
1275 case OP_GOTO:
1276 case OP_GOTO_16:
1277 case OP_GOTO_32:
1278 // TUNING: add MIR flag to disable when unnecessary
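            // Backward branches get a suspend poll (presumably so loops can
            // be interrupted for GC/thread suspension); forward gotos skip it.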
1279 bool backwardBranch;
1280 backwardBranch = (bb->taken->startOffset <= mir->offset);
1281 if (backwardBranch) {
1282 genSuspendPoll(cUnit, mir);
1283 }
1284 genUnconditionalBranch(cUnit, &labelList[bb->taken->id]);
1285 break;
1286
1287 case OP_PACKED_SWITCH:
1288 genPackedSwitch(cUnit, mir, rlSrc[0]);
1289 break;
1290
1291 case OP_SPARSE_SWITCH:
1292 genSparseSwitch(cUnit, mir, rlSrc[0]);
1293 break;
1294
1295 case OP_CMPL_FLOAT:
1296 case OP_CMPG_FLOAT:
1297 case OP_CMPL_DOUBLE:
1298 case OP_CMPG_DOUBLE:
1299 res = genCmpFP(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1300 break;
1301
1302 case OP_CMP_LONG:
1303 genCmpLong(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1304 break;
1305
1306 case OP_IF_EQ:
1307 case OP_IF_NE:
1308 case OP_IF_LT:
1309 case OP_IF_GE:
1310 case OP_IF_GT:
1311 case OP_IF_LE: {
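            /*
             * Two-operand compare-and-branch: load both sources into core
             * registers, emit a compare, then a conditional branch to the
             * taken block and an unconditional branch to the fall-through.
             * Roughly: cmp vA, vB ; b<cond> Ltaken ; b Lfallthrough
             */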
1312 bool backwardBranch;
1313 ArmConditionCode cond;
1314 backwardBranch = (bb->taken->startOffset <= mir->offset);
1315 if (backwardBranch) {
1316 genSuspendPoll(cUnit, mir);
1317 }
1318 rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
1319 rlSrc[1] = loadValue(cUnit, rlSrc[1], kCoreReg);
1320 opRegReg(cUnit, kOpCmp, rlSrc[0].lowReg, rlSrc[1].lowReg);
1321 switch(opcode) {
1322 case OP_IF_EQ:
1323 cond = kArmCondEq;
1324 break;
1325 case OP_IF_NE:
1326 cond = kArmCondNe;
1327 break;
1328 case OP_IF_LT:
1329 cond = kArmCondLt;
1330 break;
1331 case OP_IF_GE:
1332 cond = kArmCondGe;
1333 break;
1334 case OP_IF_GT:
1335 cond = kArmCondGt;
1336 break;
1337 case OP_IF_LE:
1338 cond = kArmCondLe;
1339 break;
1340 default:
1341 cond = (ArmConditionCode)0;
1342 LOG(FATAL) << "Unexpected opcode " << (int)opcode;
1343 }
1344 genConditionalBranch(cUnit, cond, &labelList[bb->taken->id]);
1345 genUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
1346 break;
1347 }
1348
1349 case OP_IF_EQZ:
1350 case OP_IF_NEZ:
1351 case OP_IF_LTZ:
1352 case OP_IF_GEZ:
1353 case OP_IF_GTZ:
1354 case OP_IF_LEZ: {
1355 bool backwardBranch;
1356 ArmConditionCode cond;
1357 backwardBranch = (bb->taken->startOffset <= mir->offset);
1358 if (backwardBranch) {
1359 genSuspendPoll(cUnit, mir);
1360 }
1361 rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
1362 opRegImm(cUnit, kOpCmp, rlSrc[0].lowReg, 0);
1363 switch(opcode) {
1364 case OP_IF_EQZ:
1365 cond = kArmCondEq;
1366 break;
1367 case OP_IF_NEZ:
1368 cond = kArmCondNe;
1369 break;
1370 case OP_IF_LTZ:
1371 cond = kArmCondLt;
1372 break;
1373 case OP_IF_GEZ:
1374 cond = kArmCondGe;
1375 break;
1376 case OP_IF_GTZ:
1377 cond = kArmCondGt;
1378 break;
1379 case OP_IF_LEZ:
1380 cond = kArmCondLe;
1381 break;
1382 default:
1383 cond = (ArmConditionCode)0;
1384 LOG(FATAL) << "Unexpected opcode " << (int)opcode;
1385 }
1386 genConditionalBranch(cUnit, cond, &labelList[bb->taken->id]);
1387 genUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
1388 break;
1389 }
1390
1391 case OP_AGET_WIDE:
1392 genArrayGet(cUnit, mir, kLong, rlSrc[0], rlSrc[1], rlDest, 3);
1393 break;
1394 case OP_AGET:
1395 case OP_AGET_OBJECT:
1396 genArrayGet(cUnit, mir, kWord, rlSrc[0], rlSrc[1], rlDest, 2);
1397 break;
1398 case OP_AGET_BOOLEAN:
1399 genArrayGet(cUnit, mir, kUnsignedByte, rlSrc[0], rlSrc[1],
1400 rlDest, 0);
1401 break;
1402 case OP_AGET_BYTE:
1403 genArrayGet(cUnit, mir, kSignedByte, rlSrc[0], rlSrc[1], rlDest, 0);
1404 break;
1405 case OP_AGET_CHAR:
1406 genArrayGet(cUnit, mir, kUnsignedHalf, rlSrc[0], rlSrc[1],
1407 rlDest, 1);
1408 break;
1409 case OP_AGET_SHORT:
1410 genArrayGet(cUnit, mir, kSignedHalf, rlSrc[0], rlSrc[1], rlDest, 1);
1411 break;
1412 case OP_APUT_WIDE:
1413 genArrayPut(cUnit, mir, kLong, rlSrc[1], rlSrc[2], rlSrc[0], 3);
1414 break;
1415 case OP_APUT:
1416 genArrayPut(cUnit, mir, kWord, rlSrc[1], rlSrc[2], rlSrc[0], 2);
1417 break;
1418 case OP_APUT_OBJECT:
buzbee1b4c8592011-08-31 10:43:51 -07001419 genArrayObjPut(cUnit, mir, rlSrc[1], rlSrc[2], rlSrc[0], 2);
buzbee67bf8852011-08-17 17:51:35 -07001420 break;
1421 case OP_APUT_SHORT:
1422 case OP_APUT_CHAR:
1423 genArrayPut(cUnit, mir, kUnsignedHalf, rlSrc[1], rlSrc[2],
1424 rlSrc[0], 1);
1425 break;
1426 case OP_APUT_BYTE:
1427 case OP_APUT_BOOLEAN:
1428 genArrayPut(cUnit, mir, kUnsignedByte, rlSrc[1], rlSrc[2],
1429 rlSrc[0], 0);
1430 break;
1431
1432 case OP_IGET_WIDE:
1433 case OP_IGET_WIDE_VOLATILE:
buzbee43a36422011-09-14 14:00:13 -07001434 genIGetWide(cUnit, mir, rlDest, rlSrc[0]);
buzbee67bf8852011-08-17 17:51:35 -07001435 break;
1436
1437 case OP_IGET:
1438 case OP_IGET_VOLATILE:
1439 case OP_IGET_OBJECT:
1440 case OP_IGET_OBJECT_VOLATILE:
buzbee43a36422011-09-14 14:00:13 -07001441 genIGet(cUnit, mir, kWord, rlDest, rlSrc[0]);
buzbee67bf8852011-08-17 17:51:35 -07001442 break;
1443
1444 case OP_IGET_BOOLEAN:
1445 case OP_IGET_BYTE:
buzbee43a36422011-09-14 14:00:13 -07001446 genIGet(cUnit, mir, kUnsignedByte, rlDest, rlSrc[0]);
buzbee67bf8852011-08-17 17:51:35 -07001447 break;
1448
1449 case OP_IGET_CHAR:
buzbee43a36422011-09-14 14:00:13 -07001450 genIGet(cUnit, mir, kUnsignedHalf, rlDest, rlSrc[0]);
buzbee67bf8852011-08-17 17:51:35 -07001451 break;
1452
1453 case OP_IGET_SHORT:
buzbee43a36422011-09-14 14:00:13 -07001454 genIGet(cUnit, mir, kSignedHalf, rlDest, rlSrc[0]);
buzbee67bf8852011-08-17 17:51:35 -07001455 break;
1456
1457 case OP_IPUT_WIDE:
1458 case OP_IPUT_WIDE_VOLATILE:
buzbee43a36422011-09-14 14:00:13 -07001459 genIPutWide(cUnit, mir, rlSrc[0], rlSrc[1]);
buzbee67bf8852011-08-17 17:51:35 -07001460 break;
1461
1462 case OP_IPUT_OBJECT:
1463 case OP_IPUT_OBJECT_VOLATILE:
buzbee43a36422011-09-14 14:00:13 -07001464 genIPut(cUnit, mir, kWord, rlSrc[0], rlSrc[1], true);
buzbee67bf8852011-08-17 17:51:35 -07001465 break;
1466
1467 case OP_IPUT:
1468 case OP_IPUT_VOLATILE:
buzbee43a36422011-09-14 14:00:13 -07001469 genIPut(cUnit, mir, kWord, rlSrc[0], rlSrc[1], false);
buzbee67bf8852011-08-17 17:51:35 -07001470 break;
1471
1472 case OP_IPUT_BOOLEAN:
1473 case OP_IPUT_BYTE:
buzbee43a36422011-09-14 14:00:13 -07001474 genIPut(cUnit, mir, kUnsignedByte, rlSrc[0], rlSrc[1], false);
buzbee67bf8852011-08-17 17:51:35 -07001475 break;
1476
1477 case OP_IPUT_CHAR:
buzbee43a36422011-09-14 14:00:13 -07001478 genIPut(cUnit, mir, kUnsignedHalf, rlSrc[0], rlSrc[1], false);
buzbee67bf8852011-08-17 17:51:35 -07001479 break;
1480
1481 case OP_IPUT_SHORT:
buzbee43a36422011-09-14 14:00:13 -07001482 genIPut(cUnit, mir, kSignedHalf, rlSrc[0], rlSrc[1], false);
buzbee67bf8852011-08-17 17:51:35 -07001483 break;
1484
1485 case OP_SGET:
1486 case OP_SGET_OBJECT:
1487 case OP_SGET_BOOLEAN:
1488 case OP_SGET_BYTE:
1489 case OP_SGET_CHAR:
1490 case OP_SGET_SHORT:
1491 genSget(cUnit, mir, rlResult, rlDest);
1492 break;
1493
1494 case OP_SGET_WIDE:
1495 genSgetWide(cUnit, mir, rlResult, rlDest);
1496 break;
1497
1498 case OP_SPUT:
1499 case OP_SPUT_OBJECT:
1500 case OP_SPUT_BOOLEAN:
1501 case OP_SPUT_BYTE:
1502 case OP_SPUT_CHAR:
1503 case OP_SPUT_SHORT:
1504 genSput(cUnit, mir, rlSrc[0]);
1505 break;
1506
1507 case OP_SPUT_WIDE:
1508 genSputWide(cUnit, mir, rlSrc[0]);
1509 break;
1510
1511 case OP_INVOKE_STATIC_RANGE:
buzbee561227c2011-09-02 15:28:19 -07001512 genInvokeStaticDirect(cUnit, mir, false /*direct*/,
1513 true /*range*/);
1514 break;
buzbee67bf8852011-08-17 17:51:35 -07001515 case OP_INVOKE_STATIC:
buzbee561227c2011-09-02 15:28:19 -07001516 genInvokeStaticDirect(cUnit, mir, false /*direct*/,
1517 false /*range*/);
buzbee67bf8852011-08-17 17:51:35 -07001518 break;
1519
1520 case OP_INVOKE_DIRECT:
buzbee561227c2011-09-02 15:28:19 -07001521 genInvokeStaticDirect(cUnit, mir, true /*direct*/,
1522 false /*range*/);
1523 break;
buzbee67bf8852011-08-17 17:51:35 -07001524 case OP_INVOKE_DIRECT_RANGE:
buzbee561227c2011-09-02 15:28:19 -07001525 genInvokeStaticDirect(cUnit, mir, true /*direct*/,
1526 true /*range*/);
buzbee67bf8852011-08-17 17:51:35 -07001527 break;
1528
1529 case OP_INVOKE_VIRTUAL:
1530 case OP_INVOKE_VIRTUAL_RANGE:
1531 genInvokeVirtual(cUnit, mir);
1532 break;
1533
1534 case OP_INVOKE_SUPER:
1535 case OP_INVOKE_SUPER_RANGE:
1536 genInvokeSuper(cUnit, mir);
1537 break;
1538
1539 case OP_INVOKE_INTERFACE:
1540 case OP_INVOKE_INTERFACE_RANGE:
1541 genInvokeInterface(cUnit, mir);
1542 break;
1543
1544 case OP_NEG_INT:
1545 case OP_NOT_INT:
1546 res = genArithOpInt(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
1547 break;
1548
1549 case OP_NEG_LONG:
1550 case OP_NOT_LONG:
1551 res = genArithOpLong(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
1552 break;
1553
1554 case OP_NEG_FLOAT:
1555 res = genArithOpFloat(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
1556 break;
1557
1558 case OP_NEG_DOUBLE:
1559 res = genArithOpDouble(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
1560 break;
1561
1562 case OP_INT_TO_LONG:
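            // Widen by copying/loading the low word and sign-extending it
            // into the high word with asr #31.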
1563 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1564 if (rlSrc[0].location == kLocPhysReg) {
1565 genRegCopy(cUnit, rlResult.lowReg, rlSrc[0].lowReg);
1566 } else {
1567 loadValueDirect(cUnit, rlSrc[0], rlResult.lowReg);
1568 }
1569 opRegRegImm(cUnit, kOpAsr, rlResult.highReg,
1570 rlResult.lowReg, 31);
1571 storeValueWide(cUnit, rlDest, rlResult);
1572 break;
1573
1574 case OP_LONG_TO_INT:
1575 rlSrc[0] = oatUpdateLocWide(cUnit, rlSrc[0]);
1576 rlSrc[0] = oatWideToNarrow(cUnit, rlSrc[0]);
1577 storeValue(cUnit, rlDest, rlSrc[0]);
1578 break;
1579
1580 case OP_INT_TO_BYTE:
1581 rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
1582 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1583 opRegReg(cUnit, kOp2Byte, rlResult.lowReg, rlSrc[0].lowReg);
1584 storeValue(cUnit, rlDest, rlResult);
1585 break;
1586
1587 case OP_INT_TO_SHORT:
1588 rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
1589 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1590 opRegReg(cUnit, kOp2Short, rlResult.lowReg, rlSrc[0].lowReg);
1591 storeValue(cUnit, rlDest, rlResult);
1592 break;
1593
1594 case OP_INT_TO_CHAR:
1595 rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
1596 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1597 opRegReg(cUnit, kOp2Char, rlResult.lowReg, rlSrc[0].lowReg);
1598 storeValue(cUnit, rlDest, rlResult);
1599 break;
1600
1601 case OP_INT_TO_FLOAT:
1602 case OP_INT_TO_DOUBLE:
1603 case OP_LONG_TO_FLOAT:
1604 case OP_LONG_TO_DOUBLE:
1605 case OP_FLOAT_TO_INT:
1606 case OP_FLOAT_TO_LONG:
1607 case OP_FLOAT_TO_DOUBLE:
1608 case OP_DOUBLE_TO_INT:
1609 case OP_DOUBLE_TO_LONG:
1610 case OP_DOUBLE_TO_FLOAT:
1611 genConversion(cUnit, mir);
1612 break;
1613
1614 case OP_ADD_INT:
1615 case OP_SUB_INT:
1616 case OP_MUL_INT:
1617 case OP_DIV_INT:
1618 case OP_REM_INT:
1619 case OP_AND_INT:
1620 case OP_OR_INT:
1621 case OP_XOR_INT:
1622 case OP_SHL_INT:
1623 case OP_SHR_INT:
1624 case OP_USHR_INT:
1625 case OP_ADD_INT_2ADDR:
1626 case OP_SUB_INT_2ADDR:
1627 case OP_MUL_INT_2ADDR:
1628 case OP_DIV_INT_2ADDR:
1629 case OP_REM_INT_2ADDR:
1630 case OP_AND_INT_2ADDR:
1631 case OP_OR_INT_2ADDR:
1632 case OP_XOR_INT_2ADDR:
1633 case OP_SHL_INT_2ADDR:
1634 case OP_SHR_INT_2ADDR:
1635 case OP_USHR_INT_2ADDR:
1636 genArithOpInt(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1637 break;
1638
1639 case OP_ADD_LONG:
1640 case OP_SUB_LONG:
1641 case OP_MUL_LONG:
1642 case OP_DIV_LONG:
1643 case OP_REM_LONG:
1644 case OP_AND_LONG:
1645 case OP_OR_LONG:
1646 case OP_XOR_LONG:
1647 case OP_ADD_LONG_2ADDR:
1648 case OP_SUB_LONG_2ADDR:
1649 case OP_MUL_LONG_2ADDR:
1650 case OP_DIV_LONG_2ADDR:
1651 case OP_REM_LONG_2ADDR:
1652 case OP_AND_LONG_2ADDR:
1653 case OP_OR_LONG_2ADDR:
1654 case OP_XOR_LONG_2ADDR:
1655 genArithOpLong(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1656 break;
1657
buzbee67bf8852011-08-17 17:51:35 -07001658 case OP_SHL_LONG:
1659 case OP_SHR_LONG:
1660 case OP_USHR_LONG:
buzbeee6d61962011-08-27 11:58:19 -07001661 case OP_SHL_LONG_2ADDR:
1662 case OP_SHR_LONG_2ADDR:
1663 case OP_USHR_LONG_2ADDR:
buzbee67bf8852011-08-17 17:51:35 -07001664 genShiftOpLong(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);

1665 break;
1666
1667 case OP_ADD_FLOAT:
1668 case OP_SUB_FLOAT:
1669 case OP_MUL_FLOAT:
1670 case OP_DIV_FLOAT:
1671 case OP_REM_FLOAT:
1672 case OP_ADD_FLOAT_2ADDR:
1673 case OP_SUB_FLOAT_2ADDR:
1674 case OP_MUL_FLOAT_2ADDR:
1675 case OP_DIV_FLOAT_2ADDR:
1676 case OP_REM_FLOAT_2ADDR:
1677 genArithOpFloat(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1678 break;
1679
1680 case OP_ADD_DOUBLE:
1681 case OP_SUB_DOUBLE:
1682 case OP_MUL_DOUBLE:
1683 case OP_DIV_DOUBLE:
1684 case OP_REM_DOUBLE:
1685 case OP_ADD_DOUBLE_2ADDR:
1686 case OP_SUB_DOUBLE_2ADDR:
1687 case OP_MUL_DOUBLE_2ADDR:
1688 case OP_DIV_DOUBLE_2ADDR:
1689 case OP_REM_DOUBLE_2ADDR:
1690 genArithOpDouble(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1691 break;
1692
1693 case OP_RSUB_INT:
1694 case OP_ADD_INT_LIT16:
1695 case OP_MUL_INT_LIT16:
1696 case OP_DIV_INT_LIT16:
1697 case OP_REM_INT_LIT16:
1698 case OP_AND_INT_LIT16:
1699 case OP_OR_INT_LIT16:
1700 case OP_XOR_INT_LIT16:
1701 case OP_ADD_INT_LIT8:
1702 case OP_RSUB_INT_LIT8:
1703 case OP_MUL_INT_LIT8:
1704 case OP_DIV_INT_LIT8:
1705 case OP_REM_INT_LIT8:
1706 case OP_AND_INT_LIT8:
1707 case OP_OR_INT_LIT8:
1708 case OP_XOR_INT_LIT8:
1709 case OP_SHL_INT_LIT8:
1710 case OP_SHR_INT_LIT8:
1711 case OP_USHR_INT_LIT8:
1712 genArithOpIntLit(cUnit, mir, rlDest, rlSrc[0], mir->dalvikInsn.vC);
1713 break;
1714
1715 default:
1716 res = true;
1717 }
1718 return res;
1719}
1720
1721static const char *extendedMIROpNames[kMirOpLast - kMirOpFirst] = {
1722 "kMirOpPhi",
1723 "kMirOpNullNRangeUpCheck",
1724 "kMirOpNullNRangeDownCheck",
1725 "kMirOpLowerBound",
1726 "kMirOpPunt",
1727 "kMirOpCheckInlinePrediction",
1728};
1729
1730/* Extended MIR instructions like PHI */
1731static void handleExtendedMethodMIR(CompilationUnit* cUnit, MIR* mir)
1732{
1733 int opOffset = mir->dalvikInsn.opcode - kMirOpFirst;
1734 char* msg = (char*)oatNew(strlen(extendedMIROpNames[opOffset]) + 1, false);
1735 strcpy(msg, extendedMIROpNames[opOffset]);
1736 ArmLIR* op = newLIR1(cUnit, kArmPseudoExtended, (int) msg);
1737
1738 switch ((ExtendedMIROpcode)mir->dalvikInsn.opcode) {
1739 case kMirOpPhi: {
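            // Phi nodes generate no target code; emit just the SSA annotation
            // pseudo-LIR (for verbose listings) and mark the op as a nop.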
1740 char* ssaString = oatGetSSAString(cUnit, mir->ssaRep);
1741 op->flags.isNop = true;
1742 newLIR1(cUnit, kArmPseudoSSARep, (int) ssaString);
1743 break;
1744 }
1745 default:
1746 break;
1747 }
1748}
1749
1750/* If there are any ins passed in registers that have not been promoted
1751 * to a callee-save register, flush them to the frame.
buzbeedfd3d702011-08-28 12:56:51 -07001752 * Note: at this point, any ins passed in register are also copied to
 1753 * their home location. */
buzbee67bf8852011-08-17 17:51:35 -07001754static void flushIns(CompilationUnit* cUnit)
1755{
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001756 if (cUnit->method->NumIns() == 0)
buzbee67bf8852011-08-17 17:51:35 -07001757 return;
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001758 int inRegs = (cUnit->method->NumIns() > 2) ? 3
1759 : cUnit->method->NumIns();
buzbee67bf8852011-08-17 17:51:35 -07001760 int startReg = r1;
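    // Ins are assumed to arrive in r1..r3 (r0 is taken by the Method*), so
    // at most three ins can be register-resident on entry.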
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001761 int startLoc = cUnit->method->NumRegisters() -
1762 cUnit->method->NumIns();
buzbee67bf8852011-08-17 17:51:35 -07001763 for (int i = 0; i < inRegs; i++) {
1764 RegLocation loc = cUnit->regLocation[startLoc + i];
buzbeedfd3d702011-08-28 12:56:51 -07001765 //TUNING: be smarter about flushing ins to frame
1766 storeBaseDisp(cUnit, rSP, loc.spOffset, startReg + i, kWord);
buzbee67bf8852011-08-17 17:51:35 -07001767 if (loc.location == kLocPhysReg) {
1768 genRegCopy(cUnit, loc.lowReg, startReg + i);
buzbee67bf8852011-08-17 17:51:35 -07001769 }
1770 }
1771
1772 // Handle special case of wide argument half in regs, half in frame
1773 if (inRegs == 3) {
1774 RegLocation loc = cUnit->regLocation[startLoc + 2];
1775 if (loc.wide && loc.location == kLocPhysReg) {
1776 // Load the other half of the arg into the promoted pair
buzbee561227c2011-09-02 15:28:19 -07001777 loadWordDisp(cUnit, rSP, loc.spOffset + 4, loc.highReg);
buzbee67bf8852011-08-17 17:51:35 -07001778 inRegs++;
1779 }
1780 }
1781
1782 // Now, do initial assignment of all promoted arguments passed in frame
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001783 for (int i = inRegs; i < cUnit->method->NumIns();) {
buzbee67bf8852011-08-17 17:51:35 -07001784 RegLocation loc = cUnit->regLocation[startLoc + i];
1785 if (loc.fpLocation == kLocPhysReg) {
1786 loc.location = kLocPhysReg;
1787 loc.fp = true;
1788 loc.lowReg = loc.fpLowReg;
1789 loc.highReg = loc.fpHighReg;
1790 }
1791 if (loc.location == kLocPhysReg) {
1792 if (loc.wide) {
1793 loadBaseDispWide(cUnit, NULL, rSP, loc.spOffset,
1794 loc.lowReg, loc.highReg, INVALID_SREG);
1795 i++;
1796 } else {
buzbee561227c2011-09-02 15:28:19 -07001797 loadWordDisp(cUnit, rSP, loc.spOffset, loc.lowReg);
buzbee67bf8852011-08-17 17:51:35 -07001798 }
1799 }
1800 i++;
1801 }
1802}
1803
1804/* Handle the content in each basic block */
1805static bool methodBlockCodeGen(CompilationUnit* cUnit, BasicBlock* bb)
1806{
1807 MIR* mir;
1808 ArmLIR* labelList = (ArmLIR*) cUnit->blockLabelList;
1809 int blockId = bb->id;
1810
1811 cUnit->curBlock = bb;
1812 labelList[blockId].operands[0] = bb->startOffset;
1813
1814 /* Insert the block label */
1815 labelList[blockId].opcode = kArmPseudoNormalBlockLabel;
1816 oatAppendLIR(cUnit, (LIR*) &labelList[blockId]);
1817
1818 oatClobberAllRegs(cUnit);
buzbee67bf8852011-08-17 17:51:35 -07001819
1820 ArmLIR* headLIR = NULL;
1821
1822 if (bb->blockType == kEntryBlock) {
1823 /*
1824 * On entry, r0, r1, r2 & r3 are live. Let the register allocation
1825 * mechanism know so it doesn't try to use any of them when
1826 * expanding the frame or flushing. This leaves the utility
1827 * code with a single temp: r12. This should be enough.
1828 */
1829 oatLockTemp(cUnit, r0);
1830 oatLockTemp(cUnit, r1);
1831 oatLockTemp(cUnit, r2);
1832 oatLockTemp(cUnit, r3);
buzbeecefd1872011-09-09 09:59:52 -07001833
1834 /*
1835 * We can safely skip the stack overflow check if we're
1836 * a leaf *and* our frame size < fudge factor.
1837 */
1838 bool skipOverflowCheck = ((cUnit->attrs & METHOD_IS_LEAF) &&
1839 ((size_t)cUnit->frameSize <
1840 art::Thread::kStackOverflowReservedBytes));
buzbee67bf8852011-08-17 17:51:35 -07001841 newLIR0(cUnit, kArmPseudoMethodEntry);
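        /*
         * Stack overflow check (unless skipped): load the thread's stack-end
         * limit into r12, compute the would-be new SP into rLR, branch to the
         * kArmThrowStackOverflow launchpad if it dips below the limit, and
         * only then commit the new SP.
         */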
buzbeecefd1872011-09-09 09:59:52 -07001842 if (!skipOverflowCheck) {
1843 /* Load stack limit */
1844 loadWordDisp(cUnit, rSELF,
1845 art::Thread::StackEndOffset().Int32Value(), r12);
1846 }
buzbee67bf8852011-08-17 17:51:35 -07001847 /* Spill core callee saves */
1848 newLIR1(cUnit, kThumb2Push, cUnit->coreSpillMask);
1849 /* Need to spill any FP regs? */
1850 if (cUnit->numFPSpills) {
1851 newLIR1(cUnit, kThumb2VPushCS, cUnit->numFPSpills);
1852 }
buzbeecefd1872011-09-09 09:59:52 -07001853 if (!skipOverflowCheck) {
1854 opRegRegImm(cUnit, kOpSub, rLR, rSP,
1855 cUnit->frameSize - (cUnit->numSpills * 4));
buzbeeec5adf32011-09-11 15:25:43 -07001856 genRegRegCheck(cUnit, kArmCondCc, rLR, r12, NULL,
1857 kArmThrowStackOverflow);
buzbeecefd1872011-09-09 09:59:52 -07001858 genRegCopy(cUnit, rSP, rLR); // Establish stack
1859 } else {
1860 opRegImm(cUnit, kOpSub, rSP,
1861 cUnit->frameSize - (cUnit->numSpills * 4));
1862 }
buzbee67bf8852011-08-17 17:51:35 -07001863 storeBaseDisp(cUnit, rSP, 0, r0, kWord);
1864 flushIns(cUnit);
1865 oatFreeTemp(cUnit, r0);
1866 oatFreeTemp(cUnit, r1);
1867 oatFreeTemp(cUnit, r2);
1868 oatFreeTemp(cUnit, r3);
1869 } else if (bb->blockType == kExitBlock) {
1870 newLIR0(cUnit, kArmPseudoMethodExit);
1871 opRegImm(cUnit, kOpAdd, rSP, cUnit->frameSize - (cUnit->numSpills * 4));
1872 /* Need to restore any FP callee saves? */
1873 if (cUnit->numFPSpills) {
1874 newLIR1(cUnit, kThumb2VPopCS, cUnit->numFPSpills);
1875 }
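        // Return optimization: if LR was spilled on entry, pop it straight
        // into PC so the pop doubles as the return; otherwise fall through
        // to an explicit bx lr below.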
1876 if (cUnit->coreSpillMask & (1 << rLR)) {
1877 /* Unspill rLR to rPC */
1878 cUnit->coreSpillMask &= ~(1 << rLR);
1879 cUnit->coreSpillMask |= (1 << rPC);
1880 }
1881 newLIR1(cUnit, kThumb2Pop, cUnit->coreSpillMask);
1882 if (!(cUnit->coreSpillMask & (1 << rPC))) {
 1883 /* We didn't pop to rPC, so must do a bx rLR */
1884 newLIR1(cUnit, kThumbBx, rLR);
1885 }
1886 }
1887
1888 for (mir = bb->firstMIRInsn; mir; mir = mir->next) {
1889
1890 oatResetRegPool(cUnit);
buzbeef0cde542011-09-13 14:55:02 -07001891 oatClobberAllRegs(cUnit);
buzbee67bf8852011-08-17 17:51:35 -07001892
1893 if (cUnit->disableOpt & (1 << kSuppressLoads)) {
1894 oatResetDefTracking(cUnit);
1895 }
1896
1897 if ((int)mir->dalvikInsn.opcode >= (int)kMirOpFirst) {
1898 handleExtendedMethodMIR(cUnit, mir);
1899 continue;
1900 }
1901
1902 cUnit->currentDalvikOffset = mir->offset;
1903
1904 Opcode dalvikOpcode = mir->dalvikInsn.opcode;
1905 InstructionFormat dalvikFormat =
1906 dexGetFormatFromOpcode(dalvikOpcode);
1907
1908 ArmLIR* boundaryLIR;
1909
1910 /* Mark the beginning of a Dalvik instruction for line tracking */
1911 boundaryLIR = newLIR1(cUnit, kArmPseudoDalvikByteCodeBoundary,
1912 (int) oatGetDalvikDisassembly(
1913 &mir->dalvikInsn, ""));
1914 /* Remember the first LIR for this block */
1915 if (headLIR == NULL) {
1916 headLIR = boundaryLIR;
1917 /* Set the first boundaryLIR as a scheduling barrier */
1918 headLIR->defMask = ENCODE_ALL;
1919 }
1920
1921 /* Don't generate the SSA annotation unless verbose mode is on */
1922 if (cUnit->printMe && mir->ssaRep) {
1923 char *ssaString = oatGetSSAString(cUnit, mir->ssaRep);
1924 newLIR1(cUnit, kArmPseudoSSARep, (int) ssaString);
1925 }
1926
1927 bool notHandled = compileDalvikInstruction(cUnit, mir, bb, labelList);
1928
1929 if (notHandled) {
1930 char buf[100];
1931 snprintf(buf, 100, "%#06x: Opcode %#x (%s) / Fmt %d not handled",
1932 mir->offset,
1933 dalvikOpcode, dexGetOpcodeName(dalvikOpcode),
1934 dalvikFormat);
1935 LOG(FATAL) << buf;
1936 }
1937 }
1938
1939 if (headLIR) {
1940 /*
1941 * Eliminate redundant loads/stores and delay stores into later
1942 * slots
1943 */
1944 oatApplyLocalOptimizations(cUnit, (LIR*) headLIR,
1945 cUnit->lastLIRInsn);
1946
1947 /*
1948 * Generate an unconditional branch to the fallthrough block.
1949 */
1950 if (bb->fallThrough) {
1951 genUnconditionalBranch(cUnit,
1952 &labelList[bb->fallThrough->id]);
1953 }
1954 }
1955 return false;
1956}
1957
1958/*
1959 * Nop any unconditional branches that go to the next instruction.
1960 * Note: new redundant branches may be inserted later, and we'll
1961 * use a check in final instruction assembly to nop those out.
1962 */
1963void removeRedundantBranches(CompilationUnit* cUnit)
1964{
1965 ArmLIR* thisLIR;
1966
1967 for (thisLIR = (ArmLIR*) cUnit->firstLIRInsn;
1968 thisLIR != (ArmLIR*) cUnit->lastLIRInsn;
1969 thisLIR = NEXT_LIR(thisLIR)) {
1970
1971 /* Branch to the next instruction */
1972 if ((thisLIR->opcode == kThumbBUncond) ||
1973 (thisLIR->opcode == kThumb2BUncond)) {
1974 ArmLIR* nextLIR = thisLIR;
1975
1976 while (true) {
1977 nextLIR = NEXT_LIR(nextLIR);
1978
1979 /*
1980 * Is the branch target the next instruction?
1981 */
1982 if (nextLIR == (ArmLIR*) thisLIR->generic.target) {
1983 thisLIR->flags.isNop = true;
1984 break;
1985 }
1986
1987 /*
1988 * Found real useful stuff between the branch and the target.
1989 * Need to explicitly check the lastLIRInsn here because it
1990 * might be the last real instruction.
1991 */
1992 if (!isPseudoOpcode(nextLIR->opcode) ||
 1993 (nextLIR == (ArmLIR*) cUnit->lastLIRInsn))
1994 break;
1995 }
1996 }
1997 }
1998}
1999
buzbee5ade1d22011-09-09 14:44:52 -07002000static void handleThrowLaunchpads(CompilationUnit *cUnit)
2001{
2002 ArmLIR** throwLabel =
2003 (ArmLIR **) cUnit->throwLaunchpads.elemList;
2004 int numElems = cUnit->throwLaunchpads.numUsed;
2005 int i;
2006
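    /*
     * Each launchpad is an out-of-line slow path appended after the method
     * body: operands[0] holds the throw kind, operands[1] the Dalvik offset
     * of the throwing instruction, and operands[2]/[3] any values to be
     * marshalled into r0/r1 before calling the matching Thread helper.
     */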
2007 for (i = 0; i < numElems; i++) {
2008 ArmLIR* lab = throwLabel[i];
2009 cUnit->currentDalvikOffset = lab->operands[1];
2010 oatAppendLIR(cUnit, (LIR *)lab);
2011 int funcOffset = 0;
2012 int v1 = lab->operands[2];
2013 int v2 = lab->operands[3];
2014 switch(lab->operands[0]) {
2015 case kArmThrowNullPointer:
2016 funcOffset = OFFSETOF_MEMBER(Thread, pThrowNullPointerFromCode);
2017 break;
2018 case kArmThrowArrayBounds:
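            // Get the two operands into r0/r1 without clobbering either;
            // r12 is used as scratch when they arrive exactly swapped
            // (v1 in r1, v2 in r0).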
2019 if (v2 != r0) {
2020 genRegCopy(cUnit, r0, v1);
2021 genRegCopy(cUnit, r1, v2);
2022 } else {
2023 if (v1 == r1) {
2024 genRegCopy(cUnit, r12, v1);
2025 genRegCopy(cUnit, r1, v2);
2026 genRegCopy(cUnit, r0, r12);
2027 } else {
2028 genRegCopy(cUnit, r1, v2);
2029 genRegCopy(cUnit, r0, v1);
2030 }
2031 }
2032 funcOffset = OFFSETOF_MEMBER(Thread, pThrowArrayBoundsFromCode);
2033 break;
2034 case kArmThrowDivZero:
2035 funcOffset = OFFSETOF_MEMBER(Thread, pThrowDivZeroFromCode);
2036 break;
2037 case kArmThrowVerificationError:
2038 loadConstant(cUnit, r0, v1);
2039 loadConstant(cUnit, r1, v2);
2040 funcOffset =
2041 OFFSETOF_MEMBER(Thread, pThrowVerificationErrorFromCode);
2042 break;
2043 case kArmThrowNegArraySize:
2044 genRegCopy(cUnit, r0, v1);
2045 funcOffset =
2046 OFFSETOF_MEMBER(Thread, pThrowNegArraySizeFromCode);
2047 break;
2048 case kArmThrowInternalError:
2049 genRegCopy(cUnit, r0, v1);
2050 funcOffset =
2051 OFFSETOF_MEMBER(Thread, pThrowInternalErrorFromCode);
2052 break;
2053 case kArmThrowRuntimeException:
2054 genRegCopy(cUnit, r0, v1);
2055 funcOffset =
2056 OFFSETOF_MEMBER(Thread, pThrowRuntimeExceptionFromCode);
2057 break;
2058 case kArmThrowNoSuchMethod:
2059 genRegCopy(cUnit, r0, v1);
2060 funcOffset =
2061 OFFSETOF_MEMBER(Thread, pThrowNoSuchMethodFromCode);
2062 break;
buzbeeec5adf32011-09-11 15:25:43 -07002063 case kArmThrowStackOverflow:
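            // At this point the new SP was never committed (the check fires
            // before rSP is updated), so presumably only the spill pushes
            // need to be undone before calling the overflow helper.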
2064 funcOffset =
2065 OFFSETOF_MEMBER(Thread, pStackOverflowFromCode);
2066 // Restore stack alignment
2067 opRegImm(cUnit, kOpAdd, rSP, cUnit->numSpills * 4);
2068 break;
buzbee5ade1d22011-09-09 14:44:52 -07002069 default:
2070 LOG(FATAL) << "Unexpected throw kind: " << lab->operands[0];
2071 }
2072 loadWordDisp(cUnit, rSELF, funcOffset, rLR);
buzbeeec5adf32011-09-11 15:25:43 -07002073 callUnwindableHelper(cUnit, rLR);
buzbee5ade1d22011-09-09 14:44:52 -07002074 }
2075}
2076
buzbee67bf8852011-08-17 17:51:35 -07002077void oatMethodMIR2LIR(CompilationUnit* cUnit)
2078{
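    /*
     * Top-level MIR-to-LIR driver: allocate one label per basic block, walk
     * the blocks in pre-order emitting LIR, strip branches that target the
     * next instruction, then append the throw launchpads.
     */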
2079 /* Used to hold the labels of each block */
2080 cUnit->blockLabelList =
2081 (void *) oatNew(sizeof(ArmLIR) * cUnit->numBlocks, true);
2082
2083 oatDataFlowAnalysisDispatcher(cUnit, methodBlockCodeGen,
2084 kPreOrderDFSTraversal, false /* Iterative */);
2085 removeRedundantBranches(cUnit);
buzbee5ade1d22011-09-09 14:44:52 -07002086
2087 handleThrowLaunchpads(cUnit);
buzbee67bf8852011-08-17 17:51:35 -07002088}
2089
2090/* Common initialization routine for an architecture family */
2091bool oatArchInit()
2092{
2093 int i;
2094
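    // Sanity-check the assembler's EncodingMap: each entry must sit at the
    // index matching its own opcode, so enum reordering is caught early.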
2095 for (i = 0; i < kArmLast; i++) {
2096 if (EncodingMap[i].opcode != i) {
2097 LOG(FATAL) << "Encoding order for " << EncodingMap[i].name <<
2098 " is wrong: expecting " << i << ", seeing " <<
2099 (int)EncodingMap[i].opcode;
2100 }
2101 }
2102
2103 return oatArchVariantInit();
2104}
2105
2106/* Needed by the Assembler */
2107void oatSetupResourceMasks(ArmLIR* lir)
2108{
2109 setupResourceMasks(lir);
2110}
2111
 2112/* Needed by the ld/st optimizations */
2113ArmLIR* oatRegCopyNoInsert(CompilationUnit* cUnit, int rDest, int rSrc)
2114{
2115 return genRegCopyNoInsert(cUnit, rDest, rSrc);
2116}
2117
2118/* Needed by the register allocator */
2119ArmLIR* oatRegCopy(CompilationUnit* cUnit, int rDest, int rSrc)
2120{
2121 return genRegCopy(cUnit, rDest, rSrc);
2122}
2123
2124/* Needed by the register allocator */
2125void oatRegCopyWide(CompilationUnit* cUnit, int destLo, int destHi,
2126 int srcLo, int srcHi)
2127{
2128 genRegCopyWide(cUnit, destLo, destHi, srcLo, srcHi);
2129}
2130
2131void oatFlushRegImpl(CompilationUnit* cUnit, int rBase,
2132 int displacement, int rSrc, OpSize size)
2133{
2134 storeBaseDisp(cUnit, rBase, displacement, rSrc, size);
2135}
2136
2137void oatFlushRegWideImpl(CompilationUnit* cUnit, int rBase,
2138 int displacement, int rSrcLo, int rSrcHi)
2139{
2140 storeBaseDispWide(cUnit, rBase, displacement, rSrcLo, rSrcHi);
2141}