/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dex/compiler_ir.h"
#include "dex/compiler_internals.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "mirror/array.h"
#include "oat/runtime/oat_support_entrypoints.h"
#include "verifier/method_verifier.h"

namespace art {

/*
 * This source file contains "gen" codegen routines that should
 * be applicable to most targets. Only mid-level support utilities
 * and "op" calls may be used here.
 */

/*
 * Generate a kPseudoBarrier marker to indicate the boundary of special
 * blocks.
 */
void Mir2Lir::GenBarrier() {
  LIR* barrier = NewLIR0(kPseudoBarrier);
  /* Mark all resources as being clobbered */
  barrier->def_mask = -1;
}

// FIXME: need to do some work to split out targets with
// condition codes and those without
LIR* Mir2Lir::GenCheck(ConditionCode c_code, ThrowKind kind) {
  DCHECK_NE(cu_->instruction_set, kMips);
  LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_);
  LIR* branch = OpCondBranch(c_code, tgt);
  // Remember branch target - will process later
  throw_launchpads_.Insert(tgt);
  return branch;
}

LIR* Mir2Lir::GenImmedCheck(ConditionCode c_code, int reg, int imm_val, ThrowKind kind) {
  LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg, imm_val);
  LIR* branch;
  if (c_code == kCondAl) {
    branch = OpUnconditionalBranch(tgt);
  } else {
    branch = OpCmpImmBranch(c_code, reg, imm_val, tgt);
  }
  // Remember branch target - will process later
  throw_launchpads_.Insert(tgt);
  return branch;
}

/* Perform null-check on a register. */
LIR* Mir2Lir::GenNullCheck(int s_reg, int m_reg, int opt_flags) {
  if (!(cu_->disable_opt & (1 << kNullCheckElimination)) &&
      opt_flags & MIR_IGNORE_NULL_CHECK) {
    return NULL;
  }
  return GenImmedCheck(kCondEq, m_reg, 0, kThrowNullPointer);
}

/* Perform check on two registers */
LIR* Mir2Lir::GenRegRegCheck(ConditionCode c_code, int reg1, int reg2,
                             ThrowKind kind) {
  LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg1, reg2);
  LIR* branch = OpCmpBranch(c_code, reg1, reg2, tgt);
  // Remember branch target - will process later
  throw_launchpads_.Insert(tgt);
  return branch;
}

void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
                                  RegLocation rl_src2, LIR* taken,
                                  LIR* fall_through) {
  ConditionCode cond;
  switch (opcode) {
    case Instruction::IF_EQ:
      cond = kCondEq;
      break;
    case Instruction::IF_NE:
      cond = kCondNe;
      break;
    case Instruction::IF_LT:
      cond = kCondLt;
      break;
    case Instruction::IF_GE:
      cond = kCondGe;
      break;
    case Instruction::IF_GT:
      cond = kCondGt;
      break;
    case Instruction::IF_LE:
      cond = kCondLe;
      break;
    default:
      cond = static_cast<ConditionCode>(0);
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }

  // Normalize such that if either operand is constant, src2 will be constant
  if (rl_src1.is_const) {
    RegLocation rl_temp = rl_src1;
    rl_src1 = rl_src2;
    rl_src2 = rl_temp;
    cond = FlipComparisonOrder(cond);
  }
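  // Example of the normalization above: "if-lt vA, vB" with vA constant 5
  // becomes a (vB > 5) test (kCondLt flips to kCondGt), so the constant ends
  // up in rl_src2, where OpCmpImmBranch below can fold it.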

  rl_src1 = LoadValue(rl_src1, kCoreReg);
  // Is this really an immediate comparison?
  if (rl_src2.is_const) {
    // If it's already live in a register or not easily materialized, just keep going
    RegLocation rl_temp = UpdateLoc(rl_src2);
    if ((rl_temp.location == kLocDalvikFrame) &&
        InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src2))) {
      // OK - convert this to a compare immediate and branch
      OpCmpImmBranch(cond, rl_src1.low_reg, mir_graph_->ConstantValue(rl_src2), taken);
      OpUnconditionalBranch(fall_through);
      return;
    }
  }
  rl_src2 = LoadValue(rl_src2, kCoreReg);
  OpCmpBranch(cond, rl_src1.low_reg, rl_src2.low_reg, taken);
  OpUnconditionalBranch(fall_through);
}

void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken,
                                      LIR* fall_through) {
  ConditionCode cond;
  rl_src = LoadValue(rl_src, kCoreReg);
  switch (opcode) {
    case Instruction::IF_EQZ:
      cond = kCondEq;
      break;
    case Instruction::IF_NEZ:
      cond = kCondNe;
      break;
    case Instruction::IF_LTZ:
      cond = kCondLt;
      break;
    case Instruction::IF_GEZ:
      cond = kCondGe;
      break;
    case Instruction::IF_GTZ:
      cond = kCondGt;
      break;
    case Instruction::IF_LEZ:
      cond = kCondLe;
      break;
    default:
      cond = static_cast<ConditionCode>(0);
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }
  OpCmpImmBranch(cond, rl_src.low_reg, 0, taken);
  OpUnconditionalBranch(fall_through);
}

void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (rl_src.location == kLocPhysReg) {
    OpRegCopy(rl_result.low_reg, rl_src.low_reg);
  } else {
    LoadValueDirect(rl_src, rl_result.low_reg);
  }
  OpRegRegImm(kOpAsr, rl_result.high_reg, rl_result.low_reg, 31);
  StoreValueWide(rl_dest, rl_result);
}
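
// Illustrative sketch (not exact encodings) of what GenIntToLong emits on an
// ARM-like target:
//   mov  r_lo, r_src        @ or a load from the Dalvik frame
//   asr  r_hi, r_lo, #31    @ replicate the sign bit into the high word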

void Mir2Lir::GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
                              RegLocation rl_src) {
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  OpKind op = kOpInvalid;
  switch (opcode) {
    case Instruction::INT_TO_BYTE:
      op = kOp2Byte;
      break;
    case Instruction::INT_TO_SHORT:
      op = kOp2Short;
      break;
    case Instruction::INT_TO_CHAR:
      op = kOp2Char;
      break;
    default:
      LOG(ERROR) << "Bad int conversion type";
  }
  OpRegReg(op, rl_result.low_reg, rl_src.low_reg);
  StoreValue(rl_dest, rl_result);
}
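
// Note on the narrowing ops above: kOp2Byte and kOp2Short sign-extend (Java
// byte and short are signed), while kOp2Char zero-extends (char is an
// unsigned 16-bit type). E.g. 0x0000FFFF narrowed via INT_TO_BYTE yields
// 0xFFFFFFFF (-1), but via INT_TO_CHAR yields 0x0000FFFF (65535).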

/*
 * Let helper function take care of everything. Will call
 * Array::AllocFromCode(type_idx, method, count);
 * Note: AllocFromCode will handle checks for errNegativeArraySize.
 */
void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest,
                          RegLocation rl_src) {
  FlushAllRegs();  /* Everything to home location */
  int func_offset;
  if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
                                                       type_idx)) {
    func_offset = ENTRYPOINT_OFFSET(pAllocArrayFromCode);
  } else {
    func_offset = ENTRYPOINT_OFFSET(pAllocArrayFromCodeWithAccessCheck);
  }
  CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
  RegLocation rl_result = GetReturn(false);
  StoreValue(rl_dest, rl_result);
}

/*
 * Similar to GenNewArray, but with post-allocation initialization.
 * Verifier guarantees we're dealing with an array class. Current
 * code throws a runtime exception ("bad Filled array req") for 'D' and 'J',
 * and an internal "unimplemented" error if the element type is not 'L', '[' or 'I'.
 */
void Mir2Lir::GenFilledNewArray(CallInfo* info) {
  int elems = info->num_arg_words;
  int type_idx = info->index;
  FlushAllRegs();  /* Everything to home location */
  int func_offset;
  if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
                                                       type_idx)) {
    func_offset = ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCode);
  } else {
    func_offset = ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCodeWithAccessCheck);
  }
  CallRuntimeHelperImmMethodImm(func_offset, type_idx, elems, true);
  FreeTemp(TargetReg(kArg2));
  FreeTemp(TargetReg(kArg1));
  /*
   * NOTE: the implicit target for Instruction::FILLED_NEW_ARRAY is the
   * return region. Because AllocFromCode placed the new array
   * in kRet0, we'll just lock it into place. When debugger support is
   * added, it may be necessary to additionally copy all return
   * values to a home location in thread-local storage.
   */
  LockTemp(TargetReg(kRet0));

  // TODO: use the correct component size, currently all supported types
  // share array alignment with ints (see comment at head of function)
  size_t component_size = sizeof(int32_t);

  // Having a range of 0 is legal
  if (info->is_range && (elems > 0)) {
    /*
     * Bit of ugliness here. We're going to generate a mem copy loop
     * on the register range, but it is possible that some regs
     * in the range have been promoted. This is unlikely, but
     * before generating the copy, we'll just force a flush
     * of any regs in the source range that have been promoted to
     * home location.
     */
    for (int i = 0; i < elems; i++) {
      RegLocation loc = UpdateLoc(info->args[i]);
      if (loc.location == kLocPhysReg) {
        StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low),
                      loc.low_reg, kWord);
      }
    }
    /*
     * TUNING note: generated code here could be much improved, but
     * this is an uncommon operation and isn't especially performance
     * critical.
     */
    int r_src = AllocTemp();
    int r_dst = AllocTemp();
    int r_idx = AllocTemp();
    int r_val = INVALID_REG;
    switch (cu_->instruction_set) {
      case kThumb2:
        r_val = TargetReg(kLr);
        break;
      case kX86:
        FreeTemp(TargetReg(kRet0));
        r_val = AllocTemp();
        break;
      case kMips:
        r_val = AllocTemp();
        break;
      default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
    }
    // Set up source pointer
    RegLocation rl_first = info->args[0];
    OpRegRegImm(kOpAdd, r_src, TargetReg(kSp), SRegOffset(rl_first.s_reg_low));
    // Set up the target pointer
    OpRegRegImm(kOpAdd, r_dst, TargetReg(kRet0),
                mirror::Array::DataOffset(component_size).Int32Value());
    // Set up the loop counter (known to be > 0)
    LoadConstant(r_idx, elems - 1);
    // Generate the copy loop. Going backwards for convenience
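    // In C-like pseudocode, a sketch of the loop emitted below:
    //   do { r_val = r_src[r_idx]; r_dst[r_idx] = r_val; } while (--r_idx >= 0);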
    LIR* target = NewLIR0(kPseudoTargetLabel);
    // Copy next element
    LoadBaseIndexed(r_src, r_idx, r_val, 2, kWord);
    StoreBaseIndexed(r_dst, r_idx, r_val, 2, kWord);
    FreeTemp(r_val);
    OpDecAndBranch(kCondGe, r_idx, target);
    if (cu_->instruction_set == kX86) {
      // Restore the target pointer
      OpRegRegImm(kOpAdd, TargetReg(kRet0), r_dst,
                  -mirror::Array::DataOffset(component_size).Int32Value());
    }
  } else if (!info->is_range) {
    // TUNING: interleave
    for (int i = 0; i < elems; i++) {
      RegLocation rl_arg = LoadValue(info->args[i], kCoreReg);
      StoreBaseDisp(TargetReg(kRet0),
                    mirror::Array::DataOffset(component_size).Int32Value() +
                    i * 4, rl_arg.low_reg, kWord);
      // If the LoadValue caused a temp to be allocated, free it
      if (IsTemp(rl_arg.low_reg)) {
        FreeTemp(rl_arg.low_reg);
      }
    }
  }
  if (info->result.location != kLocInvalid) {
    StoreValue(info->result, GetReturn(false /* not fp */));
  }
}

void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_double,
                      bool is_object) {
  int field_offset;
  int ssb_index;
  bool is_volatile;
  bool is_referrers_class;
  bool fast_path = cu_->compiler_driver->ComputeStaticFieldInfo(
      field_idx, mir_graph_->GetCurrentDexCompilationUnit(), field_offset, ssb_index,
      is_referrers_class, is_volatile, true);
  if (fast_path && !SLOW_FIELD_PATH) {
    DCHECK_GE(field_offset, 0);
    int rBase;
    if (is_referrers_class) {
      // Fast path, static storage base is this method's class
      RegLocation rl_method = LoadCurrMethod();
      rBase = AllocTemp();
      LoadWordDisp(rl_method.low_reg,
                   mirror::AbstractMethod::DeclaringClassOffset().Int32Value(), rBase);
      if (IsTemp(rl_method.low_reg)) {
        FreeTemp(rl_method.low_reg);
      }
    } else {
      // Medium path, static storage base is in a different class, which requires
      // checks that the other class is initialized.
      // TODO: remove initialized check now that we are initializing classes in the compiler driver.
      DCHECK_GE(ssb_index, 0);
      // May do runtime call so everything to home locations.
      FlushAllRegs();
      // Using fixed register to sync with possible call to runtime support.
      int r_method = TargetReg(kArg1);
      LockTemp(r_method);
      LoadCurrMethodDirect(r_method);
      rBase = TargetReg(kArg0);
      LockTemp(rBase);
      LoadWordDisp(r_method,
                   mirror::AbstractMethod::DexCacheInitializedStaticStorageOffset().Int32Value(),
                   rBase);
      LoadWordDisp(rBase,
                   mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
                   sizeof(int32_t*) * ssb_index, rBase);
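      // The two loads above amount to, in sketch form:
      //   rBase = method->dex_cache_initialized_static_storage_;  // object array
      //   rBase = array_data(rBase)[ssb_index];                   // Class* or NULL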
      // rBase now points at appropriate static storage base (Class*)
      // or NULL if not initialized. Check for NULL and call helper if NULL.
      // TUNING: fast path should fall through
      LIR* branch_over = OpCmpImmBranch(kCondNe, rBase, 0, NULL);
      LoadConstant(TargetReg(kArg0), ssb_index);
      CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true);
      if (cu_->instruction_set == kMips) {
        // For Arm, kRet0 = kArg0 = rBase, for Mips, we need to copy
        OpRegCopy(rBase, TargetReg(kRet0));
      }
      LIR* skip_target = NewLIR0(kPseudoTargetLabel);
      branch_over->target = skip_target;
      FreeTemp(r_method);
    }
    // rBase now holds static storage base
    if (is_long_or_double) {
      rl_src = LoadValueWide(rl_src, kAnyReg);
    } else {
      rl_src = LoadValue(rl_src, kAnyReg);
    }
    if (is_volatile) {
      GenMemBarrier(kStoreStore);
    }
    if (is_long_or_double) {
      StoreBaseDispWide(rBase, field_offset, rl_src.low_reg,
                        rl_src.high_reg);
    } else {
      StoreWordDisp(rBase, field_offset, rl_src.low_reg);
    }
    if (is_volatile) {
      GenMemBarrier(kStoreLoad);
    }
    if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
      MarkGCCard(rl_src.low_reg, rBase);
    }
    FreeTemp(rBase);
  } else {
    FlushAllRegs();  // Everything to home locations
    int setter_offset = is_long_or_double ? ENTRYPOINT_OFFSET(pSet64Static) :
        (is_object ? ENTRYPOINT_OFFSET(pSetObjStatic)
                   : ENTRYPOINT_OFFSET(pSet32Static));
    CallRuntimeHelperImmRegLocation(setter_offset, field_idx, rl_src, true);
  }
}

void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest,
                      bool is_long_or_double, bool is_object) {
  int field_offset;
  int ssb_index;
  bool is_volatile;
  bool is_referrers_class;
  bool fast_path = cu_->compiler_driver->ComputeStaticFieldInfo(
      field_idx, mir_graph_->GetCurrentDexCompilationUnit(), field_offset, ssb_index,
      is_referrers_class, is_volatile, false);
  if (fast_path && !SLOW_FIELD_PATH) {
    DCHECK_GE(field_offset, 0);
    int rBase;
    if (is_referrers_class) {
      // Fast path, static storage base is this method's class
      RegLocation rl_method = LoadCurrMethod();
      rBase = AllocTemp();
      LoadWordDisp(rl_method.low_reg,
                   mirror::AbstractMethod::DeclaringClassOffset().Int32Value(), rBase);
    } else {
      // Medium path, static storage base is in a different class, which requires
      // checks that the other class is initialized.
      // TODO: remove initialized check now that we are initializing classes in the compiler driver.
      DCHECK_GE(ssb_index, 0);
      // May do runtime call so everything to home locations.
      FlushAllRegs();
      // Using fixed register to sync with possible call to runtime support.
      int r_method = TargetReg(kArg1);
      LockTemp(r_method);
      LoadCurrMethodDirect(r_method);
      rBase = TargetReg(kArg0);
      LockTemp(rBase);
      LoadWordDisp(r_method,
                   mirror::AbstractMethod::DexCacheInitializedStaticStorageOffset().Int32Value(),
                   rBase);
      LoadWordDisp(rBase, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
                   sizeof(int32_t*) * ssb_index, rBase);
      // rBase now points at appropriate static storage base (Class*)
      // or NULL if not initialized. Check for NULL and call helper if NULL.
      // TUNING: fast path should fall through
      LIR* branch_over = OpCmpImmBranch(kCondNe, rBase, 0, NULL);
      CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true);
      if (cu_->instruction_set == kMips) {
        // For Arm, kRet0 = kArg0 = rBase, for Mips, we need to copy
        OpRegCopy(rBase, TargetReg(kRet0));
      }
      LIR* skip_target = NewLIR0(kPseudoTargetLabel);
      branch_over->target = skip_target;
      FreeTemp(r_method);
    }
    // rBase now holds static storage base
    RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
    if (is_volatile) {
      GenMemBarrier(kLoadLoad);
    }
    if (is_long_or_double) {
      LoadBaseDispWide(rBase, field_offset, rl_result.low_reg,
                       rl_result.high_reg, INVALID_SREG);
    } else {
      LoadWordDisp(rBase, field_offset, rl_result.low_reg);
    }
    FreeTemp(rBase);
    if (is_long_or_double) {
      StoreValueWide(rl_dest, rl_result);
    } else {
      StoreValue(rl_dest, rl_result);
    }
  } else {
    FlushAllRegs();  // Everything to home locations
    int getter_offset = is_long_or_double ? ENTRYPOINT_OFFSET(pGet64Static) :
        (is_object ? ENTRYPOINT_OFFSET(pGetObjStatic)
                   : ENTRYPOINT_OFFSET(pGet32Static));
    CallRuntimeHelperImm(getter_offset, field_idx, true);
    if (is_long_or_double) {
      RegLocation rl_result = GetReturnWide(rl_dest.fp);
      StoreValueWide(rl_dest, rl_result);
    } else {
      RegLocation rl_result = GetReturn(rl_dest.fp);
      StoreValue(rl_dest, rl_result);
    }
  }
}

void Mir2Lir::HandleSuspendLaunchPads() {
  int num_elems = suspend_launchpads_.Size();
  int helper_offset = ENTRYPOINT_OFFSET(pTestSuspendFromCode);
  for (int i = 0; i < num_elems; i++) {
    ResetRegPool();
    ResetDefTracking();
    LIR* lab = suspend_launchpads_.Get(i);
    LIR* resume_lab = reinterpret_cast<LIR*>(lab->operands[0]);
    current_dalvik_offset_ = lab->operands[1];
    AppendLIR(lab);
    int r_tgt = CallHelperSetup(helper_offset);
    CallHelper(r_tgt, helper_offset, true /* MarkSafepointPC */);
    OpUnconditionalBranch(resume_lab);
  }
}

void Mir2Lir::HandleIntrinsicLaunchPads() {
  int num_elems = intrinsic_launchpads_.Size();
  for (int i = 0; i < num_elems; i++) {
    ResetRegPool();
    ResetDefTracking();
    LIR* lab = intrinsic_launchpads_.Get(i);
    CallInfo* info = reinterpret_cast<CallInfo*>(lab->operands[0]);
    current_dalvik_offset_ = info->offset;
    AppendLIR(lab);
    // NOTE: GenInvoke handles MarkSafepointPC
    GenInvoke(info);
    LIR* resume_lab = reinterpret_cast<LIR*>(lab->operands[2]);
    if (resume_lab != NULL) {
      OpUnconditionalBranch(resume_lab);
    }
  }
}

void Mir2Lir::HandleThrowLaunchPads() {
  int num_elems = throw_launchpads_.Size();
  for (int i = 0; i < num_elems; i++) {
    ResetRegPool();
    ResetDefTracking();
    LIR* lab = throw_launchpads_.Get(i);
    current_dalvik_offset_ = lab->operands[1];
    AppendLIR(lab);
    int func_offset = 0;
    int v1 = lab->operands[2];
    int v2 = lab->operands[3];
    bool target_x86 = (cu_->instruction_set == kX86);
    switch (lab->operands[0]) {
      case kThrowNullPointer:
        func_offset = ENTRYPOINT_OFFSET(pThrowNullPointerFromCode);
        break;
      case kThrowConstantArrayBounds:
        // v1 is the length reg on Arm/Mips (on x86 it is the array pointer,
        // and the length is reloaded from memory); v2 is the constant index.
        if (target_x86) {
          OpRegMem(kOpMov, TargetReg(kArg1), v1, mirror::Array::LengthOffset().Int32Value());
        } else {
          OpRegCopy(TargetReg(kArg1), v1);
        }
        // Make sure the following LoadConstant doesn't mess with kArg1.
        LockTemp(TargetReg(kArg1));
        LoadConstant(TargetReg(kArg0), v2);
        func_offset = ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode);
        break;
      case kThrowArrayBounds:
        // Move v1 (array index) to kArg0 and v2 (array length) to kArg1
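        // The cases below guard against v1/v2 already occupying kArg0/kArg1:
        // e.g. with v1 == kArg1 and v2 == kArg0, naive copies would clobber
        // one value, so kArg2 is used to break the cycle.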
        if (v2 != TargetReg(kArg0)) {
          OpRegCopy(TargetReg(kArg0), v1);
          if (target_x86) {
            // x86 leaves the array pointer in v2, so load the array length that the handler expects
            OpRegMem(kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value());
          } else {
            OpRegCopy(TargetReg(kArg1), v2);
          }
        } else {
          if (v1 == TargetReg(kArg1)) {
            // Swap v1 and v2, using kArg2 as a temp
            OpRegCopy(TargetReg(kArg2), v1);
            if (target_x86) {
              // x86 leaves the array pointer in v2; load the array length that the handler expects
              OpRegMem(kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value());
            } else {
              OpRegCopy(TargetReg(kArg1), v2);
            }
            OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
          } else {
            if (target_x86) {
              // x86 leaves the array pointer in v2; load the array length that the handler expects
              OpRegMem(kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value());
            } else {
              OpRegCopy(TargetReg(kArg1), v2);
            }
            OpRegCopy(TargetReg(kArg0), v1);
          }
        }
        func_offset = ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode);
        break;
      case kThrowDivZero:
        func_offset = ENTRYPOINT_OFFSET(pThrowDivZeroFromCode);
        break;
      case kThrowNoSuchMethod:
        OpRegCopy(TargetReg(kArg0), v1);
        func_offset = ENTRYPOINT_OFFSET(pThrowNoSuchMethodFromCode);
        break;
      case kThrowStackOverflow:
        func_offset = ENTRYPOINT_OFFSET(pThrowStackOverflowFromCode);
        // Restore stack alignment
        if (target_x86) {
          OpRegImm(kOpAdd, TargetReg(kSp), frame_size_);
        } else {
          OpRegImm(kOpAdd, TargetReg(kSp), (num_core_spills_ + num_fp_spills_) * 4);
        }
        break;
      default:
        LOG(FATAL) << "Unexpected throw kind: " << lab->operands[0];
    }
    ClobberCalleeSave();
    int r_tgt = CallHelperSetup(func_offset);
    CallHelper(r_tgt, func_offset, true /* MarkSafepointPC */);
  }
}

void Mir2Lir::GenIGet(uint32_t field_idx, int opt_flags, OpSize size,
                      RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double,
                      bool is_object) {
  int field_offset;
  bool is_volatile;

  bool fast_path = FastInstance(field_idx, field_offset, is_volatile, false);

  if (fast_path && !SLOW_FIELD_PATH) {
    RegLocation rl_result;
    RegisterClass reg_class = oat_reg_class_by_size(size);
    DCHECK_GE(field_offset, 0);
    rl_obj = LoadValue(rl_obj, kCoreReg);
    if (is_long_or_double) {
      DCHECK(rl_dest.wide);
      GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
      if (cu_->instruction_set == kX86) {
        rl_result = EvalLoc(rl_dest, reg_class, true);
        GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
        LoadBaseDispWide(rl_obj.low_reg, field_offset, rl_result.low_reg,
                         rl_result.high_reg, rl_obj.s_reg_low);
        if (is_volatile) {
          GenMemBarrier(kLoadLoad);
        }
      } else {
        int reg_ptr = AllocTemp();
        OpRegRegImm(kOpAdd, reg_ptr, rl_obj.low_reg, field_offset);
        rl_result = EvalLoc(rl_dest, reg_class, true);
        LoadBaseDispWide(reg_ptr, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
        if (is_volatile) {
          GenMemBarrier(kLoadLoad);
        }
        FreeTemp(reg_ptr);
      }
      StoreValueWide(rl_dest, rl_result);
    } else {
      rl_result = EvalLoc(rl_dest, reg_class, true);
      GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
      LoadBaseDisp(rl_obj.low_reg, field_offset, rl_result.low_reg,
                   kWord, rl_obj.s_reg_low);
      if (is_volatile) {
        GenMemBarrier(kLoadLoad);
      }
      StoreValue(rl_dest, rl_result);
    }
  } else {
    int getter_offset = is_long_or_double ? ENTRYPOINT_OFFSET(pGet64Instance) :
        (is_object ? ENTRYPOINT_OFFSET(pGetObjInstance)
                   : ENTRYPOINT_OFFSET(pGet32Instance));
    CallRuntimeHelperImmRegLocation(getter_offset, field_idx, rl_obj, true);
    if (is_long_or_double) {
      RegLocation rl_result = GetReturnWide(rl_dest.fp);
      StoreValueWide(rl_dest, rl_result);
    } else {
      RegLocation rl_result = GetReturn(rl_dest.fp);
      StoreValue(rl_dest, rl_result);
    }
  }
}

void Mir2Lir::GenIPut(uint32_t field_idx, int opt_flags, OpSize size,
                      RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double,
                      bool is_object) {
  int field_offset;
  bool is_volatile;

  bool fast_path = FastInstance(field_idx, field_offset, is_volatile,
                                true);
  if (fast_path && !SLOW_FIELD_PATH) {
    RegisterClass reg_class = oat_reg_class_by_size(size);
    DCHECK_GE(field_offset, 0);
    rl_obj = LoadValue(rl_obj, kCoreReg);
    if (is_long_or_double) {
      int reg_ptr;
      rl_src = LoadValueWide(rl_src, kAnyReg);
      GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
      reg_ptr = AllocTemp();
      OpRegRegImm(kOpAdd, reg_ptr, rl_obj.low_reg, field_offset);
      if (is_volatile) {
        GenMemBarrier(kStoreStore);
      }
      StoreBaseDispWide(reg_ptr, 0, rl_src.low_reg, rl_src.high_reg);
      if (is_volatile) {
        GenMemBarrier(kLoadLoad);
      }
      FreeTemp(reg_ptr);
    } else {
      rl_src = LoadValue(rl_src, reg_class);
      GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
      if (is_volatile) {
        GenMemBarrier(kStoreStore);
      }
      StoreBaseDisp(rl_obj.low_reg, field_offset, rl_src.low_reg, kWord);
      if (is_volatile) {
        GenMemBarrier(kLoadLoad);
      }
      if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
        MarkGCCard(rl_src.low_reg, rl_obj.low_reg);
      }
    }
  } else {
    int setter_offset = is_long_or_double ? ENTRYPOINT_OFFSET(pSet64Instance) :
        (is_object ? ENTRYPOINT_OFFSET(pSetObjInstance)
                   : ENTRYPOINT_OFFSET(pSet32Instance));
    CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_idx, rl_obj, rl_src, true);
  }
}

void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
  RegLocation rl_method = LoadCurrMethod();
  int res_reg = AllocTemp();
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (!cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                        *cu_->dex_file,
                                                        type_idx)) {
    // Call out to helper which resolves type and verifies access.
    // Resolved type returned in kRet0.
    CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
                            type_idx, rl_method.low_reg, true);
    RegLocation rl_result = GetReturn(false);
    StoreValue(rl_dest, rl_result);
  } else {
    // We don't need access checks, load type from dex cache
    int32_t dex_cache_offset =
        mirror::AbstractMethod::DexCacheResolvedTypesOffset().Int32Value();
    LoadWordDisp(rl_method.low_reg, dex_cache_offset, res_reg);
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
        (sizeof(mirror::Class*) * type_idx);
    LoadWordDisp(res_reg, offset_of_type, rl_result.low_reg);
    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file,
        type_idx) || SLOW_TYPE_PATH) {
      // Slow path, at runtime test if type is null and if so initialize
      FlushAllRegs();
      LIR* branch1 = OpCmpImmBranch(kCondEq, rl_result.low_reg, 0, NULL);
      // Resolved, store and hop over following code
      StoreValue(rl_dest, rl_result);
      /*
       * Because we have stores of the target value on two paths,
       * clobber temp tracking for the destination using the ssa name
       */
      ClobberSReg(rl_dest.s_reg_low);
      LIR* branch2 = OpUnconditionalBranch(0);
      // TUNING: move slow path to end & remove unconditional branch
      LIR* target1 = NewLIR0(kPseudoTargetLabel);
      // Call out to helper, which will return resolved type in kArg0
      CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx,
                              rl_method.low_reg, true);
      RegLocation rl_result = GetReturn(false);
      StoreValue(rl_dest, rl_result);
      /*
       * Because we have stores of the target value on two paths,
       * clobber temp tracking for the destination using the ssa name
       */
      ClobberSReg(rl_dest.s_reg_low);
      // Rejoin code paths
      LIR* target2 = NewLIR0(kPseudoTargetLabel);
      branch1->target = target1;
      branch2->target = target2;
    } else {
      // Fast path, we're done - just store result
      StoreValue(rl_dest, rl_result);
    }
  }
}

void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
  /* NOTE: Most strings should be available at compile time */
  int32_t offset_of_string = mirror::Array::DataOffset(sizeof(mirror::String*)).Int32Value() +
      (sizeof(mirror::String*) * string_idx);
  if (!cu_->compiler_driver->CanAssumeStringIsPresentInDexCache(
      *cu_->dex_file, string_idx) || SLOW_STRING_PATH) {
    // slow path, resolve string if not in dex cache
    FlushAllRegs();
    LockCallTemps();  // Using explicit registers
    LoadCurrMethodDirect(TargetReg(kArg2));
    LoadWordDisp(TargetReg(kArg2),
                 mirror::AbstractMethod::DexCacheStringsOffset().Int32Value(), TargetReg(kArg0));
    // Might call out to helper, which will return resolved string in kRet0
    int r_tgt = CallHelperSetup(ENTRYPOINT_OFFSET(pResolveStringFromCode));
    LoadWordDisp(TargetReg(kArg0), offset_of_string, TargetReg(kRet0));
    LoadConstant(TargetReg(kArg1), string_idx);
    if (cu_->instruction_set == kThumb2) {
      OpRegImm(kOpCmp, TargetReg(kRet0), 0);  // Is resolved?
      GenBarrier();
      // For testing, always force through helper
      if (!EXERCISE_SLOWEST_STRING_PATH) {
        OpIT(kCondEq, "T");
      }
      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));  // .eq
      LIR* call_inst = OpReg(kOpBlx, r_tgt);          // .eq, helper(Method*, string_idx)
      MarkSafepointPC(call_inst);
      FreeTemp(r_tgt);
    } else if (cu_->instruction_set == kMips) {
      LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kRet0), 0, NULL);
      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));  // .eq
      LIR* call_inst = OpReg(kOpBlx, r_tgt);
      MarkSafepointPC(call_inst);
      FreeTemp(r_tgt);
      LIR* target = NewLIR0(kPseudoTargetLabel);
      branch->target = target;
    } else {
      DCHECK_EQ(cu_->instruction_set, kX86);
      CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pResolveStringFromCode), TargetReg(kArg2),
                              TargetReg(kArg1), true);
    }
    GenBarrier();
    StoreValue(rl_dest, GetReturn(false));
  } else {
    RegLocation rl_method = LoadCurrMethod();
    int res_reg = AllocTemp();
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    LoadWordDisp(rl_method.low_reg,
                 mirror::AbstractMethod::DexCacheStringsOffset().Int32Value(), res_reg);
    LoadWordDisp(res_reg, offset_of_string, rl_result.low_reg);
    StoreValue(rl_dest, rl_result);
  }
}

/*
 * Let helper function take care of everything. Will
 * call Class::NewInstanceFromCode(type_idx, method);
 */
void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) {
  FlushAllRegs();  /* Everything to home location */
  // alloc will always check for resolution, do we also need to verify
  // access because the verifier was unable to?
  int func_offset;
  if (cu_->compiler_driver->CanAccessInstantiableTypeWithoutChecks(
      cu_->method_idx, *cu_->dex_file, type_idx)) {
    func_offset = ENTRYPOINT_OFFSET(pAllocObjectFromCode);
  } else {
    func_offset = ENTRYPOINT_OFFSET(pAllocObjectFromCodeWithAccessCheck);
  }
  CallRuntimeHelperImmMethod(func_offset, type_idx, true);
  RegLocation rl_result = GetReturn(false);
  StoreValue(rl_dest, rl_result);
}

void Mir2Lir::GenThrow(RegLocation rl_src) {
  FlushAllRegs();
  CallRuntimeHelperRegLocation(ENTRYPOINT_OFFSET(pDeliverException), rl_src, true);
}

// For final classes there are no sub-classes to check and so we can answer the instance-of
// question with simple comparisons.
void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
                                 RegLocation rl_src) {
  RegLocation object = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  int result_reg = rl_result.low_reg;
  if (result_reg == object.low_reg) {
    result_reg = AllocTypedTemp(false, kCoreReg);
  }
  LoadConstant(result_reg, 0);  // assume false
  LIR* null_branchover = OpCmpImmBranch(kCondEq, object.low_reg, 0, NULL);

  int check_class = AllocTypedTemp(false, kCoreReg);
  int object_class = AllocTypedTemp(false, kCoreReg);

  LoadCurrMethodDirect(check_class);
  if (use_declaring_class) {
    LoadWordDisp(check_class, mirror::AbstractMethod::DeclaringClassOffset().Int32Value(),
                 check_class);
    LoadWordDisp(object.low_reg, mirror::Object::ClassOffset().Int32Value(), object_class);
  } else {
    LoadWordDisp(check_class, mirror::AbstractMethod::DexCacheResolvedTypesOffset().Int32Value(),
                 check_class);
    LoadWordDisp(object.low_reg, mirror::Object::ClassOffset().Int32Value(), object_class);
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
        (sizeof(mirror::Class*) * type_idx);
    LoadWordDisp(check_class, offset_of_type, check_class);
  }

  LIR* ne_branchover = NULL;
  if (cu_->instruction_set == kThumb2) {
    OpRegReg(kOpCmp, check_class, object_class);  // Same?
    OpIT(kCondEq, "");                            // if-convert the test
    LoadConstant(result_reg, 1);                  // .eq case - load true
  } else {
    ne_branchover = OpCmpBranch(kCondNe, check_class, object_class, NULL);
    LoadConstant(result_reg, 1);  // eq case - load true
  }
  LIR* target = NewLIR0(kPseudoTargetLabel);
  null_branchover->target = target;
  if (ne_branchover != NULL) {
    ne_branchover->target = target;
  }
  FreeTemp(object_class);
  FreeTemp(check_class);
  if (IsTemp(result_reg)) {
    OpRegCopy(rl_result.low_reg, result_reg);
    FreeTemp(result_reg);
  }
  StoreValue(rl_dest, rl_result);
}

void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
                                         bool type_known_abstract, bool use_declaring_class,
                                         bool can_assume_type_is_in_dex_cache,
                                         uint32_t type_idx, RegLocation rl_dest,
                                         RegLocation rl_src) {
  FlushAllRegs();
  // May generate a call - use explicit registers
  LockCallTemps();
  LoadCurrMethodDirect(TargetReg(kArg1));  // kArg1 <= current Method*
  int class_reg = TargetReg(kArg2);        // kArg2 will hold the Class*
  if (needs_access_check) {
    // Check we have access to type_idx and if not throw IllegalAccessError,
    // returns Class* in kArg0
    CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
                         type_idx, true);
    OpRegCopy(class_reg, TargetReg(kRet0));          // Align usage with fast path
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
  } else if (use_declaring_class) {
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
    LoadWordDisp(TargetReg(kArg1),
                 mirror::AbstractMethod::DeclaringClassOffset().Int32Value(), class_reg);
  } else {
    // Load dex cache entry into class_reg (kArg2)
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
    LoadWordDisp(TargetReg(kArg1),
                 mirror::AbstractMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg);
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
        (sizeof(mirror::Class*) * type_idx);
    LoadWordDisp(class_reg, offset_of_type, class_reg);
    if (!can_assume_type_is_in_dex_cache) {
      // Need to test presence of type in dex cache at runtime
      LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
      // Not resolved
      // Call out to helper, which will return resolved type in kRet0
      CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, true);
      OpRegCopy(TargetReg(kArg2), TargetReg(kRet0));   // Align usage with fast path
      LoadValueDirectFixed(rl_src, TargetReg(kArg0));  /* reload Ref */
      // Rejoin code paths
      LIR* hop_target = NewLIR0(kPseudoTargetLabel);
      hop_branch->target = hop_target;
    }
  }
  /* kArg0 is ref, kArg2 is class. If ref==null, use directly as bool result */
  RegLocation rl_result = GetReturn(false);
  if (cu_->instruction_set == kMips) {
    // On MIPS rArg0 != rl_result, place false in result if branch is taken.
    LoadConstant(rl_result.low_reg, 0);
  }
  LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);

  /* load object->klass_ */
  DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
  LoadWordDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
  /* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
  LIR* branchover = NULL;
  if (type_known_final) {
    // rl_result == ref == null == 0.
    if (cu_->instruction_set == kThumb2) {
      OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
      OpIT(kCondEq, "E");                  // if-convert the test
      LoadConstant(rl_result.low_reg, 1);  // .eq case - load true
      LoadConstant(rl_result.low_reg, 0);  // .ne case - load false
    } else {
      LoadConstant(rl_result.low_reg, 0);  // ne case - load false
      branchover = OpCmpBranch(kCondNe, TargetReg(kArg1), TargetReg(kArg2), NULL);
      LoadConstant(rl_result.low_reg, 1);  // eq case - load true
    }
  } else {
    if (cu_->instruction_set == kThumb2) {
      int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
      if (!type_known_abstract) {
        /* Uses conditional nullification */
        OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
        OpIT(kCondEq, "EE");                // if-convert the test
        LoadConstant(TargetReg(kArg0), 1);  // .eq case - load true
      }
      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));  // .ne case - arg0 <= class
      OpReg(kOpBlx, r_tgt);                           // .ne case: helper(class, ref->class)
      FreeTemp(r_tgt);
    } else {
      if (!type_known_abstract) {
        /* Uses branchovers */
        LoadConstant(rl_result.low_reg, 1);  // assume true
        branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
      }
      if (cu_->instruction_set != kX86) {
        int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
        OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));  // .ne case - arg0 <= class
        OpReg(kOpBlx, r_tgt);                           // .ne case: helper(class, ref->class)
        FreeTemp(r_tgt);
      } else {
        OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
        OpThreadMem(kOpBlx, ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
      }
    }
  }
  // TODO: only clobber when type isn't final?
  ClobberCalleeSave();
  /* branch targets here */
  LIR* target = NewLIR0(kPseudoTargetLabel);
  StoreValue(rl_dest, rl_result);
  branch1->target = target;
  if (branchover != NULL) {
    branchover->target = target;
  }
}

void Mir2Lir::GenInstanceof(uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src) {
  bool type_known_final, type_known_abstract, use_declaring_class;
  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                                              *cu_->dex_file,
                                                                              type_idx,
                                                                              &type_known_final,
                                                                              &type_known_abstract,
                                                                              &use_declaring_class);
  bool can_assume_type_is_in_dex_cache = !needs_access_check &&
      cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx);

  if ((use_declaring_class || can_assume_type_is_in_dex_cache) && type_known_final) {
    GenInstanceofFinal(use_declaring_class, type_idx, rl_dest, rl_src);
  } else {
    GenInstanceofCallingHelper(needs_access_check, type_known_final, type_known_abstract,
                               use_declaring_class, can_assume_type_is_in_dex_cache,
                               type_idx, rl_dest, rl_src);
  }
}

void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src) {
  bool type_known_final, type_known_abstract, use_declaring_class;
  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                                              *cu_->dex_file,
                                                                              type_idx,
                                                                              &type_known_final,
                                                                              &type_known_abstract,
                                                                              &use_declaring_class);
  // Note: currently type_known_final is unused, as optimizing will only improve the performance
  // of the exception throw path.
  DexCompilationUnit* cu = mir_graph_->GetCurrentDexCompilationUnit();
  const MethodReference mr(cu->GetDexFile(), cu->GetDexMethodIndex());
  if (!needs_access_check && cu_->compiler_driver->IsSafeCast(mr, insn_idx)) {
    // Verifier type analysis proved this check cast would never cause an exception.
    return;
  }
  FlushAllRegs();
  // May generate a call - use explicit registers
  LockCallTemps();
  LoadCurrMethodDirect(TargetReg(kArg1));  // kArg1 <= current Method*
  int class_reg = TargetReg(kArg2);        // kArg2 will hold the Class*
  if (needs_access_check) {
    // Check we have access to type_idx and if not throw IllegalAccessError,
    // returns Class* in kRet0
    // InitializeTypeAndVerifyAccess(idx, method)
    CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
                            type_idx, TargetReg(kArg1), true);
    OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
  } else if (use_declaring_class) {
    LoadWordDisp(TargetReg(kArg1),
                 mirror::AbstractMethod::DeclaringClassOffset().Int32Value(), class_reg);
  } else {
    // Load dex cache entry into class_reg (kArg2)
    LoadWordDisp(TargetReg(kArg1),
                 mirror::AbstractMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg);
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
        (sizeof(mirror::Class*) * type_idx);
    LoadWordDisp(class_reg, offset_of_type, class_reg);
    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx)) {
      // Need to test presence of type in dex cache at runtime
      LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
      // Not resolved
      // Call out to helper, which will return resolved type in kArg0
      // InitializeTypeFromCode(idx, method)
      CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx,
                              TargetReg(kArg1), true);
      OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
      // Rejoin code paths
      LIR* hop_target = NewLIR0(kPseudoTargetLabel);
      hop_branch->target = hop_target;
    }
  }
  // At this point, class_reg (kArg2) has class
  LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
  /* Null is OK - continue */
  LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
  /* load object->klass_ */
  DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
  LoadWordDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
  /* kArg1 now contains object->klass_ */
  LIR* branch2 = NULL;
  if (!type_known_abstract) {
    branch2 = OpCmpBranch(kCondEq, TargetReg(kArg1), class_reg, NULL);
  }
  CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pCheckCastFromCode), TargetReg(kArg1),
                          TargetReg(kArg2), true);
  /* branch target here */
  LIR* target = NewLIR0(kPseudoTargetLabel);
  branch1->target = target;
  if (branch2 != NULL) {
    branch2->target = target;
  }
}

void Mir2Lir::GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
                           RegLocation rl_src1, RegLocation rl_src2) {
  RegLocation rl_result;
  if (cu_->instruction_set == kThumb2) {
    /*
     * NOTE: This is the one place in the code in which we might have
     * as many as six live temporary registers. There are 5 in the normal
     * set for Arm. Until we have spill capabilities, temporarily add
     * lr to the temp set. It is safe to do this locally, but note that
     * lr is used explicitly elsewhere in the code generator and cannot
     * normally be used as a general temp register.
     */
    MarkTemp(TargetReg(kLr));  // Add lr to the temp pool
    FreeTemp(TargetReg(kLr));  // and make it available
  }
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // The longs may overlap - use intermediate temp if so
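  // (E.g. if rl_result.low_reg aliases rl_src1.high_reg, writing the low
  // result first would corrupt a high operand before the second op reads it.)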
  if ((rl_result.low_reg == rl_src1.high_reg) || (rl_result.low_reg == rl_src2.high_reg)) {
    int t_reg = AllocTemp();
    OpRegRegReg(first_op, t_reg, rl_src1.low_reg, rl_src2.low_reg);
    OpRegRegReg(second_op, rl_result.high_reg, rl_src1.high_reg, rl_src2.high_reg);
    OpRegCopy(rl_result.low_reg, t_reg);
    FreeTemp(t_reg);
  } else {
    OpRegRegReg(first_op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
    OpRegRegReg(second_op, rl_result.high_reg, rl_src1.high_reg,
                rl_src2.high_reg);
  }
  /*
   * NOTE: If rl_dest refers to a frame variable in a large frame, the
   * following StoreValueWide might need to allocate a temp register.
   * To further work around the lack of a spill capability, explicitly
   * free any temps from rl_src1 & rl_src2 that aren't still live in rl_result.
   * Remove when spill is functional.
   */
  FreeRegLocTemps(rl_result, rl_src1);
  FreeRegLocTemps(rl_result, rl_src2);
  StoreValueWide(rl_dest, rl_result);
  if (cu_->instruction_set == kThumb2) {
    Clobber(TargetReg(kLr));
    UnmarkTemp(TargetReg(kLr));  // Remove lr from the temp pool
  }
}


void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_shift) {
  int func_offset = -1;  // Make gcc happy

  switch (opcode) {
    case Instruction::SHL_LONG:
    case Instruction::SHL_LONG_2ADDR:
      func_offset = ENTRYPOINT_OFFSET(pShlLong);
      break;
    case Instruction::SHR_LONG:
    case Instruction::SHR_LONG_2ADDR:
      func_offset = ENTRYPOINT_OFFSET(pShrLong);
      break;
    case Instruction::USHR_LONG:
    case Instruction::USHR_LONG_2ADDR:
      func_offset = ENTRYPOINT_OFFSET(pUshrLong);
      break;
    default:
      LOG(FATAL) << "Unexpected case";
  }
  FlushAllRegs();  /* Send everything to home location */
  CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_shift, false);
  RegLocation rl_result = GetReturnWide(false);
  StoreValueWide(rl_dest, rl_result);
}


void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_src2) {
  OpKind op = kOpBkpt;
  bool is_div_rem = false;
  bool check_zero = false;
  bool unary = false;
  RegLocation rl_result;
  bool shift_op = false;
  switch (opcode) {
    case Instruction::NEG_INT:
      op = kOpNeg;
      unary = true;
      break;
    case Instruction::NOT_INT:
      op = kOpMvn;
      unary = true;
      break;
    case Instruction::ADD_INT:
    case Instruction::ADD_INT_2ADDR:
      op = kOpAdd;
      break;
    case Instruction::SUB_INT:
    case Instruction::SUB_INT_2ADDR:
      op = kOpSub;
      break;
    case Instruction::MUL_INT:
    case Instruction::MUL_INT_2ADDR:
      op = kOpMul;
      break;
    case Instruction::DIV_INT:
    case Instruction::DIV_INT_2ADDR:
      check_zero = true;
      op = kOpDiv;
      is_div_rem = true;
      break;
    /* NOTE: returns in kArg1 */
    case Instruction::REM_INT:
    case Instruction::REM_INT_2ADDR:
      check_zero = true;
      op = kOpRem;
      is_div_rem = true;
      break;
    case Instruction::AND_INT:
    case Instruction::AND_INT_2ADDR:
      op = kOpAnd;
      break;
    case Instruction::OR_INT:
    case Instruction::OR_INT_2ADDR:
      op = kOpOr;
      break;
    case Instruction::XOR_INT:
    case Instruction::XOR_INT_2ADDR:
      op = kOpXor;
      break;
    case Instruction::SHL_INT:
    case Instruction::SHL_INT_2ADDR:
      shift_op = true;
      op = kOpLsl;
      break;
    case Instruction::SHR_INT:
    case Instruction::SHR_INT_2ADDR:
      shift_op = true;
      op = kOpAsr;
      break;
    case Instruction::USHR_INT:
    case Instruction::USHR_INT_2ADDR:
      shift_op = true;
      op = kOpLsr;
      break;
    default:
      LOG(FATAL) << "Invalid word arith op: " << opcode;
  }
  if (!is_div_rem) {
    if (unary) {
      rl_src1 = LoadValue(rl_src1, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      OpRegReg(op, rl_result.low_reg, rl_src1.low_reg);
    } else {
      if (shift_op) {
        int t_reg = INVALID_REG;
        if (cu_->instruction_set == kX86) {
          // X86 doesn't require masking and must use ECX
          t_reg = TargetReg(kCount);  // rCX
          LoadValueDirectFixed(rl_src2, t_reg);
        } else {
          rl_src2 = LoadValue(rl_src2, kCoreReg);
          t_reg = AllocTemp();
          OpRegRegImm(kOpAnd, t_reg, rl_src2.low_reg, 31);
        }
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_result = EvalLoc(rl_dest, kCoreReg, true);
        OpRegRegReg(op, rl_result.low_reg, rl_src1.low_reg, t_reg);
        FreeTemp(t_reg);
      } else {
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        rl_result = EvalLoc(rl_dest, kCoreReg, true);
        OpRegRegReg(op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
      }
    }
    StoreValue(rl_dest, rl_result);
  } else {
    if (cu_->instruction_set == kMips) {
      rl_src1 = LoadValue(rl_src1, kCoreReg);
      rl_src2 = LoadValue(rl_src2, kCoreReg);
      if (check_zero) {
        GenImmedCheck(kCondEq, rl_src2.low_reg, 0, kThrowDivZero);
      }
      rl_result = GenDivRem(rl_dest, rl_src1.low_reg, rl_src2.low_reg, op == kOpDiv);
    } else {
      int func_offset = ENTRYPOINT_OFFSET(pIdivmod);
      FlushAllRegs();  /* Send everything to home location */
      LoadValueDirectFixed(rl_src2, TargetReg(kArg1));
      int r_tgt = CallHelperSetup(func_offset);
      LoadValueDirectFixed(rl_src1, TargetReg(kArg0));
      if (check_zero) {
        GenImmedCheck(kCondEq, TargetReg(kArg1), 0, kThrowDivZero);
      }
      // NOTE: callout here is not a safepoint
      CallHelper(r_tgt, func_offset, false /* not a safepoint */);
      if (op == kOpDiv)
        rl_result = GetReturn(false);
      else
        rl_result = GetReturnAlt();
    }
    StoreValue(rl_dest, rl_result);
  }
}

/*
 * The following are the first-level codegen routines that analyze the format
 * of each bytecode then either dispatch special purpose codegen routines
 * or produce corresponding Thumb instructions directly.
 */

static bool IsPowerOfTwo(int x) {
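  // E.g. 8 & 7 == 0 -> true; 12 & 11 == 8 -> false. Note that x == 0 also
  // passes this test; callers screen out lit < 2 before relying on it.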
  return (x & (x - 1)) == 0;
}

// Returns true if no more than two bits are set in 'x'.
static bool IsPopCountLE2(unsigned int x) {
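  // Clear the lowest set bit once, then retest: e.g. x = 12 (0b1100) becomes
  // 8, and 8 & 7 == 0, so popcount(12) <= 2.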
  x &= x - 1;
  return (x & (x - 1)) == 0;
}

// Returns the index of the lowest set bit in 'x'.
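// Precondition: x != 0 (the loops below would otherwise never terminate).
// E.g. x = 48 (0b110000): the nibble loop advances past four zero bits and
// the function returns 4.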
static int LowestSetBit(unsigned int x) {
  int bit_posn = 0;
  while ((x & 0xf) == 0) {
    bit_posn += 4;
    x >>= 4;
  }
  while ((x & 1) == 0) {
    bit_posn++;
    x >>= 1;
  }
  return bit_posn;
}

// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Mir2Lir::HandleEasyDivide(Instruction::Code dalvik_opcode,
                               RegLocation rl_src, RegLocation rl_dest, int lit) {
  if ((lit < 2) || ((cu_->instruction_set != kThumb2) && !IsPowerOfTwo(lit))) {
    return false;
  }
  // No divide instruction for Arm, so check for more special cases
  if ((cu_->instruction_set == kThumb2) && !IsPowerOfTwo(lit)) {
    return SmallLiteralDivide(dalvik_opcode, rl_src, rl_dest, lit);
  }
  int k = LowestSetBit(lit);
  if (k >= 30) {
    // Avoid special cases.
    return false;
  }
  bool div = (dalvik_opcode == Instruction::DIV_INT_LIT8 ||
              dalvik_opcode == Instruction::DIV_INT_LIT16);
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
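  // Signed division by 2^k must round toward zero, so negative inputs get a
  // bias of (2^k - 1) added first. Worked example for lit = 4 (k = 2),
  // src = -7: bias = (src >> 31) logically shifted right by (32 - k) = 3;
  // (-7 + 3) >> 2 = -1, which equals trunc(-7 / 4).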
  if (div) {
    int t_reg = AllocTemp();
    if (lit == 2) {
      // Division by 2 is by far the most common division by constant.
      OpRegRegImm(kOpLsr, t_reg, rl_src.low_reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.low_reg);
      OpRegRegImm(kOpAsr, rl_result.low_reg, t_reg, k);
    } else {
      OpRegRegImm(kOpAsr, t_reg, rl_src.low_reg, 31);
      OpRegRegImm(kOpLsr, t_reg, t_reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.low_reg);
      OpRegRegImm(kOpAsr, rl_result.low_reg, t_reg, k);
    }
  } else {
    int t_reg1 = AllocTemp();
    int t_reg2 = AllocTemp();
    if (lit == 2) {
      OpRegRegImm(kOpLsr, t_reg1, rl_src.low_reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.low_reg);
      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegReg(kOpSub, rl_result.low_reg, t_reg2, t_reg1);
    } else {
      OpRegRegImm(kOpAsr, t_reg1, rl_src.low_reg, 31);
      OpRegRegImm(kOpLsr, t_reg1, t_reg1, 32 - k);
      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.low_reg);
      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegReg(kOpSub, rl_result.low_reg, t_reg2, t_reg1);
    }
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

// Returns true if it added instructions to 'cu' to multiply 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
  // Can we simplify this multiplication?
1411 bool power_of_two = false;
1412 bool pop_count_le2 = false;
1413 bool power_of_two_minus_one = false;
1414 if (lit < 2) {
1415 // Avoid special cases.
1416 return false;
1417 } else if (IsPowerOfTwo(lit)) {
1418 power_of_two = true;
1419 } else if (IsPopCountLE2(lit)) {
1420 pop_count_le2 = true;
1421 } else if (IsPowerOfTwo(lit + 1)) {
1422 power_of_two_minus_one = true;
1423 } else {
1424 return false;
1425 }
1426 rl_src = LoadValue(rl_src, kCoreReg);
1427 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1428 if (power_of_two) {
1429 // Shift.
1430 OpRegRegImm(kOpLsl, rl_result.low_reg, rl_src.low_reg, LowestSetBit(lit));
1431 } else if (pop_count_le2) {
1432 // Shift and add and shift.
1433 int first_bit = LowestSetBit(lit);
1434 int second_bit = LowestSetBit(lit ^ (1 << first_bit));
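    // e.g. lit == 10 (0b1010): first_bit == 1, second_bit == 3, so the
    // product is (src << 1) + (src << 3).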
    GenMultiplyByTwoBitMultiplier(rl_src, rl_result, lit, first_bit, second_bit);
  } else {
    // Reverse subtract: (src << n) - src, where n == LowestSetBit(lit + 1).
    DCHECK(power_of_two_minus_one);
    // TUNING: rsb dst, src, src lsl#LowestSetBit(lit + 1)
    int t_reg = AllocTemp();
    OpRegRegImm(kOpLsl, t_reg, rl_src.low_reg, LowestSetBit(lit + 1));
    OpRegRegReg(kOpSub, rl_result.low_reg, t_reg, rl_src.low_reg);
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src,
                               int lit) {
  RegLocation rl_result;
  OpKind op = static_cast<OpKind>(0); /* Make gcc happy */
  bool shift_op = false;
  bool is_div = false;

  switch (opcode) {
    case Instruction::RSUB_INT_LIT8:
    case Instruction::RSUB_INT: {
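      // Reverse subtract: dest = lit - src. Without a native rsb instruction
      // (non-Thumb2), negate then add computes the same value.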
      rl_src = LoadValue(rl_src, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      if (cu_->instruction_set == kThumb2) {
        OpRegRegImm(kOpRsub, rl_result.low_reg, rl_src.low_reg, lit);
      } else {
        OpRegReg(kOpNeg, rl_result.low_reg, rl_src.low_reg);
        OpRegImm(kOpAdd, rl_result.low_reg, lit);
      }
      StoreValue(rl_dest, rl_result);
      return;
    }

    case Instruction::SUB_INT:
    case Instruction::SUB_INT_2ADDR:
      lit = -lit;
      // Intended fallthrough
    case Instruction::ADD_INT:
    case Instruction::ADD_INT_2ADDR:
    case Instruction::ADD_INT_LIT8:
    case Instruction::ADD_INT_LIT16:
      op = kOpAdd;
      break;
    case Instruction::MUL_INT:
    case Instruction::MUL_INT_2ADDR:
    case Instruction::MUL_INT_LIT8:
    case Instruction::MUL_INT_LIT16: {
      if (HandleEasyMultiply(rl_src, rl_dest, lit)) {
        return;
      }
      op = kOpMul;
      break;
    }
    case Instruction::AND_INT:
    case Instruction::AND_INT_2ADDR:
    case Instruction::AND_INT_LIT8:
    case Instruction::AND_INT_LIT16:
      op = kOpAnd;
      break;
    case Instruction::OR_INT:
    case Instruction::OR_INT_2ADDR:
    case Instruction::OR_INT_LIT8:
    case Instruction::OR_INT_LIT16:
      op = kOpOr;
      break;
    case Instruction::XOR_INT:
    case Instruction::XOR_INT_2ADDR:
    case Instruction::XOR_INT_LIT8:
    case Instruction::XOR_INT_LIT16:
      op = kOpXor;
      break;
    case Instruction::SHL_INT_LIT8:
    case Instruction::SHL_INT:
    case Instruction::SHL_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpLsl;
      break;
    case Instruction::SHR_INT_LIT8:
    case Instruction::SHR_INT:
    case Instruction::SHR_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpAsr;
      break;
    case Instruction::USHR_INT_LIT8:
    case Instruction::USHR_INT:
    case Instruction::USHR_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpLsr;
      break;

    case Instruction::DIV_INT:
    case Instruction::DIV_INT_2ADDR:
    case Instruction::DIV_INT_LIT8:
    case Instruction::DIV_INT_LIT16:
    case Instruction::REM_INT:
    case Instruction::REM_INT_2ADDR:
    case Instruction::REM_INT_LIT8:
    case Instruction::REM_INT_LIT16: {
      if (lit == 0) {
        GenImmedCheck(kCondAl, 0, 0, kThrowDivZero);
        return;
      }
      if (HandleEasyDivide(opcode, rl_src, rl_dest, lit)) {
        return;
      }
      if ((opcode == Instruction::DIV_INT_LIT8) ||
          (opcode == Instruction::DIV_INT) ||
          (opcode == Instruction::DIV_INT_2ADDR) ||
          (opcode == Instruction::DIV_INT_LIT16)) {
        is_div = true;
      } else {
        is_div = false;
      }
      if (cu_->instruction_set == kMips) {
        rl_src = LoadValue(rl_src, kCoreReg);
        rl_result = GenDivRemLit(rl_dest, rl_src.low_reg, lit, is_div);
      } else {
        FlushAllRegs(); /* Everything to home location */
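        // The pIdivmod helper computes both quotient and remainder: the
        // quotient comes back in the primary return register and the remainder
        // in the alternate one, hence GetReturn vs. GetReturnAlt below.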
        LoadValueDirectFixed(rl_src, TargetReg(kArg0));
        Clobber(TargetReg(kArg0));
        int func_offset = ENTRYPOINT_OFFSET(pIdivmod);
        CallRuntimeHelperRegImm(func_offset, TargetReg(kArg0), lit, false);
        if (is_div)
          rl_result = GetReturn(false);
        else
          rl_result = GetReturnAlt();
      }
      StoreValue(rl_dest, rl_result);
      return;
    }
    default:
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // Avoid shifts by literal 0 - no support in Thumb. Change to copy.
  if (shift_op && (lit == 0)) {
    OpRegCopy(rl_result.low_reg, rl_src.low_reg);
  } else {
    OpRegRegImm(op, rl_result.low_reg, rl_src.low_reg, lit);
  }
  StoreValue(rl_dest, rl_result);
}

void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_src2) {
  RegLocation rl_result;
  OpKind first_op = kOpBkpt;
  OpKind second_op = kOpBkpt;
  bool call_out = false;
  bool check_zero = false;
  int func_offset;
  int ret_reg = TargetReg(kRet0);

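  // Inline-lowered ops combine the low words with first_op and the high words
  // with second_op (e.g. add/adc, sub/sbc); everything else calls out to a
  // runtime helper identified by func_offset.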
  switch (opcode) {
    case Instruction::NOT_LONG:
      rl_src2 = LoadValueWide(rl_src2, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      // Check for destructive overlap
      if (rl_result.low_reg == rl_src2.high_reg) {
        int t_reg = AllocTemp();
        OpRegCopy(t_reg, rl_src2.high_reg);
        OpRegReg(kOpMvn, rl_result.low_reg, rl_src2.low_reg);
        OpRegReg(kOpMvn, rl_result.high_reg, t_reg);
        FreeTemp(t_reg);
      } else {
        OpRegReg(kOpMvn, rl_result.low_reg, rl_src2.low_reg);
        OpRegReg(kOpMvn, rl_result.high_reg, rl_src2.high_reg);
      }
      StoreValueWide(rl_dest, rl_result);
      return;
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
      if (cu_->instruction_set != kThumb2) {
        GenAddLong(rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpAdd;
      second_op = kOpAdc;
      break;
    case Instruction::SUB_LONG:
    case Instruction::SUB_LONG_2ADDR:
      if (cu_->instruction_set != kThumb2) {
        GenSubLong(rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpSub;
      second_op = kOpSbc;
      break;
    case Instruction::MUL_LONG:
    case Instruction::MUL_LONG_2ADDR:
      if (cu_->instruction_set == kThumb2) {
        GenMulLong(rl_dest, rl_src1, rl_src2);
        return;
      } else {
        call_out = true;
        ret_reg = TargetReg(kRet0);
        func_offset = ENTRYPOINT_OFFSET(pLmul);
      }
      break;
    case Instruction::DIV_LONG:
    case Instruction::DIV_LONG_2ADDR:
      call_out = true;
      check_zero = true;
      ret_reg = TargetReg(kRet0);
      func_offset = ENTRYPOINT_OFFSET(pLdiv);
      break;
    case Instruction::REM_LONG:
    case Instruction::REM_LONG_2ADDR:
      call_out = true;
      check_zero = true;
      func_offset = ENTRYPOINT_OFFSET(pLdivmod);
      /* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */
      ret_reg = (cu_->instruction_set == kThumb2) ? TargetReg(kArg2) : TargetReg(kRet0);
      break;
    case Instruction::AND_LONG_2ADDR:
    case Instruction::AND_LONG:
      if (cu_->instruction_set == kX86) {
        return GenAndLong(rl_dest, rl_src1, rl_src2);
      }
      first_op = kOpAnd;
      second_op = kOpAnd;
      break;
    case Instruction::OR_LONG:
    case Instruction::OR_LONG_2ADDR:
      if (cu_->instruction_set == kX86) {
        GenOrLong(rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpOr;
      second_op = kOpOr;
      break;
    case Instruction::XOR_LONG:
    case Instruction::XOR_LONG_2ADDR:
      if (cu_->instruction_set == kX86) {
        GenXorLong(rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpXor;
      second_op = kOpXor;
      break;
    case Instruction::NEG_LONG: {
      GenNegLong(rl_dest, rl_src2);
      return;
    }
    default:
      LOG(FATAL) << "Invalid long arith op";
  }
  if (!call_out) {
    GenLong3Addr(first_op, second_op, rl_dest, rl_src1, rl_src2);
  } else {
    FlushAllRegs(); /* Send everything to home location */
    if (check_zero) {
      LoadValueDirectWideFixed(rl_src2, TargetReg(kArg2), TargetReg(kArg3));
      int r_tgt = CallHelperSetup(func_offset);
      GenDivZeroCheck(TargetReg(kArg2), TargetReg(kArg3));
      LoadValueDirectWideFixed(rl_src1, TargetReg(kArg0), TargetReg(kArg1));
      // NOTE: callout here is not a safepoint
      CallHelper(r_tgt, func_offset, false /* not safepoint */);
    } else {
      CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
    }
    // Adjust the return registers to handle the case of rem returning kArg2/kArg3.
    if (ret_reg == TargetReg(kRet0))
      rl_result = GetReturnWide(false);
    else
      rl_result = GetReturnWideAlt();
    StoreValueWide(rl_dest, rl_result);
  }
}

void Mir2Lir::GenConversionCall(int func_offset,
                                RegLocation rl_dest, RegLocation rl_src) {
  /*
   * Don't optimize the register usage since it calls out to support
   * functions.
   */
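  // Arguments travel in core or floating-point argument registers depending on
  // the source type, and the result is fetched the same way via rl_dest.fp.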
  FlushAllRegs(); /* Send everything to home location */
  if (rl_src.wide) {
    LoadValueDirectWideFixed(rl_src, rl_src.fp ? TargetReg(kFArg0) : TargetReg(kArg0),
                             rl_src.fp ? TargetReg(kFArg1) : TargetReg(kArg1));
  } else {
    LoadValueDirectFixed(rl_src, rl_src.fp ? TargetReg(kFArg0) : TargetReg(kArg0));
  }
  CallRuntimeHelperRegLocation(func_offset, rl_src, false);
  if (rl_dest.wide) {
    RegLocation rl_result;
    rl_result = GetReturnWide(rl_dest.fp);
    StoreValueWide(rl_dest, rl_result);
  } else {
    RegLocation rl_result;
    rl_result = GetReturn(rl_dest.fp);
    StoreValue(rl_dest, rl_result);
  }
}

/* Check if we need to check for pending suspend request */
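// OpTestSuspend emits a conditional branch taken when a suspend check is due;
// it is pointed at an out-of-line launchpad, materialized later, that performs
// the check and resumes at ret_lab.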
void Mir2Lir::GenSuspendTest(int opt_flags) {
  if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
    return;
  }
  FlushAllRegs();
  LIR* branch = OpTestSuspend(NULL);
  LIR* ret_lab = NewLIR0(kPseudoTargetLabel);
  LIR* target = RawLIR(current_dalvik_offset_, kPseudoSuspendTarget,
                       reinterpret_cast<uintptr_t>(ret_lab), current_dalvik_offset_);
  branch->target = target;
  suspend_launchpads_.Insert(target);
}

/* Check for a pending suspend request, then branch to 'target' */
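// Unlike GenSuspendTest, the fast path here branches straight to 'target';
// when a suspend check is due, control falls through to the launchpad, which
// performs the check before continuing to 'target'.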
void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) {
  if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
    OpUnconditionalBranch(target);
    return;
  }
  OpTestSuspend(target);
  LIR* launch_pad =
      RawLIR(current_dalvik_offset_, kPseudoSuspendTarget,
             reinterpret_cast<uintptr_t>(target), current_dalvik_offset_);
  FlushAllRegs();
  OpUnconditionalBranch(launch_pad);
  suspend_launchpads_.Insert(launch_pad);
}

}  // namespace art