| field | value |
|---|---|
| author | 2014-08-14 13:49:57 +0800 |
| committer | 2014-08-29 16:49:12 +0000 |
| commit | e5beb18ca08962ed271f4c1f703e0c52bc8805f3 (patch) |
| tree | 2f7ab08b50d2711844379b1a42e7d7cbd4adc847 |
| parent | c84b7c54c2251344018a4b4e78b122caf7c03b64 (diff) |
ART: Address issues with kIntrinsicMinMaxDouble for x86
This patch fixes the following issues:
- Makes sure that invoke-static/range is considered by the analysis
  path that decides whether the base of code register is needed.
- Invalidates the code pointer register in the intrinsic implementations
  of min/max for FP, since the generated code has a diamond shape and
  Quick does not correctly handle control-flow merges (see the sketch
  after the sign-off lines).
- Reverts the clobbering of the base of code register in the constant
  loading path, since it reduces performance in linear code that needs
  the register.
- Ensures that no assumption is made about whether the base of code
  register exists, e.g. on 64-bit targets where the RIP register may be
  used instead (see the addressing sketch after the diff).
Change-Id: I96463ae1197e5cfa2a8bd3571163b38fb338a340
Signed-off-by: Razvan A Lupusoru <razvan.a.lupusoru@intel.com>
Signed-off-by: Alexei Zavjalov <alexei.zavjalov@intel.com>
Signed-off-by: Haitao Feng <haitao.feng@intel.com>
Signed-off-by: Serguei Katkov <serguei.i.katkov@intel.com>
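
The diamond-shape problem in the second bullet can be pictured with a toy model. The sketch below is illustrative only; `TempCache` and `EmitFPMinMax` are hypothetical names, not Quick's API. The point is that only one arm of the diamond loads the base of code register, so the temp cache's belief about it must be discarded at the merge point:

```cpp
// Illustrative sketch only -- TempCache/EmitFPMinMax are made-up names,
// not Quick's API. It models why a register cached before a diamond must
// be clobbered: only some paths through the diamond load the value, so
// the cache may not claim it is live at the merge point.
#include <optional>

struct TempCache {
  // Physical register currently believed to hold "base of code", if any.
  std::optional<int> base_of_code_reg;
  void Clobber() { base_of_code_reg.reset(); }
};

void EmitFPMinMax(TempCache& cache, bool takes_nan_path) {
  if (takes_nan_path) {
    // NaN arm: loading the NaN constant from the pool puts base of code
    // into, say, physical register 5 and records that fact in the cache.
    cache.base_of_code_reg = 5;
  } else {
    // Min/max arm: no constant-pool access, register 5 is never written.
  }
  // Merge point: if the cache still said "register 5 holds base of code",
  // later code could read a register the other arm never initialized.
  cache.Clobber();
}
```

This is what the added `Clobber(rl_method.reg)` call in `GenInlinedMinMaxFP` below does on the real register.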
| mode | file | lines changed |
|---|---|---|
| -rwxr-xr-x | compiler/dex/quick/x86/fp_x86.cc | 19 |
| -rw-r--r-- | compiler/dex/quick/x86/utility_x86.cc | 2 |

2 files changed, 20 insertions(+), 1 deletion(-)
```diff
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index 2920fb6507..21d1a5cec2 100755
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -730,6 +730,25 @@ bool X86Mir2Lir::GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double)
   // Handle NaN.
   branch_nan->target = NewLIR0(kPseudoTargetLabel);
   LoadConstantWide(rl_result.reg, INT64_C(0x7ff8000000000000));
+
+  // The base_of_code_ compiler temp is non-null when it is reserved
+  // for being able to do data accesses relative to method start.
+  if (base_of_code_ != nullptr) {
+    // Loading from the constant pool may have used base of code register.
+    // However, the code here generates logic in diamond shape and not all
+    // paths load base of code register. Therefore, we ensure it is clobbered so
+    // that the temp caching system does not believe it is live at merge point.
+    RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
+    if (rl_method.wide) {
+      rl_method = UpdateLocWide(rl_method);
+    } else {
+      rl_method = UpdateLoc(rl_method);
+    }
+    if (rl_method.location == kLocPhysReg) {
+      Clobber(rl_method.reg);
+    }
+  }
+
   LIR* branch_exit_nan = NewLIR1(kX86Jmp8, 0);
   // Handle Min/Max. Copy greater/lesser value from src2.
   branch_cond1->target = NewLIR0(kPseudoTargetLabel);
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index 022fd80663..4f65a0f5af 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -592,7 +592,6 @@ LIR* X86Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
                               kDouble, kNotVolatile);
       res->target = data_target;
       res->flags.fixup = kFixupLoad;
-      Clobber(rl_method.reg);
       store_method_addr_used_ = true;
     } else {
       if (r_dest.IsPair()) {
@@ -1025,6 +1024,7 @@ void X86Mir2Lir::AnalyzeMIR(int opcode, BasicBlock * bb, MIR *mir) {
       store_method_addr_ = true;
       break;
     case Instruction::INVOKE_STATIC:
+    case Instruction::INVOKE_STATIC_RANGE:
       AnalyzeInvokeStatic(opcode, bb, mir);
       break;
     default:
```
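
On the last bullet: whether a base of code register exists at all is target-dependent. A rough sketch of the distinction, using a hypothetical `EmitConstPoolLoad` helper rather than ART code: 32-bit x86 needs the method start in a general-purpose register before a constant-pool access, while x86-64 can fold the whole address into a RIP-relative operand, so the register may never be materialized:

```cpp
// Hypothetical helper, not ART code: shows why 64-bit codegen may never
// allocate a base-of-code register in the first place.
#include <cstdio>

enum class Isa { kX86, kX86_64 };

void EmitConstPoolLoad(Isa isa, int pool_offset) {
  if (isa == Isa::kX86_64) {
    // x86-64: the constant pool can be addressed relative to RIP, so no
    // base-of-code register is ever allocated.
    std::printf("movsd xmm0, [rip + %d]\n", pool_offset);
  } else {
    // 32-bit x86: a prior sequence (e.g. call/pop) must have loaded the
    // method start into a register; EBX is assumed here for illustration.
    std::printf("movsd xmm0, [ebx + %d]\n", pool_offset);
  }
}
```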