ART: Some Quick cleanup
Move some definitions around. Where a method is already virtual,
avoid instruction-set tests by overriding it in the target backend instead.
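
To illustrate the direction of the cleanup, a minimal, self-contained C++
sketch of the pattern follows (the names Mir2LirBase, ArmBackend,
SmallLiteralDivRem(int) and the int-only signature are simplified stand-ins,
not the real ART declarations): the base class keeps only the generic
power-of-two handling, and the ARM backend overrides the virtual method to
add its small-literal special cases, so shared code never has to test
cu_->instruction_set.

  // Simplified sketch (hypothetical names; not the real ART signatures).
  static bool IsPowerOfTwo(int x) { return x > 0 && (x & (x - 1)) == 0; }

  class Mir2LirBase {
   public:
    virtual ~Mir2LirBase() {}
    // Shared code: only the generic power-of-two case, no instruction-set test.
    virtual bool HandleEasyDivRem(int lit) {
      return lit >= 2 && IsPowerOfTwo(lit);
    }
  };

  class ArmBackend : public Mir2LirBase {
   public:
    // ARM-specific small-literal handling lives in the override.
    bool HandleEasyDivRem(int lit) override {
      if (lit < 2) {
        return false;
      }
      if (!IsPowerOfTwo(lit)) {
        return SmallLiteralDivRem(lit);  // ARM-only special cases.
      }
      return Mir2LirBase::HandleEasyDivRem(lit);
    }

   private:
    // Placeholder for the ARM small-literal division helper.
    bool SmallLiteralDivRem(int lit) { return lit == 3 || lit == 5; }
  };

A caller holding a Mir2LirBase* picks up the right behaviour through virtual
dispatch, with no per-target branching left in the shared path.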
Change-Id: I8d98f098e55ade1bc0cfa32bb2aad006caccd07d
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index fa8dfe3..6ac1849 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -263,6 +263,9 @@
void GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) OVERRIDE;
+ bool HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
+ RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
+
private:
void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 03e0e92..860a987 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -1655,4 +1655,19 @@
StoreValueWide(rl_dest, rl_result);
}
+bool ArmMir2Lir::HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
+ RegLocation rl_src, RegLocation rl_dest, int lit) {
+ if (lit < 2) {
+ return false;
+ }
+
+ // ARM either does not support a division instruction, or it is potentially expensive. Look for
+ // more special cases.
+ if (!IsPowerOfTwo(lit)) {
+ return SmallLiteralDivRem(dalvik_opcode, is_div, rl_src, rl_dest, lit);
+ }
+
+ return Mir2Lir::HandleEasyDivRem(dalvik_opcode, is_div, rl_src, rl_dest, lit);
+}
+
} // namespace art
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index d2b32b5..aa47cee 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -1727,15 +1727,11 @@
// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
-bool Mir2Lir::HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
+bool Mir2Lir::HandleEasyDivRem(Instruction::Code dalvik_opcode ATTRIBUTE_UNUSED, bool is_div,
RegLocation rl_src, RegLocation rl_dest, int lit) {
- if ((lit < 2) || ((cu_->instruction_set != kThumb2) && !IsPowerOfTwo(lit))) {
+ if ((lit < 2) || (!IsPowerOfTwo(lit))) {
return false;
}
- // No divide instruction for Arm, so check for more special cases
- if ((cu_->instruction_set == kThumb2) && !IsPowerOfTwo(lit)) {
- return SmallLiteralDivRem(dalvik_opcode, is_div, rl_src, rl_dest, lit);
- }
int k = CTZ(lit);
if (k >= 30) {
// Avoid special cases.
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 4139b51..73629e8 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -479,83 +479,6 @@
/*
* Bit of a hack here - in the absence of a real scheduling pass,
- * emit the next instruction in static & direct invoke sequences.
- */
-static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
- int state, const MethodReference& target_method,
- uint32_t,
- uintptr_t direct_code, uintptr_t direct_method,
- InvokeType type) {
- UNUSED(info);
- DCHECK(cu->instruction_set != kX86 && cu->instruction_set != kX86_64 &&
- cu->instruction_set != kThumb2 && cu->instruction_set != kArm &&
- cu->instruction_set != kArm64);
- Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
- if (direct_code != 0 && direct_method != 0) {
- switch (state) {
- case 0: // Get the current Method* [sets kArg0]
- if (direct_code != static_cast<uintptr_t>(-1)) {
- cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
- } else {
- cg->LoadCodeAddress(target_method, type, kInvokeTgt);
- }
- if (direct_method != static_cast<uintptr_t>(-1)) {
- cg->LoadConstant(cg->TargetReg(kArg0, kRef), direct_method);
- } else {
- cg->LoadMethodAddress(target_method, type, kArg0);
- }
- break;
- default:
- return -1;
- }
- } else {
- RegStorage arg0_ref = cg->TargetReg(kArg0, kRef);
- switch (state) {
- case 0: // Get the current Method* [sets kArg0]
- // TUNING: we can save a reg copy if Method* has been promoted.
- cg->LoadCurrMethodDirect(arg0_ref);
- break;
- case 1: // Get method->dex_cache_resolved_methods_
- cg->LoadRefDisp(arg0_ref,
- mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
- arg0_ref,
- kNotVolatile);
- // Set up direct code if known.
- if (direct_code != 0) {
- if (direct_code != static_cast<uintptr_t>(-1)) {
- cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
- } else {
- CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
- cg->LoadCodeAddress(target_method, type, kInvokeTgt);
- }
- }
- break;
- case 2: // Grab target method*
- CHECK_EQ(cu->dex_file, target_method.dex_file);
- cg->LoadRefDisp(arg0_ref,
- ObjArray::OffsetOfElement(target_method.dex_method_index).Int32Value(),
- arg0_ref,
- kNotVolatile);
- break;
- case 3: // Grab the code from the method*
- if (direct_code == 0) {
- if (CommonCallCodeLoadCodePointerIntoInvokeTgt(&arg0_ref, cu, cg)) {
- break; // kInvokeTgt := arg0_ref->entrypoint
- }
- } else {
- break;
- }
- DCHECK(cu->instruction_set == kX86 || cu->instruction_set == kX86_64);
- FALLTHROUGH_INTENDED;
- default:
- return -1;
- }
- }
- return state + 1;
-}
-
-/*
- * Bit of a hack here - in the absence of a real scheduling pass,
* emit the next instruction in a virtual invoke sequence.
* We can use kLr as a temp prior to target address loading
* Note also that we'll load the first argument ("this") into
@@ -1028,10 +951,6 @@
}
bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
- if (cu_->instruction_set == kMips) {
- // TODO - add Mips implementation
- return false;
- }
// Location of reference to data array
int value_offset = mirror::String::ValueOffset().Int32Value();
// Location of count
@@ -1161,10 +1080,6 @@
}
bool Mir2Lir::GenInlinedAbsInt(CallInfo* info) {
- if (cu_->instruction_set == kMips) {
- // TODO - add Mips implementation
- return false;
- }
RegLocation rl_src = info->args[0];
rl_src = LoadValue(rl_src, kCoreReg);
RegLocation rl_dest = InlineTarget(info);
@@ -1179,10 +1094,6 @@
}
bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
- if (cu_->instruction_set == kMips) {
- // TODO - add Mips implementation
- return false;
- }
RegLocation rl_src = info->args[0];
rl_src = LoadValueWide(rl_src, kCoreReg);
RegLocation rl_dest = InlineTargetWide(info);
@@ -1288,14 +1199,6 @@
* otherwise bails to standard library code.
*/
bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
- if (cu_->instruction_set == kMips) {
- // TODO - add Mips implementation
- return false;
- }
- if (cu_->instruction_set == kX86_64) {
- // TODO - add kX86_64 implementation
- return false;
- }
RegLocation rl_obj = info->args[0];
RegLocation rl_char = info->args[1];
if (rl_char.is_const && (mir_graph_->ConstantValue(rl_char) & ~0xFFFF) != 0) {
@@ -1384,23 +1287,13 @@
RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
- switch (cu_->instruction_set) {
- case kArm:
- // Fall-through.
- case kThumb2:
- // Fall-through.
- case kMips:
- Load32Disp(TargetPtrReg(kSelf), Thread::PeerOffset<4>().Int32Value(), rl_result.reg);
- break;
-
- case kArm64:
- LoadRefDisp(TargetPtrReg(kSelf), Thread::PeerOffset<8>().Int32Value(), rl_result.reg,
- kNotVolatile);
- break;
-
- default:
- LOG(FATAL) << "Unexpected isa " << cu_->instruction_set;
+ if (Is64BitInstructionSet(cu_->instruction_set)) {
+ LoadRefDisp(TargetPtrReg(kSelf), Thread::PeerOffset<8>().Int32Value(), rl_result.reg,
+ kNotVolatile);
+ } else {
+ Load32Disp(TargetPtrReg(kSelf), Thread::PeerOffset<4>().Int32Value(), rl_result.reg);
}
+
StoreValue(rl_dest, rl_result);
return true;
}
@@ -1572,16 +1465,4 @@
}
}
-NextCallInsn Mir2Lir::GetNextSDCallInsn() {
- return NextSDCallInsn;
-}
-
-LIR* Mir2Lir::GenCallInsn(const MirMethodLoweringInfo& method_info) {
- UNUSED(method_info);
- DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64 &&
- cu_->instruction_set != kThumb2 && cu_->instruction_set != kArm &&
- cu_->instruction_set != kArm64);
- return OpReg(kOpBlx, TargetPtrReg(kInvokeTgt));
-}
-
} // namespace art
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 51a8c98..ed92e82 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -22,6 +22,8 @@
#include "entrypoints/quick/quick_entrypoints.h"
#include "gc/accounting/card_table.h"
#include "mips_lir.h"
+#include "mirror/art_method.h"
+#include "mirror/object_array-inl.h"
namespace art {
@@ -319,4 +321,84 @@
OpReg(kOpBx, rs_rRA);
}
+/*
+ * Bit of a hack here - in the absence of a real scheduling pass,
+ * emit the next instruction in static & direct invoke sequences.
+ */
+static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info ATTRIBUTE_UNUSED,
+ int state, const MethodReference& target_method,
+ uint32_t,
+ uintptr_t direct_code, uintptr_t direct_method,
+ InvokeType type) {
+ Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
+ if (direct_code != 0 && direct_method != 0) {
+ switch (state) {
+ case 0: // Get the current Method* [sets kArg0]
+ if (direct_code != static_cast<uintptr_t>(-1)) {
+ cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
+ } else {
+ cg->LoadCodeAddress(target_method, type, kInvokeTgt);
+ }
+ if (direct_method != static_cast<uintptr_t>(-1)) {
+ cg->LoadConstant(cg->TargetReg(kArg0, kRef), direct_method);
+ } else {
+ cg->LoadMethodAddress(target_method, type, kArg0);
+ }
+ break;
+ default:
+ return -1;
+ }
+ } else {
+ RegStorage arg0_ref = cg->TargetReg(kArg0, kRef);
+ switch (state) {
+ case 0: // Get the current Method* [sets kArg0]
+ // TUNING: we can save a reg copy if Method* has been promoted.
+ cg->LoadCurrMethodDirect(arg0_ref);
+ break;
+ case 1: // Get method->dex_cache_resolved_methods_
+ cg->LoadRefDisp(arg0_ref,
+ mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
+ arg0_ref,
+ kNotVolatile);
+ // Set up direct code if known.
+ if (direct_code != 0) {
+ if (direct_code != static_cast<uintptr_t>(-1)) {
+ cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
+ } else {
+ CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
+ cg->LoadCodeAddress(target_method, type, kInvokeTgt);
+ }
+ }
+ break;
+ case 2: // Grab target method*
+ CHECK_EQ(cu->dex_file, target_method.dex_file);
+ cg->LoadRefDisp(arg0_ref,
+ mirror::ObjectArray<mirror::Object>::
+ OffsetOfElement(target_method.dex_method_index).Int32Value(),
+ arg0_ref,
+ kNotVolatile);
+ break;
+ case 3: // Grab the code from the method*
+ if (direct_code == 0) {
+ int32_t offset = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ InstructionSetPointerSize(cu->instruction_set)).Int32Value();
+ // Get the compiled code address [use *alt_from or kArg0, set kInvokeTgt]
+ cg->LoadWordDisp(arg0_ref, offset, cg->TargetPtrReg(kInvokeTgt));
+ }
+ break;
+ default:
+ return -1;
+ }
+ }
+ return state + 1;
+}
+
+NextCallInsn MipsMir2Lir::GetNextSDCallInsn() {
+ return NextSDCallInsn;
+}
+
+LIR* MipsMir2Lir::GenCallInsn(const MirMethodLoweringInfo& method_info ATTRIBUTE_UNUSED) {
+ return OpReg(kOpBlx, TargetPtrReg(kInvokeTgt));
+}
+
} // namespace art
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 9c3ce7b..ac14704 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -206,6 +206,29 @@
LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
+ RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2, bool is_div, int flags) OVERRIDE;
+ RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div)
+ OVERRIDE;
+
+ NextCallInsn GetNextSDCallInsn() OVERRIDE;
+ LIR* GenCallInsn(const MirMethodLoweringInfo& method_info) OVERRIDE;
+
+ // Unimplemented intrinsics.
+ bool GenInlinedCharAt(CallInfo* info ATTRIBUTE_UNUSED) OVERRIDE {
+ return false;
+ }
+ bool GenInlinedAbsInt(CallInfo* info ATTRIBUTE_UNUSED) OVERRIDE {
+ return false;
+ }
+ bool GenInlinedAbsLong(CallInfo* info ATTRIBUTE_UNUSED) OVERRIDE {
+ return false;
+ }
+ bool GenInlinedIndexOf(CallInfo* info ATTRIBUTE_UNUSED, bool zero_based ATTRIBUTE_UNUSED)
+ OVERRIDE {
+ return false;
+ }
+
private:
void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
void GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
@@ -214,10 +237,6 @@
RegLocation rl_src2);
void ConvertShortToLongBranch(LIR* lir);
- RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div, int flags) OVERRIDE;
- RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div)
- OVERRIDE;
};
} // namespace art
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 5f8a71c..c3e9bb5 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -895,14 +895,14 @@
bool safepoint_pc);
void GenInvoke(CallInfo* info);
void GenInvokeNoInline(CallInfo* info);
- virtual NextCallInsn GetNextSDCallInsn();
+ virtual NextCallInsn GetNextSDCallInsn() = 0;
/*
* @brief Generate the actual call insn based on the method info.
* @param method_info the lowering info for the method call.
* @returns Call instruction
*/
- virtual LIR* GenCallInsn(const MirMethodLoweringInfo& method_info);
+ virtual LIR* GenCallInsn(const MirMethodLoweringInfo& method_info) = 0;
virtual void FlushIns(RegLocation* ArgLocs, RegLocation rl_method);
virtual int GenDalvikArgs(CallInfo* info, int call_state, LIR** pcrLabel,