Revert "Revert "Add JIT""
Added missing EntryPointToCodePointer.
This reverts commit a5ca888d715cd0c6c421313211caa1928be3e399.
Change-Id: Ia74df0ef3a7babbdcb0466fd24da28e304e3f5af
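
Note: the EntryPointToCodePointer helper mentioned above is on the runtime side and is
not part of the hunks below. As a rough, hedged sketch of what such a conversion
typically does in ART (the exact definition in the tree may differ), the low bit of a
Thumb2 entry point encodes the instruction-set mode and must be masked off to recover
the raw code address:

  // Hedged sketch only -- not copied from this change. Assumes the helper's job is
  // to strip the Thumb2 mode bit from a method entry point; clearing bit 0 is a
  // no-op on architectures where code is at least 2-byte aligned.
  #include <cstdint>

  static inline const void* EntryPointToCodePointer(const void* entry_point) {
    uintptr_t code = reinterpret_cast<uintptr_t>(entry_point);
    code &= ~static_cast<uintptr_t>(1);  // drop the Thumb bit, if any
    return reinterpret_cast<const void*>(code);
  }
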
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 7245853..f636e3b 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -427,7 +427,7 @@
InlineMethod intrinsic;
{
ReaderMutexLock mu(Thread::Current(), lock_);
- auto it = inline_methods_.find(info->index);
+ auto it = inline_methods_.find(info->method_ref.dex_method_index);
if (it == inline_methods_.end() || (it->second.flags & kInlineIntrinsic) == 0) {
return false;
}
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 3c9b7a3..6f68d1a 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -865,7 +865,12 @@
void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size, Primitive::Type type,
RegLocation rl_dest, RegLocation rl_obj) {
const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
- DCHECK_EQ(IGetMemAccessType(mir->dalvikInsn.opcode), field_info.MemAccessType());
+ if (kIsDebugBuild) {
+ auto mem_access_type = IsInstructionIGetQuickOrIPutQuick(mir->dalvikInsn.opcode) ?
+ IGetQuickOrIPutQuickMemAccessType(mir->dalvikInsn.opcode) :
+ IGetMemAccessType(mir->dalvikInsn.opcode);
+ DCHECK_EQ(mem_access_type, field_info.MemAccessType()) << mir->dalvikInsn.opcode;
+ }
cu_->compiler_driver->ProcessedInstanceField(field_info.FastGet());
if (!ForceSlowFieldPath(cu_) && field_info.FastGet()) {
RegisterClass reg_class = RegClassForFieldLoadStore(size, field_info.IsVolatile());
@@ -890,6 +895,9 @@
StoreValue(rl_dest, rl_result);
}
} else {
+ if (field_info.DeclaringDexFile() != nullptr) {
+ DCHECK_EQ(field_info.DeclaringDexFile(), cu_->dex_file);
+ }
DCHECK(SizeMatchesTypeForEntrypoint(size, type));
QuickEntrypointEnum target;
switch (type) {
@@ -939,7 +947,12 @@
void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
RegLocation rl_src, RegLocation rl_obj) {
const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
- DCHECK_EQ(IPutMemAccessType(mir->dalvikInsn.opcode), field_info.MemAccessType());
+ if (kIsDebugBuild) {
+ auto mem_access_type = IsInstructionIGetQuickOrIPutQuick(mir->dalvikInsn.opcode) ?
+ IGetQuickOrIPutQuickMemAccessType(mir->dalvikInsn.opcode) :
+ IPutMemAccessType(mir->dalvikInsn.opcode);
+ DCHECK_EQ(mem_access_type, field_info.MemAccessType());
+ }
cu_->compiler_driver->ProcessedInstanceField(field_info.FastPut());
if (!ForceSlowFieldPath(cu_) && field_info.FastPut()) {
RegisterClass reg_class = RegClassForFieldLoadStore(size, field_info.IsVolatile());
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 8e3df7c..040b07c 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -863,11 +863,12 @@
RegLocation res;
if (info->result.location == kLocInvalid) {
// If result is unused, return a sink target based on type of invoke target.
- res = GetReturn(ShortyToRegClass(mir_graph_->GetShortyFromTargetIdx(info->index)[0]));
+ res = GetReturn(
+ ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
} else {
res = info->result;
DCHECK_EQ(LocToRegClass(res),
- ShortyToRegClass(mir_graph_->GetShortyFromTargetIdx(info->index)[0]));
+ ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
}
return res;
}
@@ -876,11 +877,12 @@
RegLocation res;
if (info->result.location == kLocInvalid) {
// If result is unused, return a sink target based on type of invoke target.
- res = GetReturnWide(ShortyToRegClass(mir_graph_->GetShortyFromTargetIdx(info->index)[0]));
+ res = GetReturnWide(ShortyToRegClass(
+ mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
} else {
res = info->result;
DCHECK_EQ(LocToRegClass(res),
- ShortyToRegClass(mir_graph_->GetShortyFromTargetIdx(info->index)[0]));
+ ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
}
return res;
}
@@ -1418,7 +1420,8 @@
void Mir2Lir::GenInvoke(CallInfo* info) {
DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
- if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
+ const DexFile* dex_file = info->method_ref.dex_file;
+ if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(dex_file)
->GenIntrinsic(this, info)) {
return;
}
@@ -1428,7 +1431,7 @@
void Mir2Lir::GenInvokeNoInline(CallInfo* info) {
int call_state = 0;
LIR* null_ck;
- LIR** p_null_ck = NULL;
+ LIR** p_null_ck = nullptr;
NextCallInsn next_call_insn;
FlushAllRegs(); /* Everything to home location */
// Explicit register usage
@@ -1440,6 +1443,7 @@
info->type = method_info.GetSharpType();
bool fast_path = method_info.FastPath();
bool skip_this;
+
if (info->type == kInterface) {
next_call_insn = fast_path ? NextInterfaceCallInsn : NextInterfaceCallInsnWithAccessCheck;
skip_this = fast_path;
@@ -1469,7 +1473,8 @@
// Finish up any of the call sequence not interleaved in arg loading
while (call_state >= 0) {
call_state = next_call_insn(cu_, info, call_state, target_method, method_info.VTableIndex(),
- method_info.DirectCode(), method_info.DirectMethod(), original_type);
+ method_info.DirectCode(), method_info.DirectMethod(),
+ original_type);
}
LIR* call_insn = GenCallInsn(method_info);
MarkSafepointPC(call_insn);
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 34e5e25..966a92d 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -540,6 +540,7 @@
GenMoveException(rl_dest);
break;
+ case Instruction::RETURN_VOID_BARRIER:
case Instruction::RETURN_VOID:
if (((cu_->access_flags & kAccConstructor) != 0) &&
cu_->compiler_driver->RequiresConstructorBarrier(Thread::Current(), cu_->dex_file,
@@ -790,10 +791,12 @@
GenArrayPut(opt_flags, kUnsignedByte, rl_src[1], rl_src[2], rl_src[0], 0, false);
break;
+ case Instruction::IGET_OBJECT_QUICK:
case Instruction::IGET_OBJECT:
GenIGet(mir, opt_flags, kReference, Primitive::kPrimNot, rl_dest, rl_src[0]);
break;
+ case Instruction::IGET_WIDE_QUICK:
case Instruction::IGET_WIDE:
// kPrimLong and kPrimDouble share the same entrypoints.
if (rl_dest.fp) {
@@ -803,6 +806,7 @@
}
break;
+ case Instruction::IGET_QUICK:
case Instruction::IGET:
if (rl_dest.fp) {
GenIGet(mir, opt_flags, kSingle, Primitive::kPrimFloat, rl_dest, rl_src[0]);
@@ -811,43 +815,54 @@
}
break;
+ case Instruction::IGET_CHAR_QUICK:
case Instruction::IGET_CHAR:
GenIGet(mir, opt_flags, kUnsignedHalf, Primitive::kPrimChar, rl_dest, rl_src[0]);
break;
+ case Instruction::IGET_SHORT_QUICK:
case Instruction::IGET_SHORT:
GenIGet(mir, opt_flags, kSignedHalf, Primitive::kPrimShort, rl_dest, rl_src[0]);
break;
+ case Instruction::IGET_BOOLEAN_QUICK:
case Instruction::IGET_BOOLEAN:
GenIGet(mir, opt_flags, kUnsignedByte, Primitive::kPrimBoolean, rl_dest, rl_src[0]);
break;
+ case Instruction::IGET_BYTE_QUICK:
case Instruction::IGET_BYTE:
GenIGet(mir, opt_flags, kSignedByte, Primitive::kPrimByte, rl_dest, rl_src[0]);
break;
+ case Instruction::IPUT_WIDE_QUICK:
case Instruction::IPUT_WIDE:
GenIPut(mir, opt_flags, rl_src[0].fp ? kDouble : k64, rl_src[0], rl_src[1]);
break;
+ case Instruction::IPUT_OBJECT_QUICK:
case Instruction::IPUT_OBJECT:
GenIPut(mir, opt_flags, kReference, rl_src[0], rl_src[1]);
break;
+ case Instruction::IPUT_QUICK:
case Instruction::IPUT:
GenIPut(mir, opt_flags, rl_src[0].fp ? kSingle : k32, rl_src[0], rl_src[1]);
break;
+ case Instruction::IPUT_BYTE_QUICK:
+ case Instruction::IPUT_BOOLEAN_QUICK:
case Instruction::IPUT_BYTE:
case Instruction::IPUT_BOOLEAN:
GenIPut(mir, opt_flags, kUnsignedByte, rl_src[0], rl_src[1]);
break;
+ case Instruction::IPUT_CHAR_QUICK:
case Instruction::IPUT_CHAR:
GenIPut(mir, opt_flags, kUnsignedHalf, rl_src[0], rl_src[1]);
break;
+ case Instruction::IPUT_SHORT_QUICK:
case Instruction::IPUT_SHORT:
GenIPut(mir, opt_flags, kSignedHalf, rl_src[0], rl_src[1]);
break;
@@ -921,9 +936,12 @@
GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kDirect, true));
break;
+ case Instruction::INVOKE_VIRTUAL_QUICK:
case Instruction::INVOKE_VIRTUAL:
GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kVirtual, false));
break;
+
+ case Instruction::INVOKE_VIRTUAL_RANGE_QUICK:
case Instruction::INVOKE_VIRTUAL_RANGE:
GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kVirtual, true));
break;
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index 19c2a5a..fcf4716 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -542,6 +542,11 @@
void QuickCompiler::InitCompilationUnit(CompilationUnit& cu) const {
// Disable optimizations according to instruction set.
cu.disable_opt |= kDisabledOptimizationsPerISA[cu.instruction_set];
+ if (Runtime::Current()->UseJit()) {
+ // Disable these optimizations for JIT until quickened byte codes are done being implemented.
+ // TODO: Find a cleaner way to do this.
+ cu.disable_opt |= 1u << kLocalValueNumbering;
+ }
}
void QuickCompiler::Init() {
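
For readers unfamiliar with the disable_opt bitmask used in InitCompilationUnit above,
here is a minimal, self-contained illustration of the pattern; the enumerator names and
the use_jit flag are placeholders standing in for ART's own definitions:

  #include <cstdint>
  #include <iostream>

  // Placeholder pass list; each enumerator names the bit position that disables it.
  enum OptimizationPass {
    kLocalValueNumbering = 0,
    kLoadStoreElimination = 1,
  };

  int main() {
    uint32_t disable_opt = 0;
    const bool use_jit = true;  // stands in for Runtime::Current()->UseJit()

    if (use_jit) {
      // Setting the pass's bit marks it as disabled for this compilation unit.
      disable_opt |= 1u << kLocalValueNumbering;
    }

    // A pass runs only while its bit stays clear.
    const bool run_lvn = (disable_opt & (1u << kLocalValueNumbering)) == 0;
    std::cout << "run local value numbering: " << std::boolalpha << run_lvn << "\n";
    return 0;
  }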