Detect special methods at the end of verification.
This moves special method handling to the method inliner
and prepares for eventual inlining of these methods.
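
For illustration only (not part of this change), here is a minimal
sketch of how a generated getter such as "int getX() { return x; }"
could be recorded through the new InlineMethod/InlineIGetIPutData
layout; the opcode value and field index used below are placeholders:

    // Sketch only; mirrors the structures added in dex_file_method_inliner.h.
    #include <cassert>
    #include <cstdint>

    union InlineIGetIPutData {
      uint32_t data;
      struct {
        uint16_t field;
        uint32_t op_size : 3;    // OpSize
        uint32_t is_object : 1;
        uint32_t object_arg : 4;
        uint32_t src_arg : 4;    // iput only
        uint32_t reserved : 4;
      } d;
    };
    static_assert(sizeof(InlineIGetIPutData) == sizeof(uint32_t),
                  "InlineIGetIPutData must stay 32 bits");

    struct InlineMethod {
      uint16_t opcode;  // a kInlineOp* or kIntrinsic* value
      uint16_t flags;   // kInlineIntrinsic and/or kInlineSpecial
      uint32_t data;    // packed payload, e.g. InlineIGetIPutData::data
    };

    int main() {
      // "int getX() { return x; }" verifies to
      //   iget v0, v1, Foo.x   (v1 is "this", the only argument)
      //   return v0
      // and AnalyseIGetMethod() would pack it roughly like this:
      InlineIGetIPutData data;
      data.d.field = 42u;      // placeholder field index (vC)
      data.d.op_size = 0u;     // kWord
      data.d.is_object = 0u;
      data.d.object_arg = 0u;  // "this" is argument 0
      data.d.src_arg = 0u;     // unused for iget
      data.d.reserved = 0u;
      InlineMethod getter = {5u /* placeholder for kInlineOpIGet */,
                             0x0002u /* kInlineSpecial */,
                             data.data};

      // GenSpecialCase() later unpacks the same union to drive codegen.
      InlineIGetIPutData unpacked;
      unpacked.data = getter.data;
      assert(unpacked.d.field == 42u && !unpacked.d.is_object);
      return 0;
    }
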
Change-Id: I51c51b940fb7bc714e33135cd61be69467861352
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 23ea407..8226b24 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -18,6 +18,7 @@
#include "arm_lir.h"
#include "codegen_arm.h"
+#include "dex/quick/dex_file_method_inliner.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"
@@ -217,58 +218,43 @@
* Special-case code generation for simple non-throwing leaf methods.
*/
void ArmMir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir,
- SpecialCaseHandler special_case) {
+ const InlineMethod& special) {
+ // TODO: Generate the method using only the data in special. (Requires FastInstance() field
+ // validation in DexFileMethodInliner::AnalyseIGetMethod()/AnalyseIPutMethod().)
+ DCHECK(special.flags & kInlineSpecial);
current_dalvik_offset_ = mir->offset;
MIR* next_mir = NULL;
- switch (special_case) {
- case kNullMethod:
+ switch (special.opcode) {
+ case kInlineOpNop:
DCHECK(mir->dalvikInsn.opcode == Instruction::RETURN_VOID);
next_mir = mir;
break;
- case kConstFunction:
+ case kInlineOpConst:
ArmMir2Lir::GenPrintLabel(mir);
- LoadConstant(rARM_RET0, mir->dalvikInsn.vB);
+ LoadConstant(rARM_RET0, special.data);
next_mir = GetNextMir(&bb, mir);
break;
- case kIGet:
- next_mir = SpecialIGet(&bb, mir, kWord, false, false);
+ case kInlineOpIGet: {
+ InlineIGetIPutData data;
+ data.data = special.data;
+ OpSize op_size = static_cast<OpSize>(data.d.op_size);
+      DCHECK_NE(data.d.op_size, kDouble);  // The inliner doesn't distinguish kDouble; it uses kLong.
+ bool long_or_double = (data.d.op_size == kLong);
+ bool is_object = data.d.is_object;
+ next_mir = SpecialIGet(&bb, mir, op_size, long_or_double, is_object);
break;
- case kIGetBoolean:
- case kIGetByte:
- next_mir = SpecialIGet(&bb, mir, kUnsignedByte, false, false);
+ }
+ case kInlineOpIPut: {
+ InlineIGetIPutData data;
+ data.data = special.data;
+ OpSize op_size = static_cast<OpSize>(data.d.op_size);
+      DCHECK_NE(data.d.op_size, kDouble);  // The inliner doesn't distinguish kDouble; it uses kLong.
+ bool long_or_double = (data.d.op_size == kLong);
+ bool is_object = data.d.is_object;
+ next_mir = SpecialIPut(&bb, mir, op_size, long_or_double, is_object);
break;
- case kIGetObject:
- next_mir = SpecialIGet(&bb, mir, kWord, false, true);
- break;
- case kIGetChar:
- next_mir = SpecialIGet(&bb, mir, kUnsignedHalf, false, false);
- break;
- case kIGetShort:
- next_mir = SpecialIGet(&bb, mir, kSignedHalf, false, false);
- break;
- case kIGetWide:
- next_mir = SpecialIGet(&bb, mir, kLong, true, false);
- break;
- case kIPut:
- next_mir = SpecialIPut(&bb, mir, kWord, false, false);
- break;
- case kIPutBoolean:
- case kIPutByte:
- next_mir = SpecialIPut(&bb, mir, kUnsignedByte, false, false);
- break;
- case kIPutObject:
- next_mir = SpecialIPut(&bb, mir, kWord, false, true);
- break;
- case kIPutChar:
- next_mir = SpecialIPut(&bb, mir, kUnsignedHalf, false, false);
- break;
- case kIPutShort:
- next_mir = SpecialIPut(&bb, mir, kSignedHalf, false, false);
- break;
- case kIPutWide:
- next_mir = SpecialIPut(&bb, mir, kLong, true, false);
- break;
- case kIdentity:
+ }
+ case kInlineOpReturnArg:
next_mir = SpecialIdentity(mir);
break;
default:
@@ -276,7 +262,7 @@
}
if (next_mir != NULL) {
current_dalvik_offset_ = next_mir->offset;
- if (special_case != kIdentity) {
+ if (special.opcode != kInlineOpReturnArg) {
ArmMir2Lir::GenPrintLabel(next_mir);
}
NewLIR1(kThumbBx, rARM_LR);
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 25ddc94..c04f1d6 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -135,7 +135,7 @@
void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
- void GenSpecialCase(BasicBlock* bb, MIR* mir, SpecialCaseHandler special_case);
+ void GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special);
// Required for target - single operation generators.
LIR* OpUnconditionalBranch(LIR* target);
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index bae8ff3..65286d5 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -19,6 +19,8 @@
#include "gc_map.h"
#include "mapping_table.h"
#include "mir_to_lir-inl.h"
+#include "dex/quick/dex_file_method_inliner.h"
+#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "dex/verified_methods_data.h"
#include "verifier/dex_gc_map.h"
#include "verifier/method_verifier.h"
@@ -983,8 +985,7 @@
core_spill_mask_(0),
fp_spill_mask_(0),
first_lir_insn_(NULL),
- last_lir_insn_(NULL),
- inliner_(nullptr) {
+ last_lir_insn_(NULL) {
promotion_map_ = static_cast<PromotionMap*>
(arena_->Alloc((cu_->num_dalvik_registers + cu_->num_compiler_temps + 1) *
sizeof(promotion_map_[0]), ArenaAllocator::kAllocRegAlloc));
@@ -1000,15 +1001,16 @@
/* Allocate Registers using simple local allocation scheme */
SimpleRegAlloc();
- if (mir_graph_->IsSpecialCase()) {
- /*
- * Custom codegen for special cases. If for any reason the
- * special codegen doesn't succeed, first_lir_insn_ will
- * set to NULL;
- */
- cu_->NewTimingSplit("SpecialMIR2LIR");
- SpecialMIR2LIR(mir_graph_->GetSpecialCase());
- }
+ /*
+ * Custom codegen for special cases. If for any reason the
+   * special codegen doesn't succeed, first_lir_insn_ will be
+   * set to NULL.
+ */
+  // TODO: Clean up GenSpecial() and return true only if a special implementation is emitted.
+  // Currently, GenSpecial() returns IsSpecial() but doesn't check whether SpecialMIR2LIR()
+  // actually emitted any code.
+ DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
+ cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
+ ->GenSpecial(this, cu_->method_idx);
/* Convert MIR to LIR, etc. */
if (first_lir_insn_ == NULL) {
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index ba98225..3c331e1 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -18,16 +18,19 @@
#include "base/macros.h"
#include "base/mutex.h"
#include "base/mutex-inl.h"
+#include "locks.h"
#include "thread.h"
#include "thread-inl.h"
#include "dex/mir_graph.h"
+#include "dex_instruction.h"
+#include "dex_instruction-inl.h"
#include "dex_file_method_inliner.h"
namespace art {
const uint32_t DexFileMethodInliner::kIndexUnresolved;
-const char* DexFileMethodInliner::kClassCacheNames[] = {
+const char* const DexFileMethodInliner::kClassCacheNames[] = {
"Z", // kClassCacheBoolean
"B", // kClassCacheByte
"C", // kClassCacheChar
@@ -51,7 +54,7 @@
"Lsun/misc/Unsafe;", // kClassCacheSunMiscUnsafe
};
-const char* DexFileMethodInliner::kNameCacheNames[] = {
+const char* const DexFileMethodInliner::kNameCacheNames[] = {
"reverseBytes", // kNameCacheReverseBytes
"doubleToRawLongBits", // kNameCacheDoubleToRawLongBits
"longBitsToDouble", // kNameCacheLongBitsToDouble
@@ -164,7 +167,7 @@
const DexFileMethodInliner::IntrinsicDef DexFileMethodInliner::kIntrinsicMethods[] = {
#define INTRINSIC(c, n, p, o, d) \
- { { kClassCache ## c, kNameCache ## n, kProtoCache ## p }, { o, d } }
+ { { kClassCache ## c, kNameCache ## n, kProtoCache ## p }, { o, kInlineIntrinsic, d } }
INTRINSIC(JavaLangDouble, DoubleToRawLongBits, D_J, kIntrinsicDoubleCvt, 0),
INTRINSIC(JavaLangDouble, LongBitsToDouble, J_D, kIntrinsicDoubleCvt, 0),
@@ -245,17 +248,71 @@
DexFileMethodInliner::~DexFileMethodInliner() {
}
+bool DexFileMethodInliner::AnalyseMethodCode(uint32_t method_idx,
+ const DexFile::CodeItem* code_item) {
+ // We currently support only plain return or 2-instruction methods.
+
+ DCHECK_NE(code_item->insns_size_in_code_units_, 0u);
+ const Instruction* instruction = Instruction::At(code_item->insns_);
+ Instruction::Code opcode = instruction->Opcode();
+
+ switch (opcode) {
+ case Instruction::RETURN_VOID:
+ return AddInlineMethod(method_idx, kInlineOpNop, kInlineSpecial, 0);
+ case Instruction::RETURN:
+ case Instruction::RETURN_OBJECT:
+ return AnalyseReturnMethod(method_idx, code_item, kWord);
+ case Instruction::RETURN_WIDE:
+ return AnalyseReturnMethod(method_idx, code_item, kLong);
+ case Instruction::CONST:
+ case Instruction::CONST_4:
+ case Instruction::CONST_16:
+ case Instruction::CONST_HIGH16:
+ // TODO: Support wide constants (RETURN_WIDE).
+ return AnalyseConstMethod(method_idx, code_item);
+ case Instruction::IGET:
+ return AnalyseIGetMethod(method_idx, code_item, kWord, false);
+ case Instruction::IGET_OBJECT:
+ return AnalyseIGetMethod(method_idx, code_item, kWord, true);
+ case Instruction::IGET_BOOLEAN:
+ case Instruction::IGET_BYTE:
+ return AnalyseIGetMethod(method_idx, code_item, kSignedByte, false);
+ case Instruction::IGET_CHAR:
+ return AnalyseIGetMethod(method_idx, code_item, kUnsignedHalf, false);
+ case Instruction::IGET_SHORT:
+ return AnalyseIGetMethod(method_idx, code_item, kSignedHalf, false);
+ case Instruction::IGET_WIDE:
+ return AnalyseIGetMethod(method_idx, code_item, kLong, false);
+ case Instruction::IPUT:
+ return AnalyseIPutMethod(method_idx, code_item, kWord, false);
+ case Instruction::IPUT_OBJECT:
+ return AnalyseIPutMethod(method_idx, code_item, kWord, true);
+ case Instruction::IPUT_BOOLEAN:
+ case Instruction::IPUT_BYTE:
+ return AnalyseIPutMethod(method_idx, code_item, kSignedByte, false);
+ case Instruction::IPUT_CHAR:
+ return AnalyseIPutMethod(method_idx, code_item, kUnsignedHalf, false);
+ case Instruction::IPUT_SHORT:
+ return AnalyseIPutMethod(method_idx, code_item, kSignedHalf, false);
+ case Instruction::IPUT_WIDE:
+ return AnalyseIPutMethod(method_idx, code_item, kLong, false);
+ default:
+ return false;
+ }
+}
+
bool DexFileMethodInliner::IsIntrinsic(uint32_t method_index) {
ReaderMutexLock mu(Thread::Current(), lock_);
- return intrinsics_.find(method_index) != intrinsics_.end();
+ auto it = inline_methods_.find(method_index);
+ return it != inline_methods_.end() && (it->second.flags & kInlineIntrinsic) != 0;
}
bool DexFileMethodInliner::GenIntrinsic(Mir2Lir* backend, CallInfo* info) {
- Intrinsic intrinsic;
+ InlineMethod intrinsic;
{
ReaderMutexLock mu(Thread::Current(), lock_);
- auto it = intrinsics_.find(info->index);
- if (it == intrinsics_.end()) {
+ auto it = inline_methods_.find(info->index);
+ if (it == inline_methods_.end() || (it->second.flags & kInlineIntrinsic) == 0) {
return false;
}
intrinsic = it->second;
@@ -306,6 +363,27 @@
}
}
+bool DexFileMethodInliner::IsSpecial(uint32_t method_index) {
+ ReaderMutexLock mu(Thread::Current(), lock_);
+ auto it = inline_methods_.find(method_index);
+ return it != inline_methods_.end() && (it->second.flags & kInlineSpecial) != 0;
+}
+
+bool DexFileMethodInliner::GenSpecial(Mir2Lir* backend, uint32_t method_idx) {
+ InlineMethod special;
+ {
+ ReaderMutexLock mu(Thread::Current(), lock_);
+ auto it = inline_methods_.find(method_idx);
+ if (it == inline_methods_.end() || (it->second.flags & kInlineSpecial) == 0) {
+ return false;
+ }
+ special = it->second;
+ }
+  // TODO: Return true only if a special implementation is emitted.
+ backend->SpecialMIR2LIR(special);
+ return true;
+}
+
uint32_t DexFileMethodInliner::FindClassIndex(const DexFile* dex_file, IndexCache* cache,
ClassCacheIndex index) {
uint32_t* class_index = &cache->class_indexes[index];
@@ -418,13 +496,148 @@
DCHECK(dex_file_ == nullptr);
IndexCache cache;
for (const IntrinsicDef& def : kIntrinsicMethods) {
- uint32_t method_id = FindMethodIndex(dex_file, &cache, def.method_def);
- if (method_id != kIndexNotFound) {
- DCHECK(intrinsics_.find(method_id) == intrinsics_.end());
- intrinsics_[method_id] = def.intrinsic;
+ uint32_t method_idx = FindMethodIndex(dex_file, &cache, def.method_def);
+ if (method_idx != kIndexNotFound) {
+ DCHECK(inline_methods_.find(method_idx) == inline_methods_.end());
+ inline_methods_[method_idx] = def.intrinsic;
}
}
dex_file_ = dex_file;
}
+bool DexFileMethodInliner::AddInlineMethod(int32_t method_idx, InlineMethodOpcode opcode,
+ uint16_t flags, uint32_t data) {
+ WriterMutexLock mu(Thread::Current(), lock_);
+ InlineMethod* im = &inline_methods_[method_idx];
+ if (im->flags == 0) {
+ *im = InlineMethod{opcode, flags, data};
+ return true;
+ } else {
+ // TODO: Warning about a method being already inlined?
+ LOG(WARNING) << "Inliner: " << PrettyMethod(method_idx, *dex_file_) << " already inline, "
+ << im->flags;
+ return false;
+ }
+}
+
+bool DexFileMethodInliner::AnalyseReturnMethod(int32_t method_idx,
+ const DexFile::CodeItem* code_item, OpSize size) {
+ const Instruction* return_instruction = Instruction::At(code_item->insns_);
+ if (return_instruction->Opcode() == Instruction::RETURN_VOID) {
+ return AddInlineMethod(method_idx, kInlineOpNop, kInlineSpecial, 0);
+ }
+ uint32_t reg = return_instruction->VRegA_11x();
+ uint32_t arg_start = code_item->registers_size_ - code_item->ins_size_;
+ DCHECK_GE(reg, arg_start);
+ DCHECK_LT(size == kLong ? reg + 1 : reg, code_item->registers_size_);
+
+ InlineReturnArgData data;
+ data.d.arg = reg - arg_start;
+ data.d.op_size = size;
+ data.d.reserved = 0;
+ return AddInlineMethod(method_idx, kInlineOpReturnArg, kInlineSpecial, data.data);
+}
+
+bool DexFileMethodInliner::AnalyseConstMethod(int32_t method_idx,
+ const DexFile::CodeItem* code_item) {
+ const Instruction* instruction = Instruction::At(code_item->insns_);
+ const Instruction* return_instruction = instruction->Next();
+ Instruction::Code return_opcode = return_instruction->Opcode();
+ if (return_opcode != Instruction::RETURN &&
+ return_opcode != Instruction::RETURN_OBJECT) {
+ return false;
+ }
+
+ uint32_t return_reg = return_instruction->VRegA_11x();
+ DCHECK_LT(return_reg, code_item->registers_size_);
+
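+  // Decode the const instruction; vA is the destination register, vB the literal value.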
+ uint32_t vA, vB, dummy;
+ uint64_t dummy_wide;
+ instruction->Decode(vA, vB, dummy_wide, dummy, nullptr);
+ if (instruction->Opcode() == Instruction::CONST_HIGH16) {
+ vB <<= 16;
+ }
+ DCHECK_LT(vA, code_item->registers_size_);
+ if (vA != return_reg) {
+ return false; // Not returning the value set by const?
+ }
+ if (return_opcode == Instruction::RETURN_OBJECT && vB != 0) {
+ return false; // Returning non-null reference constant?
+ }
+ return AddInlineMethod(method_idx, kInlineOpConst, kInlineSpecial, vB);
+}
+
+bool DexFileMethodInliner::AnalyseIGetMethod(int32_t method_idx, const DexFile::CodeItem* code_item,
+ OpSize size, bool is_object) {
+ const Instruction* instruction = Instruction::At(code_item->insns_);
+ Instruction::Code opcode = instruction->Opcode();
+ const Instruction* return_instruction = instruction->Next();
+ Instruction::Code return_opcode = return_instruction->Opcode();
+ if (!(return_opcode == Instruction::RETURN && size != kLong) &&
+ !(return_opcode == Instruction::RETURN_WIDE && size == kLong) &&
+ !(return_opcode == Instruction::RETURN_OBJECT && opcode == Instruction::IGET_OBJECT)) {
+ return false;
+ }
+
+ uint32_t return_reg = return_instruction->VRegA_11x();
+ DCHECK_LT(return_opcode == Instruction::RETURN_WIDE ? return_reg + 1 : return_reg,
+ code_item->registers_size_);
+
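+  // Decode the iget; vA is the destination register, vB the object register, vC the field index.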
+ uint32_t vA, vB, vC;
+ uint64_t dummy_wide;
+ instruction->Decode(vA, vB, dummy_wide, vC, nullptr);
+ uint32_t arg_start = code_item->registers_size_ - code_item->ins_size_;
+ DCHECK_GE(vB, arg_start);
+ DCHECK_LT(vB, code_item->registers_size_);
+ DCHECK_LT(size == kLong ? vA + 1 : vA, code_item->registers_size_);
+ if (vA != return_reg) {
+ return false; // Not returning the value retrieved by iget?
+ }
+
+ // TODO: Check that the field is FastInstance().
+
+ InlineIGetIPutData data;
+ data.d.field = vC;
+ data.d.op_size = size;
+ data.d.is_object = is_object;
+ data.d.object_arg = vB - arg_start; // Allow iget on any register, not just "this"
+ data.d.src_arg = 0;
+ data.d.reserved = 0;
+ return AddInlineMethod(method_idx, kInlineOpIGet, kInlineSpecial, data.data);
+}
+
+bool DexFileMethodInliner::AnalyseIPutMethod(int32_t method_idx, const DexFile::CodeItem* code_item,
+ OpSize size, bool is_object) {
+ const Instruction* instruction = Instruction::At(code_item->insns_);
+ const Instruction* return_instruction = instruction->Next();
+ if (return_instruction->Opcode() != Instruction::RETURN_VOID) {
+ // TODO: Support returning an argument.
+ // This is needed by builder classes and generated accessor setters.
+ // builder.setX(value): iput value, this, fieldX; return-object this;
+ // object.access$nnn(value): iput value, this, fieldX; return value;
+ // Use InlineIGetIPutData::d::reserved to hold the information.
+ return false;
+ }
+
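+  // Decode the iput; vA is the source register, vB the object register, vC the field index.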
+ uint32_t vA, vB, vC;
+ uint64_t dummy_wide;
+ instruction->Decode(vA, vB, dummy_wide, vC, nullptr);
+ uint32_t arg_start = code_item->registers_size_ - code_item->ins_size_;
+ DCHECK_GE(vB, arg_start);
+ DCHECK_GE(vA, arg_start);
+ DCHECK_LT(vB, code_item->registers_size_);
+ DCHECK_LT(size == kLong ? vA + 1 : vA, code_item->registers_size_);
+
+ // TODO: Check that the field (vC) is FastInstance().
+
+ InlineIGetIPutData data;
+ data.d.field = vC;
+ data.d.op_size = size;
+ data.d.is_object = is_object;
+ data.d.object_arg = vB - arg_start; // Allow iput on any register, not just "this"
+ data.d.src_arg = vA - arg_start;
+ data.d.reserved = 0;
+ return AddInlineMethod(method_idx, kInlineOpIPut, kInlineSpecial, data.data);
+}
+
} // namespace art
diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h
index 9198f2a..06de4fe 100644
--- a/compiler/dex/quick/dex_file_method_inliner.h
+++ b/compiler/dex/quick/dex_file_method_inliner.h
@@ -21,15 +21,16 @@
#include <map>
#include "base/mutex.h"
#include "base/macros.h"
+#include "dex/compiler_enums.h"
+#include "dex_file.h"
#include "locks.h"
namespace art {
class CallInfo;
-class DexFile;
class Mir2Lir;
-enum IntrinsicOpcode {
+enum InlineMethodOpcode : uint16_t {
kIntrinsicDoubleCvt,
kIntrinsicFloatCvt,
kIntrinsicReverseBytes,
@@ -47,8 +48,26 @@
kIntrinsicCas,
kIntrinsicUnsafeGet,
kIntrinsicUnsafePut,
+
+ kInlineOpNop,
+ kInlineOpReturnArg,
+ kInlineOpConst,
+ kInlineOpIGet,
+ kInlineOpIPut,
};
+enum InlineMethodFlags {
+ kInlineIntrinsic = 0x0001,
+ kInlineSpecial = 0x0002,
+};
+
+struct InlineMethod {
+ uint16_t opcode;
+ uint16_t flags;
+ uint32_t data;
+};
+
+// IntrinsicFlags are stored in InlineMethod::data
enum IntrinsicFlags {
kIntrinsicFlagNone = 0,
@@ -73,10 +92,32 @@
kIntrinsicFlagIsOrdered = 8,
};
-struct Intrinsic {
- IntrinsicOpcode opcode;
+// Check that OpSize fits into 3 bits (at least the values the inliner uses).
+COMPILE_ASSERT(kWord < 8 && kLong < 8 && kSingle < 8 && kDouble < 8 && kUnsignedHalf < 8 &&
+ kSignedHalf < 8 && kUnsignedByte < 8 && kSignedByte < 8, op_size_field_too_narrow);
+
+union InlineIGetIPutData {
uint32_t data;
+ struct {
+ uint16_t field;
+ uint32_t op_size : 3; // OpSize
+ uint32_t is_object : 1;
+ uint32_t object_arg : 4;
+ uint32_t src_arg : 4; // iput only
+ uint32_t reserved : 4;
+ } d;
};
+COMPILE_ASSERT(sizeof(InlineIGetIPutData) == sizeof(uint32_t), InvalidSizeOfInlineIGetIPutData);
+
+union InlineReturnArgData {
+ uint32_t data;
+ struct {
+ uint16_t arg;
+ uint32_t op_size : 3; // OpSize
+ uint32_t reserved : 13;
+ } d;
+};
+COMPILE_ASSERT(sizeof(InlineReturnArgData) == sizeof(uint32_t), InvalidSizeOfInlineReturnArgData);
/**
* Handles inlining of methods from a particular DexFile.
@@ -96,6 +137,16 @@
~DexFileMethodInliner();
/**
+ * Analyse method code to determine if the method is a candidate for inlining.
+ * If it is, record its data for later.
+ *
+ * @param method_idx the index of the inlining candidate.
+ * @param code_item a previously verified code item of the method.
+ */
+ bool AnalyseMethodCode(uint32_t method_idx,
+ const DexFile::CodeItem* code_item) LOCKS_EXCLUDED(lock_);
+
+ /**
* Check whether a particular method index corresponds to an intrinsic function.
*/
bool IsIntrinsic(uint32_t method_index) LOCKS_EXCLUDED(lock_);
@@ -105,6 +156,16 @@
*/
bool GenIntrinsic(Mir2Lir* backend, CallInfo* info) LOCKS_EXCLUDED(lock_);
+ /**
+ * Check whether a particular method index corresponds to a special function.
+ */
+ bool IsSpecial(uint32_t method_index) LOCKS_EXCLUDED(lock_);
+
+ /**
+ * Generate code for a special function.
+ */
+ bool GenSpecial(Mir2Lir* backend, uint32_t method_idx);
+
private:
/**
* To avoid multiple lookups of a class by its descriptor, we cache its
@@ -261,7 +322,7 @@
*/
struct IntrinsicDef {
MethodDef method_def;
- Intrinsic intrinsic;
+ InlineMethod intrinsic;
};
/**
@@ -281,8 +342,8 @@
uint32_t proto_indexes[kProtoCacheLast - kProtoCacheFirst];
};
- static const char* kClassCacheNames[];
- static const char* kNameCacheNames[];
+ static const char* const kClassCacheNames[];
+ static const char* const kNameCacheNames[];
static const ProtoDef kProtoCacheDefs[];
static const IntrinsicDef kIntrinsicMethods[];
@@ -307,11 +368,23 @@
friend class DexFileToMethodInlinerMap;
+ bool AddInlineMethod(int32_t method_idx, InlineMethodOpcode opcode,
+ uint16_t flags, uint32_t data) LOCKS_EXCLUDED(lock_);
+
+ bool AnalyseReturnMethod(int32_t method_idx, const DexFile::CodeItem* code_item,
+ OpSize size) LOCKS_EXCLUDED(lock_);
+ bool AnalyseConstMethod(int32_t method_idx, const DexFile::CodeItem* code_item
+ ) LOCKS_EXCLUDED(lock_);
+ bool AnalyseIGetMethod(int32_t method_idx, const DexFile::CodeItem* code_item,
+ OpSize size, bool is_object) LOCKS_EXCLUDED(lock_);
+ bool AnalyseIPutMethod(int32_t method_idx, const DexFile::CodeItem* code_item,
+ OpSize size, bool is_object) LOCKS_EXCLUDED(lock_);
+
ReaderWriterMutex lock_;
/*
   * Maps method indexes (for the particular DexFile) to Intrinsic definitions.
*/
- std::map<uint32_t, Intrinsic> intrinsics_ GUARDED_BY(lock_);
+ std::map<uint32_t, InlineMethod> inline_methods_ GUARDED_BY(lock_);
const DexFile* dex_file_;
DISALLOW_COPY_AND_ASSIGN(DexFileMethodInliner);
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index ee6f9c8..82a1932 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -1239,12 +1239,9 @@
void Mir2Lir::GenInvoke(CallInfo* info) {
if (!(info->opt_flags & MIR_INLINED)) {
- if (inliner_ == nullptr) {
- QuickCompilerContext* context = reinterpret_cast<QuickCompilerContext*>(
- cu_->compiler_driver->GetCompilerContext());
- inliner_ = context->GetInlinerMap()->GetMethodInliner(cu_->dex_file);
- }
- if (inliner_->GenIntrinsic(this, info)) {
+ DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
+ if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
+ ->GenIntrinsic(this, info)) {
return;
}
}
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 21d5563..14f49aa 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -24,7 +24,7 @@
namespace art {
void MipsMir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir,
- SpecialCaseHandler special_case) {
+ const InlineMethod& special) {
// TODO
}
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 450a44f..97dc2b3 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -133,7 +133,7 @@
void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
void GenPackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
void GenSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
- void GenSpecialCase(BasicBlock* bb, MIR* mir, SpecialCaseHandler special_case);
+ void GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special);
// Required for target - single operation generators.
LIR* OpUnconditionalBranch(LIR* target);
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 19d04be..c5bbae1 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -789,7 +789,8 @@
return false;
}
-void Mir2Lir::SpecialMIR2LIR(SpecialCaseHandler special_case) {
+void Mir2Lir::SpecialMIR2LIR(const InlineMethod& special) {
+ cu_->NewTimingSplit("SpecialMIR2LIR");
// Find the first DalvikByteCode block.
int num_reachable_blocks = mir_graph_->GetNumReachableBlocks();
BasicBlock*bb = NULL;
@@ -815,7 +816,7 @@
ResetDefTracking();
ClobberAllRegs();
- GenSpecialCase(bb, mir, special_case);
+ GenSpecialCase(bb, mir, special);
}
void Mir2Lir::MethodMIR2LIR() {
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 8415cbf..2a25f2f 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -105,6 +105,7 @@
struct BasicBlock;
struct CallInfo;
struct CompilationUnit;
+struct InlineMethod;
struct MIR;
struct LIR;
struct RegLocation;
@@ -582,7 +583,7 @@
void CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list);
void HandleExtendedMethodMIR(BasicBlock* bb, MIR* mir);
bool MethodBlockCodeGen(BasicBlock* bb);
- void SpecialMIR2LIR(SpecialCaseHandler special_case);
+ void SpecialMIR2LIR(const InlineMethod& special);
void MethodMIR2LIR();
@@ -703,7 +704,7 @@
virtual void GenSparseSwitch(MIR* mir, DexOffset table_offset,
RegLocation rl_src) = 0;
virtual void GenSpecialCase(BasicBlock* bb, MIR* mir,
- SpecialCaseHandler special_case) = 0;
+ const InlineMethod& special) = 0;
virtual void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
RegLocation rl_index, RegLocation rl_dest, int scale) = 0;
virtual void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
@@ -826,8 +827,6 @@
unsigned int fp_spill_mask_;
LIR* first_lir_insn_;
LIR* last_lir_insn_;
- // Lazily retrieved method inliner for intrinsics.
- DexFileMethodInliner* inliner_;
}; // Class Mir2Lir
} // namespace art
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 17924b0..4267b5b 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -23,7 +23,7 @@
namespace art {
void X86Mir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir,
- SpecialCaseHandler special_case) {
+ const InlineMethod& special) {
// TODO
}
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 6552607..2d625c2 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -133,7 +133,7 @@
void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
- void GenSpecialCase(BasicBlock* bb, MIR* mir, SpecialCaseHandler special_case);
+ void GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special);
// Single operation generators.
LIR* OpUnconditionalBranch(LIR* target);