Make slow paths easier to write
This adds a class LIRSlowPath that allows for deferred compilation
of slow paths. Using this object you can add code that is reached
through a conditional forward branch but emitted out of line, after
the main body of the method. The intention is to move slow paths out
of the main flow and avoid branch-over constructs, whose guarding
branches are almost always taken. The forward branch to the slow
path code will be predicted not-taken, which is correct most of the
time. The slow path code returns to the instruction after the
original branch using an unconditional branch.
This is used in the following opcodes: sput, sget, const-string,
check-cast, const-class.
Others will follow.
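For illustration, a derived slow path might look as follows. This is
a minimal sketch: ResolveSlowPath and the elided helper call are
hypothetical, while LIRSlowPath, GetCurrentDexPc(),
GenerateTargetLabel(), m2l_, cont_ and OpUnconditionalBranch() come
from the declarations in this change.

  // Hypothetical slow path that emits its out-of-line code and then
  // branches back to the fast path at cont_.
  class ResolveSlowPath : public Mir2Lir::LIRSlowPath {
   public:
    ResolveSlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont)
        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont) {}

    void Compile() {
      GenerateTargetLabel();               // emits the "fromfast:" label
      // ... emit the slow path work here, e.g. a runtime helper call ...
      m2l_->OpUnconditionalBranch(cont_);  // emits "B cont"
    }
  };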
Bug: 10864890
Change-Id: I17130c5dc20d369bc6bbf50b8cf04343263e888e
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 81053a3..57a968c 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -258,6 +258,63 @@
bool first_in_pair;
};
+ //
+ // Slow paths. This object is used to generate a sequence of code that is executed in the
+ // slow path. For example, resolving a string or class is slow, but it only needs to be
+ // done once (after that, the entry is resolved and the fast path succeeds). We want
+ // such slow paths to be placed out of line, and not to require a (probably mispredicted)
+ // conditional forward branch over them.
+ //
+ // If you want to create a slow path, declare a class derived from LIRSlowPath and provide
+ // the Compile() function. It will be called to emit the slow path code near the end of
+ // the code generated for the method.
+ //
+ // The basic flow for a slow path is:
+ //
+ // CMP reg, #value
+ // BEQ fromfast
+ // cont:
+ // ...
+ // fast path code
+ // ...
+ // more code
+ // ...
+ // RETURN
+ //
+ // fromfast:
+ // ...
+ // slow path code
+ // ...
+ // B cont
+ //
+ // As you can see, we need two labels and two branches. The first label (fromfast) marks
+ // the start of the slow path code and is the target of the conditional branch out of the
+ // fast path. The second label (cont) is the target of the unconditional branch that
+ // transfers control back to the main flow after the slow path has completed.
+ //
+
+ class LIRSlowPath {
+ public:
+ LIRSlowPath(Mir2Lir* m2l, const DexOffset dexpc, LIR* fromfast,
+ LIR* cont = nullptr) :
+ m2l_(m2l), current_dex_pc_(dexpc), fromfast_(fromfast), cont_(cont) {
+ }
+ virtual ~LIRSlowPath() {}
+ virtual void Compile() = 0;
+
+ static void* operator new(size_t size, ArenaAllocator* arena) {
+ return arena->Alloc(size, ArenaAllocator::kAllocData);
+ }
+
+ protected:
+ LIR* GenerateTargetLabel();
+
+ Mir2Lir* const m2l_;
+ const DexOffset current_dex_pc_;
+ LIR* const fromfast_;
+ LIR* const cont_;
+ };
+
virtual ~Mir2Lir() {}
int32_t s4FromSwitchData(const void* switch_data) {
@@ -323,6 +380,10 @@
*/
size_t GetNumBytesForCompilerTempSpillRegion();
+ DexOffset GetCurrentDexPc() const {
+ return current_dalvik_offset_;
+ }
+
int ComputeFrameSize();
virtual void Materialize();
virtual CompiledMethod* GetCompiledMethod();
@@ -470,6 +531,7 @@
void HandleSuspendLaunchPads();
void HandleIntrinsicLaunchPads();
void HandleThrowLaunchPads();
+ void HandleSlowPaths();
void GenBarrier();
LIR* GenCheck(ConditionCode c_code, ThrowKind kind);
LIR* GenImmedCheck(ConditionCode c_code, int reg, int imm_val,
@@ -948,6 +1010,8 @@
virtual void GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,
RegLocation rl_dest, RegLocation rl_src);
+ void AddSlowPath(LIRSlowPath* slowpath);
+
private:
void GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
bool type_known_abstract, bool use_declaring_class,
@@ -961,6 +1025,11 @@
p->def_end = NULL;
}
+ void SetCurrentDexPc(DexOffset dexpc) {
+ current_dalvik_offset_ = dexpc;
+ }
+
+
public:
// TODO: add accessors for these.
LIR* literal_list_; // Constants.
@@ -1016,6 +1085,8 @@
unsigned int fp_spill_mask_;
LIR* first_lir_insn_;
LIR* last_lir_insn_;
+
+ GrowableArray<LIRSlowPath*> slow_paths_;
}; // Class Mir2Lir
} // namespace art
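A fast-path call site then wires the two labels together and registers
the slow path. Again a sketch: it assumes the arena allocator is
reachable as arena_, reuses the existing OpCmpImmBranch(), NewLIR0()
and kPseudoTargetLabel conventions, and r_result stands for whatever
register holds the value being tested.

  // "BEQ fromfast": branch out of line if the entry is still unresolved.
  LIR* fromfast = OpCmpImmBranch(kCondEq, r_result, 0, nullptr);
  // "cont:": the label the slow path branches back to.
  LIR* cont = NewLIR0(kPseudoTargetLabel);
  AddSlowPath(new (arena_) ResolveSlowPath(this, fromfast, cont));

Near the end of code generation, HandleSlowPaths() walks slow_paths_
and calls Compile() on each registered slow path, emitting its code
after the main body of the method.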