author Aart Bik <ajcbik@google.com> 2016-01-07 15:33:50 -0800
committer Aart Bik <ajcbik@google.com> 2016-01-12 16:39:20 -0800
commit 42249c3602c3d0243396ee3627ffb5906aa77c1e (patch)
tree 1e822a21c87331246cbde3923eac88fa315fa2cc
parent 922698ded1e80cad1ecce4c2172a88c76a216373 (diff)
Reduce code size by sharing slow paths.
Rationale: Sharing identical slow-path code reduces code size.

Background: Currently, slow paths with the same dex-pc, the same physical-register spilling code, and identical stack maps are shared (which, in practice, restricts sharing to deopt slow paths). The newly introduced mechanism is general enough to allow future improvements, e.g. sharing across different dex-pcs (by passing the dex-pc to the runtime) or even across different kinds of slow paths (by passing runtime addresses to the slow path).

Change-Id: I819615c47b4fd98440a241f681f93e4fc22d12e0
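For readers who want the mechanism in isolation: below is a minimal, self-contained C++ sketch of the caching scheme that the new SlowPathGenerator in code_generator.h implements. The names Instruction, SlowPath, and SlowPathCache are hypothetical stand-ins for the real art:: types, and the equivalence test is reduced to a single core-register mask, whereas the real NewSlowPath() also compares floating-point registers and the whole HEnvironment (and allocates from the arena rather than the heap).

#include <cstdint>
#include <iostream>
#include <map>
#include <memory>
#include <utility>
#include <vector>

// Hypothetical stand-ins for art::HDeoptimize and art::SlowPathCode; only
// the sharing logic is modeled here.
struct Instruction {
  uint32_t dex_pc;
  uint32_t live_core_regs;  // models GetLiveRegisters()->GetCoreRegisters()
};

struct SlowPath {
  explicit SlowPath(const Instruction* i) : instruction(i) {}
  const Instruction* instruction;
};

class SlowPathCache {
 public:
  // Mirrors NewSlowPath(): reuse an existing slow path when the dex-pc and
  // the equivalence predicate (here: identical live registers) both match;
  // otherwise create, record, and return a new one.
  SlowPath* NewSlowPath(const Instruction* instruction) {
    auto& candidates = map_[instruction->dex_pc];
    for (auto& entry : candidates) {
      if (entry.first->live_core_regs == instruction->live_core_regs) {
        return entry.second.get();  // can share: reuse existing slow path
      }
    }
    candidates.emplace_back(instruction, std::make_unique<SlowPath>(instruction));
    return candidates.back().second.get();
  }

 private:
  // Map from dex-pc to already existing instruction/slow-path pairs, like
  // the ArenaSafeMap in the real change.
  std::map<uint32_t,
           std::vector<std::pair<const Instruction*, std::unique_ptr<SlowPath>>>> map_;
};

int main() {
  SlowPathCache cache;
  Instruction a{10, 0x3}, b{10, 0x3}, c{10, 0x7};
  std::cout << (cache.NewSlowPath(&a) == cache.NewSlowPath(&b)) << "\n";  // 1: shared
  std::cout << (cache.NewSlowPath(&b) == cache.NewSlowPath(&c)) << "\n";  // 0: regs differ
  return 0;
}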
-rw-r--r--  compiler/optimizing/code_generator.h         | 118
-rw-r--r--  compiler/optimizing/code_generator_arm.cc    |  19
-rw-r--r--  compiler/optimizing/code_generator_arm.h     |   2
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc  |  21
-rw-r--r--  compiler/optimizing/code_generator_arm64.h   |   2
-rw-r--r--  compiler/optimizing/code_generator_mips.cc   |  17
-rw-r--r--  compiler/optimizing/code_generator_mips.h    |   2
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc |  21
-rw-r--r--  compiler/optimizing/code_generator_mips64.h  |   2
-rw-r--r--  compiler/optimizing/code_generator_x86.cc    |  11
-rw-r--r--  compiler/optimizing/code_generator_x86.h     |   2
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc |  16
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h  |   2
-rw-r--r--  test/561-shared-slowpaths/expected.txt       |   1
-rw-r--r--  test/561-shared-slowpaths/info.txt           |   1
-rw-r--r--  test/561-shared-slowpaths/src/Main.java      | 154
16 files changed, 325 insertions(+), 66 deletions(-)
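In backend terms, each VisitDeoptimize() used to allocate and register its own slow path; it now asks the shared generator instead, as the hunks below show for every architecture. A condensed before/after taken from the ARM backend (the trailing arguments of the GenerateTestAndBranch call are elided, as in the hunk):

// Before: every HDeoptimize allocated and registered its own slow path.
SlowPathCode* slow_path =
    new (GetGraph()->GetArena()) DeoptimizationSlowPathARM(deoptimize);
codegen_->AddSlowPath(slow_path);

// After: the shared generator returns an equivalent existing slow path when
// dex-pc, live registers, and environment all match, and only allocates
// (and registers via AddSlowPath) a new one otherwise.
SlowPathCode* slow_path =
    deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathARM>(deoptimize);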
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 950043ea71..5958cd89bc 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -613,7 +613,7 @@ class CodeGenerator {
ArenaVector<SlowPathCode*> slow_paths_;
- // The current slow path that we're generating code for.
+ // The current slow-path that we're generating code for.
SlowPathCode* current_slow_path_;
// The current block index in `block_order_` of the block
@@ -674,6 +674,122 @@ class CallingConvention {
DISALLOW_COPY_AND_ASSIGN(CallingConvention);
};
+/**
+ * A templated class SlowPathGenerator with a templated method NewSlowPath()
+ * that can be used by any code generator to share equivalent slow-paths with
+ * the objective of reducing generated code size.
+ *
+ * InstructionType: instruction that requires SlowPathCodeType
+ * SlowPathCodeType: subclass of SlowPathCode, with constructor SlowPathCodeType(InstructionType *)
+ */
+template <typename InstructionType>
+class SlowPathGenerator {
+ static_assert(std::is_base_of<HInstruction, InstructionType>::value,
+ "InstructionType is not a subclass of art::HInstruction");
+
+ public:
+ SlowPathGenerator(HGraph* graph, CodeGenerator* codegen)
+ : graph_(graph),
+ codegen_(codegen),
+ slow_path_map_(std::less<uint32_t>(), graph->GetArena()->Adapter(kArenaAllocSlowPaths)) {}
+
+ // Creates and adds a new slow-path, if needed, or returns an existing one otherwise.
+ // Templating the method (rather than the whole class) on the slow-path type enables
+ // keeping this code in a generic, non-architecture-specific place.
+ //
+ // NOTE: This approach assumes each InstructionType only generates one SlowPathCodeType.
+ // To relax this requirement, we would need some RTTI on the stored slow-paths,
+ // or template the class as a whole on SlowPathCodeType.
+ template <typename SlowPathCodeType>
+ SlowPathCodeType* NewSlowPath(InstructionType* instruction) {
+ static_assert(std::is_base_of<SlowPathCode, SlowPathCodeType>::value,
+ "SlowPathCodeType is not a subclass of art::SlowPathCode");
+ static_assert(std::is_constructible<SlowPathCodeType, InstructionType*>::value,
+ "SlowPathCodeType is not constructible from InstructionType*");
+ // Iterate over potential candidates for sharing. Currently, only same-typed
+ // slow-paths with exactly the same dex-pc are viable candidates.
+ // TODO: pass dex-pc/slow-path-type to run-time to allow even more sharing?
+ const uint32_t dex_pc = instruction->GetDexPc();
+ auto iter = slow_path_map_.find(dex_pc);
+ if (iter != slow_path_map_.end()) {
+ auto candidates = iter->second;
+ for (const auto& it : candidates) {
+ InstructionType* other_instruction = it.first;
+ SlowPathCodeType* other_slow_path = down_cast<SlowPathCodeType*>(it.second);
+ // Determine if the instructions allow for slow-path sharing.
+ if (HaveSameLiveRegisters(instruction, other_instruction) &&
+ HaveSameStackMap(instruction, other_instruction)) {
+ // Can share: reuse existing one.
+ return other_slow_path;
+ }
+ }
+ } else {
+ // First time this dex-pc is seen.
+ iter = slow_path_map_.Put(dex_pc, {{}, {graph_->GetArena()->Adapter(kArenaAllocSlowPaths)}});
+ }
+ // Cannot share: create and add new slow-path for this particular dex-pc.
+ SlowPathCodeType* slow_path = new (graph_->GetArena()) SlowPathCodeType(instruction);
+ iter->second.emplace_back(std::make_pair(instruction, slow_path));
+ codegen_->AddSlowPath(slow_path);
+ return slow_path;
+ }
+
+ private:
+ // Tests if both instructions have the same set of live physical registers. This ensures
+ // the slow-path has exactly the same preamble for saving these registers to the stack.
+ bool HaveSameLiveRegisters(const InstructionType* i1, const InstructionType* i2) const {
+ const uint32_t core_spill = ~codegen_->GetCoreSpillMask();
+ const uint32_t fpu_spill = ~codegen_->GetFpuSpillMask();
+ RegisterSet* live1 = i1->GetLocations()->GetLiveRegisters();
+ RegisterSet* live2 = i2->GetLocations()->GetLiveRegisters();
+ return (((live1->GetCoreRegisters() & core_spill) ==
+ (live2->GetCoreRegisters() & core_spill)) &&
+ ((live1->GetFloatingPointRegisters() & fpu_spill) ==
+ (live2->GetFloatingPointRegisters() & fpu_spill)));
+ }
+
+ // Tests if both instructions have the same stack map. This ensures the interpreter
+ // will find exactly the same dex-registers at the same entries.
+ bool HaveSameStackMap(const InstructionType* i1, const InstructionType* i2) const {
+ DCHECK(i1->HasEnvironment());
+ DCHECK(i2->HasEnvironment());
+ // We conservatively test if the two instructions find exactly the same instruction
+ // and location in each dex-register. This guarantees they will have the same stack map.
+ HEnvironment* e1 = i1->GetEnvironment();
+ HEnvironment* e2 = i2->GetEnvironment();
+ if (e1->GetParent() != e2->GetParent() || e1->Size() != e2->Size()) {
+ return false;
+ }
+ for (size_t i = 0, sz = e1->Size(); i < sz; ++i) {
+ if (e1->GetInstructionAt(i) != e2->GetInstructionAt(i) ||
+ !e1->GetLocationAt(i).Equals(e2->GetLocationAt(i))) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ HGraph* const graph_;
+ CodeGenerator* const codegen_;
+
+ // Map from dex-pc to vector of already existing instruction/slow-path pairs.
+ ArenaSafeMap<uint32_t, ArenaVector<std::pair<InstructionType*, SlowPathCode*>>> slow_path_map_;
+
+ DISALLOW_COPY_AND_ASSIGN(SlowPathGenerator);
+};
+
+class InstructionCodeGenerator : public HGraphVisitor {
+ public:
+ InstructionCodeGenerator(HGraph* graph, CodeGenerator* codegen)
+ : HGraphVisitor(graph),
+ deopt_slow_paths_(graph, codegen) {}
+
+ protected:
+ // Add a slow-path generator for each instruction/slow-path combination that desires sharing.
+ // TODO: under the current regime, only deopt sharing makes sense; extend later.
+ SlowPathGenerator<HDeoptimize> deopt_slow_paths_;
+};
+
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_H_
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 0be1520598..45520b45bf 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -350,24 +350,24 @@ class TypeCheckSlowPathARM : public SlowPathCode {
class DeoptimizationSlowPathARM : public SlowPathCode {
public:
- explicit DeoptimizationSlowPathARM(HInstruction* instruction)
+ explicit DeoptimizationSlowPathARM(HDeoptimize* instruction)
: instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, instruction_->GetLocations());
- DCHECK(instruction_->IsDeoptimize());
- HDeoptimize* deoptimize = instruction_->AsDeoptimize();
- uint32_t dex_pc = deoptimize->GetDexPc();
- CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
- arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, dex_pc, this);
+ arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
CheckEntrypointTypes<kQuickDeoptimize, void, void>();
}
const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathARM"; }
private:
- HInstruction* const instruction_;
+ HDeoptimize* const instruction_;
DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM);
};
@@ -913,7 +913,7 @@ void CodeGeneratorARM::UpdateBlockedPairRegisters() const {
}
InstructionCodeGeneratorARM::InstructionCodeGeneratorARM(HGraph* graph, CodeGeneratorARM* codegen)
- : HGraphVisitor(graph),
+ : InstructionCodeGenerator(graph, codegen),
assembler_(codegen->GetAssembler()),
codegen_(codegen) {}
@@ -1655,8 +1655,7 @@ void LocationsBuilderARM::VisitDeoptimize(HDeoptimize* deoptimize) {
}
void InstructionCodeGeneratorARM::VisitDeoptimize(HDeoptimize* deoptimize) {
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) DeoptimizationSlowPathARM(deoptimize);
- codegen_->AddSlowPath(slow_path);
+ SlowPathCode* slow_path = deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathARM>(deoptimize);
GenerateTestAndBranch(deoptimize,
/* condition_input_index */ 0,
slow_path->GetEntryLabel(),
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 26ca71e95f..26d6d63b31 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -188,7 +188,7 @@ class LocationsBuilderARM : public HGraphVisitor {
DISALLOW_COPY_AND_ASSIGN(LocationsBuilderARM);
};
-class InstructionCodeGeneratorARM : public HGraphVisitor {
+class InstructionCodeGeneratorARM : public InstructionCodeGenerator {
public:
InstructionCodeGeneratorARM(HGraph* graph, CodeGeneratorARM* codegen);
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 1ad487d940..a3150d3d22 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -477,24 +477,24 @@ class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
class DeoptimizationSlowPathARM64 : public SlowPathCodeARM64 {
public:
- explicit DeoptimizationSlowPathARM64(HInstruction* instruction)
+ explicit DeoptimizationSlowPathARM64(HDeoptimize* instruction)
: instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, instruction_->GetLocations());
- DCHECK(instruction_->IsDeoptimize());
- HDeoptimize* deoptimize = instruction_->AsDeoptimize();
- uint32_t dex_pc = deoptimize->GetDexPc();
- CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
- arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, dex_pc, this);
+ arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
CheckEntrypointTypes<kQuickDeoptimize, void, void>();
}
const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathARM64"; }
private:
- HInstruction* const instruction_;
+ HDeoptimize* const instruction_;
DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM64);
};
@@ -1605,7 +1605,7 @@ void InstructionCodeGeneratorARM64::GenerateSuspendCheck(HSuspendCheck* instruct
InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph,
CodeGeneratorARM64* codegen)
- : HGraphVisitor(graph),
+ : InstructionCodeGenerator(graph, codegen),
assembler_(codegen->GetAssembler()),
codegen_(codegen) {}
@@ -2939,9 +2939,8 @@ void LocationsBuilderARM64::VisitDeoptimize(HDeoptimize* deoptimize) {
}
void InstructionCodeGeneratorARM64::VisitDeoptimize(HDeoptimize* deoptimize) {
- SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena())
- DeoptimizationSlowPathARM64(deoptimize);
- codegen_->AddSlowPath(slow_path);
+ SlowPathCodeARM64* slow_path =
+ deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathARM64>(deoptimize);
GenerateTestAndBranch(deoptimize,
/* condition_input_index */ 0,
slow_path->GetEntryLabel(),
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 0e90ac6345..f2ff89488e 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -186,7 +186,7 @@ class FieldAccessCallingConventionARM64 : public FieldAccessCallingConvention {
DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionARM64);
};
-class InstructionCodeGeneratorARM64 : public HGraphVisitor {
+class InstructionCodeGeneratorARM64 : public InstructionCodeGenerator {
public:
InstructionCodeGeneratorARM64(HGraph* graph, CodeGeneratorARM64* codegen);
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 7bc0635e75..8d3e863d82 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -444,19 +444,16 @@ class TypeCheckSlowPathMIPS : public SlowPathCodeMIPS {
class DeoptimizationSlowPathMIPS : public SlowPathCodeMIPS {
public:
- explicit DeoptimizationSlowPathMIPS(HInstruction* instruction)
+ explicit DeoptimizationSlowPathMIPS(HDeoptimize* instruction)
: instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, instruction_->GetLocations());
- DCHECK(instruction_->IsDeoptimize());
- HDeoptimize* deoptimize = instruction_->AsDeoptimize();
- uint32_t dex_pc = deoptimize->GetDexPc();
- CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize),
instruction_,
- dex_pc,
+ instruction_->GetDexPc(),
this,
IsDirectEntrypoint(kQuickDeoptimize));
CheckEntrypointTypes<kQuickDeoptimize, void, void>();
@@ -465,7 +462,7 @@ class DeoptimizationSlowPathMIPS : public SlowPathCodeMIPS {
const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS"; }
private:
- HInstruction* const instruction_;
+ HDeoptimize* const instruction_;
DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS);
};
@@ -1241,7 +1238,7 @@ void InstructionCodeGeneratorMIPS::GenerateSuspendCheck(HSuspendCheck* instructi
InstructionCodeGeneratorMIPS::InstructionCodeGeneratorMIPS(HGraph* graph,
CodeGeneratorMIPS* codegen)
- : HGraphVisitor(graph),
+ : InstructionCodeGenerator(graph, codegen),
assembler_(codegen->GetAssembler()),
codegen_(codegen) {}
@@ -3357,8 +3354,8 @@ void LocationsBuilderMIPS::VisitDeoptimize(HDeoptimize* deoptimize) {
}
void InstructionCodeGeneratorMIPS::VisitDeoptimize(HDeoptimize* deoptimize) {
- SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) DeoptimizationSlowPathMIPS(deoptimize);
- codegen_->AddSlowPath(slow_path);
+ SlowPathCodeMIPS* slow_path =
+ deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathMIPS>(deoptimize);
GenerateTestAndBranch(deoptimize,
/* condition_input_index */ 0,
slow_path->GetEntryLabel(),
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 38302ad315..c3d4851ee9 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -197,7 +197,7 @@ class LocationsBuilderMIPS : public HGraphVisitor {
DISALLOW_COPY_AND_ASSIGN(LocationsBuilderMIPS);
};
-class InstructionCodeGeneratorMIPS : public HGraphVisitor {
+class InstructionCodeGeneratorMIPS : public InstructionCodeGenerator {
public:
InstructionCodeGeneratorMIPS(HGraph* graph, CodeGeneratorMIPS* codegen);
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 7682ca7800..8c4f0c6c0d 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -391,24 +391,24 @@ class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
class DeoptimizationSlowPathMIPS64 : public SlowPathCodeMIPS64 {
public:
- explicit DeoptimizationSlowPathMIPS64(HInstruction* instruction)
+ explicit DeoptimizationSlowPathMIPS64(HDeoptimize* instruction)
: instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, instruction_->GetLocations());
- DCHECK(instruction_->IsDeoptimize());
- HDeoptimize* deoptimize = instruction_->AsDeoptimize();
- uint32_t dex_pc = deoptimize->GetDexPc();
- CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
- mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, dex_pc, this);
+ mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize),
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
CheckEntrypointTypes<kQuickDeoptimize, void, void>();
}
const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS64"; }
private:
- HInstruction* const instruction_;
+ HDeoptimize* const instruction_;
DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS64);
};
@@ -1113,7 +1113,7 @@ void InstructionCodeGeneratorMIPS64::GenerateSuspendCheck(HSuspendCheck* instruc
InstructionCodeGeneratorMIPS64::InstructionCodeGeneratorMIPS64(HGraph* graph,
CodeGeneratorMIPS64* codegen)
- : HGraphVisitor(graph),
+ : InstructionCodeGenerator(graph, codegen),
assembler_(codegen->GetAssembler()),
codegen_(codegen) {}
@@ -2735,9 +2735,8 @@ void LocationsBuilderMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
}
void InstructionCodeGeneratorMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
- SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena())
- DeoptimizationSlowPathMIPS64(deoptimize);
- codegen_->AddSlowPath(slow_path);
+ SlowPathCodeMIPS64* slow_path =
+ deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathMIPS64>(deoptimize);
GenerateTestAndBranch(deoptimize,
/* condition_input_index */ 0,
slow_path->GetEntryLabel(),
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 60ff96dc43..7182e8e987 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -201,7 +201,7 @@ class LocationsBuilderMIPS64 : public HGraphVisitor {
DISALLOW_COPY_AND_ASSIGN(LocationsBuilderMIPS64);
};
-class InstructionCodeGeneratorMIPS64 : public HGraphVisitor {
+class InstructionCodeGeneratorMIPS64 : public InstructionCodeGenerator {
public:
InstructionCodeGeneratorMIPS64(HGraph* graph, CodeGeneratorMIPS64* codegen);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 4a0c2f47dc..c24d25876c 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -365,11 +365,10 @@ class TypeCheckSlowPathX86 : public SlowPathCode {
class DeoptimizationSlowPathX86 : public SlowPathCode {
public:
- explicit DeoptimizationSlowPathX86(HInstruction* instruction)
+ explicit DeoptimizationSlowPathX86(HDeoptimize* instruction)
: instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- DCHECK(instruction_->IsDeoptimize());
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, instruction_->GetLocations());
@@ -383,7 +382,7 @@ class DeoptimizationSlowPathX86 : public SlowPathCode {
const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathX86"; }
private:
- HInstruction* const instruction_;
+ HDeoptimize* const instruction_;
DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathX86);
};
@@ -892,7 +891,7 @@ void CodeGeneratorX86::UpdateBlockedPairRegisters() const {
}
InstructionCodeGeneratorX86::InstructionCodeGeneratorX86(HGraph* graph, CodeGeneratorX86* codegen)
- : HGraphVisitor(graph),
+ : InstructionCodeGenerator(graph, codegen),
assembler_(codegen->GetAssembler()),
codegen_(codegen) {}
@@ -1611,9 +1610,7 @@ void LocationsBuilderX86::VisitDeoptimize(HDeoptimize* deoptimize) {
}
void InstructionCodeGeneratorX86::VisitDeoptimize(HDeoptimize* deoptimize) {
- SlowPathCode* slow_path = new (GetGraph()->GetArena())
- DeoptimizationSlowPathX86(deoptimize);
- codegen_->AddSlowPath(slow_path);
+ SlowPathCode* slow_path = deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathX86>(deoptimize);
GenerateTestAndBranch(deoptimize,
/* condition_input_index */ 0,
slow_path->GetEntryLabel(),
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index df7347658b..c65c423eae 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -178,7 +178,7 @@ class LocationsBuilderX86 : public HGraphVisitor {
DISALLOW_COPY_AND_ASSIGN(LocationsBuilderX86);
};
-class InstructionCodeGeneratorX86 : public HGraphVisitor {
+class InstructionCodeGeneratorX86 : public InstructionCodeGenerator {
public:
InstructionCodeGeneratorX86(HGraph* graph, CodeGeneratorX86* codegen);
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index ec62d84b79..294b40e3d4 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -387,18 +387,16 @@ class TypeCheckSlowPathX86_64 : public SlowPathCode {
class DeoptimizationSlowPathX86_64 : public SlowPathCode {
public:
- explicit DeoptimizationSlowPathX86_64(HInstruction* instruction)
+ explicit DeoptimizationSlowPathX86_64(HDeoptimize* instruction)
: instruction_(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, instruction_->GetLocations());
- DCHECK(instruction_->IsDeoptimize());
- HDeoptimize* deoptimize = instruction_->AsDeoptimize();
x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize),
- deoptimize,
- deoptimize->GetDexPc(),
+ instruction_,
+ instruction_->GetDexPc(),
this);
CheckEntrypointTypes<kQuickDeoptimize, void, void>();
}
@@ -406,7 +404,7 @@ class DeoptimizationSlowPathX86_64 : public SlowPathCode {
const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathX86_64"; }
private:
- HInstruction* const instruction_;
+ HDeoptimize* const instruction_;
DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathX86_64);
};
@@ -1000,7 +998,7 @@ CodeGeneratorX86_64::CodeGeneratorX86_64(HGraph* graph,
InstructionCodeGeneratorX86_64::InstructionCodeGeneratorX86_64(HGraph* graph,
CodeGeneratorX86_64* codegen)
- : HGraphVisitor(graph),
+ : InstructionCodeGenerator(graph, codegen),
assembler_(codegen->GetAssembler()),
codegen_(codegen) {}
@@ -1594,9 +1592,7 @@ void LocationsBuilderX86_64::VisitDeoptimize(HDeoptimize* deoptimize) {
}
void InstructionCodeGeneratorX86_64::VisitDeoptimize(HDeoptimize* deoptimize) {
- SlowPathCode* slow_path = new (GetGraph()->GetArena())
- DeoptimizationSlowPathX86_64(deoptimize);
- codegen_->AddSlowPath(slow_path);
+ SlowPathCode* slow_path = deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathX86_64>(deoptimize);
GenerateTestAndBranch(deoptimize,
/* condition_input_index */ 0,
slow_path->GetEntryLabel(),
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index c5e8a04da6..505c9dcdad 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -183,7 +183,7 @@ class LocationsBuilderX86_64 : public HGraphVisitor {
DISALLOW_COPY_AND_ASSIGN(LocationsBuilderX86_64);
};
-class InstructionCodeGeneratorX86_64 : public HGraphVisitor {
+class InstructionCodeGeneratorX86_64 : public InstructionCodeGenerator {
public:
InstructionCodeGeneratorX86_64(HGraph* graph, CodeGeneratorX86_64* codegen);
diff --git a/test/561-shared-slowpaths/expected.txt b/test/561-shared-slowpaths/expected.txt
new file mode 100644
index 0000000000..b0aad4deb5
--- /dev/null
+++ b/test/561-shared-slowpaths/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/561-shared-slowpaths/info.txt b/test/561-shared-slowpaths/info.txt
new file mode 100644
index 0000000000..c51e70b452
--- /dev/null
+++ b/test/561-shared-slowpaths/info.txt
@@ -0,0 +1 @@
+Test on correctness while possibly sharing slow paths.
diff --git a/test/561-shared-slowpaths/src/Main.java b/test/561-shared-slowpaths/src/Main.java
new file mode 100644
index 0000000000..718b8750a7
--- /dev/null
+++ b/test/561-shared-slowpaths/src/Main.java
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//
+// Test on correctness in situations where slow paths may be shared
+// (actual sharing may vary between different code generators).
+//
+//
+public class Main {
+
+ // A method with two loops that can be optimized with dynamic BCE,
+ // resulting, for each loop, in a deopt on null, a deopt on lower OOB,
+ // and a deopt on upper OOB (six deopts in total).
+ private static void init(int[] x, int[] y, int l1, int h1, int l2, int h2) {
+ for (int i = l1; i < h1; i++) {
+ x[i] = i;
+ }
+ for (int i = l2; i < h2; i++) {
+ y[i] = i;
+ }
+ }
+
+ // Test that each of the six possible exception situations for init()
+ // is correctly handled by the deopt instructions.
+ public static void main(String[] args) {
+ int[] x = new int[100];
+ int[] y = new int[100];
+ int z;
+
+ // All is well.
+ z = 0;
+ reset(x, y);
+ try {
+ init(x, y, 0, 100, 0, 100);
+ } catch (Exception e) {
+ z = 1;
+ }
+ expectEquals(z, 0);
+ for (int i = 0; i < 100; i++) {
+ expectEquals(x[i], i);
+ expectEquals(y[i], i);
+ }
+
+ // Null deopt on x.
+ z = 0;
+ reset(x, y);
+ try {
+ init(null, y, 0, 100, 0, 100);
+ } catch (NullPointerException e) {
+ z = 1;
+ }
+ expectEquals(z, 1);
+ for (int i = 0; i < 100; i++) {
+ expectEquals(x[i], 0);
+ expectEquals(y[i], 0);
+ }
+
+ // Lower out-of-bounds on x.
+ z = 0;
+ reset(x, y);
+ try {
+ init(x, y, -1, 100, 0, 100);
+ } catch (ArrayIndexOutOfBoundsException e) {
+ z = 1;
+ }
+ expectEquals(z, 1);
+ for (int i = 0; i < 100; i++) {
+ expectEquals(x[i], 0);
+ expectEquals(y[i], 0);
+ }
+
+ // Upper out-of-bounds on x.
+ z = 0;
+ reset(x, y);
+ try {
+ init(x, y, 0, 101, 0, 100);
+ } catch (ArrayIndexOutOfBoundsException e) {
+ z = 1;
+ }
+ expectEquals(z, 1);
+ for (int i = 0; i < 100; i++) {
+ expectEquals(x[i], i);
+ expectEquals(y[i], 0);
+ }
+
+ // Null deopt on y.
+ z = 0;
+ reset(x, y);
+ try {
+ init(x, null, 0, 100, 0, 100);
+ } catch (NullPointerException e) {
+ z = 1;
+ }
+ expectEquals(z, 1);
+ for (int i = 0; i < 100; i++) {
+ expectEquals(x[i], i);
+ expectEquals(y[i], 0);
+ }
+
+ // Lower out-of-bounds on y.
+ z = 0;
+ reset(x, y);
+ try {
+ init(x, y, 0, 100, -1, 100);
+ } catch (ArrayIndexOutOfBoundsException e) {
+ z = 1;
+ }
+ expectEquals(z, 1);
+ for (int i = 0; i < 100; i++) {
+ expectEquals(x[i], i);
+ expectEquals(y[i], 0);
+ }
+
+ // Upper out-of-bounds on y.
+ z = 0;
+ reset(x, y);
+ try {
+ init(x, y, 0, 100, 0, 101);
+ } catch (ArrayIndexOutOfBoundsException e) {
+ z = 1;
+ }
+ expectEquals(z, 1);
+ for (int i = 0; i < 100; i++) {
+ expectEquals(x[i], i);
+ expectEquals(y[i], i);
+ }
+
+ System.out.println("passed");
+ }
+
+ private static void reset(int[] x, int[] y) {
+ for (int i = 0; i < x.length; i++) x[i] = 0;
+ for (int i = 0; i < y.length; i++) y[i] = 0;
+ }
+
+ private static void expectEquals(int result, int expected) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+}