Support callee save floating point registers on x64.

- Share the computation of core_spill_mask and fpu_spill_mask
  between backends (see the sketch after this list).
- Remove explicit stack overflow check support: the checks would need
  to be adjusted, and since they are not tested, they would easily
  bitrot.
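
The shared helper itself is not part of this excerpt. As a rough,
standalone sketch of the idea (not the actual ART code; the class and
member names below are illustrative assumptions), the backend-independent
side could derive both masks from the registers the backends report as
allocated, intersected with the per-architecture callee-save sets:

  #include <cstdint>

  // Sketch only: mirrors the shared spill-mask computation described above.
  class SpillMaskSketch {
   public:
    SpillMaskSketch(uint32_t core_callee_saves, uint32_t fpu_callee_saves)
        : core_callee_save_mask_(core_callee_saves),
          fpu_callee_save_mask_(fpu_callee_saves) {}

    // Backends report every register they hand out, including the fake
    // return-address register on x86, so the return PC slot is counted.
    void AddAllocatedCoreRegister(int reg) { allocated_core_ |= 1u << reg; }
    void AddAllocatedFpuRegister(int reg) { allocated_fpu_ |= 1u << reg; }

    // Only callee-save registers that were actually allocated need to be
    // spilled at frame entry.
    void ComputeSpillMasks() {
      core_spill_mask_ = allocated_core_ & core_callee_save_mask_;
      fpu_spill_mask_ = allocated_fpu_ & fpu_callee_save_mask_;
    }

    uint32_t GetCoreSpillMask() const { return core_spill_mask_; }
    uint32_t GetFpuSpillMask() const { return fpu_spill_mask_; }

   private:
    uint32_t allocated_core_ = 0u;
    uint32_t allocated_fpu_ = 0u;
    uint32_t core_spill_mask_ = 0u;
    uint32_t fpu_spill_mask_ = 0u;
    const uint32_t core_callee_save_mask_;
    const uint32_t fpu_callee_save_mask_;
  };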

Change-Id: I0b619b8de4e1bdb169ea1ae7c6ede8df0d65837a
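
Note on the GenerateFrameEntry/GenerateFrameExit hunks below: the ESP
adjustment now uses GetFrameSize() - FrameEntrySpillSize() instead of the
removed kNumberOfPushedRegistersAtEntry constant, so it follows whatever
ends up in the spill masks. A minimal standalone sketch of that arithmetic
(the helper name and slot sizes here are assumptions, not taken from this
diff):

  #include <bitset>
  #include <cstddef>
  #include <cstdint>

  // Sketch only: the frame-entry spill size is the number of bits set in
  // each spill mask times the size of one spill slot.
  static std::size_t FrameEntrySpillSizeSketch(uint32_t core_spill_mask,
                                               uint32_t fpu_spill_mask,
                                               std::size_t core_slot_size,   // e.g. 4 on x86
                                               std::size_t fpu_slot_size) {  // e.g. 8 per XMM spill
    return std::bitset<32>(core_spill_mask).count() * core_slot_size +
           std::bitset<32>(fpu_spill_mask).count() * fpu_slot_size;
  }

With only the fake return-address register in the core mask and no FP
spills, this reduces to one word, which matches the old
kNumberOfPushedRegistersAtEntry * kX86WordSize expression.
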
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 9e26ddd..c0fdcaa 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -31,7 +31,6 @@
 
 namespace x86 {
 
-static constexpr int kNumberOfPushedRegistersAtEntry = 1;
 static constexpr int kCurrentMethodStackOffset = 0;
 
 static constexpr Register kRuntimeParameterCoreRegisters[] = { EAX, ECX, EDX, EBX };
@@ -44,6 +43,7 @@
 
 // Marker for places that can be updated once we don't follow the quick ABI.
 static constexpr bool kFollowsQuickABI = true;
+static constexpr Register kFakeReturnRegister = Register(8);
 
 class InvokeRuntimeCallingConvention : public CallingConvention<Register, XmmRegister> {
  public:
@@ -123,21 +123,6 @@
   DISALLOW_COPY_AND_ASSIGN(DivRemMinusOneSlowPathX86);
 };
 
-class StackOverflowCheckSlowPathX86 : public SlowPathCodeX86 {
- public:
-  StackOverflowCheckSlowPathX86() {}
-
-  virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
-    __ Bind(GetEntryLabel());
-    __ addl(ESP,
-            Immediate(codegen->GetFrameSize() - kNumberOfPushedRegistersAtEntry * kX86WordSize));
-    __ fs()->jmp(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pThrowStackOverflow)));
-  }
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(StackOverflowCheckSlowPathX86);
-};
-
 class BoundsCheckSlowPathX86 : public SlowPathCodeX86 {
  public:
   BoundsCheckSlowPathX86(HBoundsCheck* instruction,
@@ -375,14 +360,13 @@
 
 CodeGeneratorX86::CodeGeneratorX86(HGraph* graph, const CompilerOptions& compiler_options)
     : CodeGenerator(graph, kNumberOfCpuRegisters, kNumberOfXmmRegisters,
-                    kNumberOfRegisterPairs, 0, 0, compiler_options),
+                    kNumberOfRegisterPairs, (1 << kFakeReturnRegister), 0, compiler_options),
       block_labels_(graph->GetArena(), 0),
       location_builder_(graph, this),
       instruction_visitor_(graph, this),
-      move_resolver_(graph->GetArena(), this) {}
-
-size_t CodeGeneratorX86::FrameEntrySpillSize() const {
-  return kNumberOfPushedRegistersAtEntry * kX86WordSize;
+      move_resolver_(graph->GetArena(), this) {
+  // Use a fake return address register to mimic Quick.
+  AddAllocatedRegister(Location::RegisterLocation(kFakeReturnRegister));
 }
 
 Location CodeGeneratorX86::AllocateFreeRegister(Primitive::Type type) const {
@@ -464,35 +448,21 @@
         codegen_(codegen) {}
 
 void CodeGeneratorX86::GenerateFrameEntry() {
-  // Create a fake register to mimic Quick.
-  static const int kFakeReturnRegister = 8;
-  core_spill_mask_ |= (1 << kFakeReturnRegister);
-
   bool skip_overflow_check =
       IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kX86);
-  bool implicitStackOverflowChecks = GetCompilerOptions().GetImplicitStackOverflowChecks();
+  DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
 
-  if (!skip_overflow_check && implicitStackOverflowChecks) {
+  if (!skip_overflow_check) {
     __ testl(EAX, Address(ESP, -static_cast<int32_t>(GetStackOverflowReservedBytes(kX86))));
     RecordPcInfo(nullptr, 0);
   }
 
-  // The return PC has already been pushed on the stack.
-  __ subl(ESP, Immediate(GetFrameSize() - kNumberOfPushedRegistersAtEntry * kX86WordSize));
-
-  if (!skip_overflow_check && !implicitStackOverflowChecks) {
-    SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) StackOverflowCheckSlowPathX86();
-    AddSlowPath(slow_path);
-
-    __ fs()->cmpl(ESP, Address::Absolute(Thread::StackEndOffset<kX86WordSize>()));
-    __ j(kLess, slow_path->GetEntryLabel());
-  }
-
+  __ subl(ESP, Immediate(GetFrameSize() - FrameEntrySpillSize()));
   __ movl(Address(ESP, kCurrentMethodStackOffset), EAX);
 }
 
 void CodeGeneratorX86::GenerateFrameExit() {
-  __ addl(ESP, Immediate(GetFrameSize() - kNumberOfPushedRegistersAtEntry * kX86WordSize));
+  __ addl(ESP, Immediate(GetFrameSize() - FrameEntrySpillSize()));
 }
 
 void CodeGeneratorX86::Bind(HBasicBlock* block) {