Fixes for x86 compiler optimizations.

x86 now works with all but a few optimizations turned on; the ones
that are still broken remain disabled for now. This change includes:

- Flagging of opcodes to indicate register use and def. Also made the
  flagging more complete for loads/stores and the setting/use of
  ccodes (see the first sketch below).

- Fixes to load/store elimination, though it still does not work.

- Prevent double values that are loaded or stored from losing their
  FP_DOUBLE flag. Later optimizations rely on this size information.

- Renumbering of DOUBLE registers so that they alias with FP regs
  when masked (see the second sketch below).

- Add support in the disassembler to recognize shifts.
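
For illustration, here is a minimal sketch of how use/def flagging
can be encoded. The opcode names and bit values here are hypothetical;
the real table is the per-target EncodingMap:

    // Minimal sketch (hypothetical names/values): per-opcode flags let
    // later passes query register effects without decoding the opcode.
    #include <cstdint>

    enum : uint64_t {
        IS_LOAD     = 1ULL << 0,
        IS_STORE    = 1ULL << 1,
        REG_DEF0    = 1ULL << 2,  // operand 0 is defined
        REG_USE1    = 1ULL << 3,  // operand 1 is used
        SETS_CCODES = 1ULL << 4,
        USES_CCODES = 1ULL << 5,
    };

    struct EncodingEntry {
        const char* name;
        uint64_t    flags;
    };

    // A load defines its destination and uses its base register; an
    // ALU op defines/uses registers and also sets condition codes.
    static const EncodingEntry kSketchMap[] = {
        { "mov r0, [r1]", IS_LOAD | REG_DEF0 | REG_USE1 },
        { "add r0, r1",   REG_DEF0 | REG_USE1 | SETS_CCODES },
    };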

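A second minimal sketch, assuming a hypothetical FP_DOUBLE type bit
and FP_REG_MASK, of why the renumbering makes DOUBLE registers alias
their underlying FP registers once masked:

    // Minimal sketch (constants hypothetical): DOUBLE registers are
    // numbered so that masking off the type bit yields the physical
    // FP register they occupy, letting resource masks see the alias.
    #include <cassert>

    const int FP_DOUBLE   = 0x40;  // marks a register as a double
    const int FP_REG_MASK = 0x3f;  // low bits select the physical reg

    int fpReg(int r) { return r & FP_REG_MASK; }

    int main() {
        int floatReg  = 4;              // e.g., XMM4 holding a float
        int doubleReg = FP_DOUBLE | 4;  // same XMM4 holding a double
        // Both names resolve to the same physical register, so a
        // later pass can detect the overlap instead of missing it.
        assert(fpReg(floatReg) == fpReg(doubleReg));
        return 0;
    }
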
Change-Id: I758cdce418409fdd84206ce295005d5c9ab635f8
diff --git a/src/compiler/codegen/LocalOptimizations.cc b/src/compiler/codegen/LocalOptimizations.cc
index 55ba03a..faab3e0 100644
--- a/src/compiler/codegen/LocalOptimizations.cc
+++ b/src/compiler/codegen/LocalOptimizations.cc
@@ -86,11 +86,16 @@
     /* Skip non-interesting instructions */
     if ((thisLIR->flags.isNop == true) ||
         isPseudoOpcode(thisLIR->opcode) ||
+        (EncodingMap[thisLIR->opcode].flags & IS_BRANCH) ||
         !(EncodingMap[thisLIR->opcode].flags & (IS_LOAD | IS_STORE))) {
       continue;
     }
 
+#if defined(TARGET_X86)
+    int nativeRegId = (EncodingMap[thisLIR->opcode].flags & IS_STORE) ? thisLIR->operands[2] : thisLIR->operands[0];
+#else
     int nativeRegId = thisLIR->operands[0];
+#endif
     bool isThisLIRLoad = EncodingMap[thisLIR->opcode].flags & IS_LOAD;
     LIR* checkLIR;
     /* Use the mem mask to determine the rough memory location */
@@ -102,9 +107,8 @@
      */
     if (!(thisMemMask & (ENCODE_LITERAL | ENCODE_DALVIK_REG))) continue;
 
-// FIXME: make sure we have a branch barrier for x86
 #if defined(TARGET_X86)
-    u8 stopUseRegMask = (thisLIR->useMask) & ~ENCODE_MEM;
+    u8 stopUseRegMask = (IS_BRANCH | thisLIR->useMask) & ~ENCODE_MEM;
 #else
     /*
      * Add r15 (pc) to the resource mask to prevent this instruction