Overflow fixes for amrwbenc

Most of these overflows were encountered while running a mixed
sanitized/unsanitized build of the AMR encoder, so they may not all be
reachable under real conditions.
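The recurring pattern is replacing raw Word32 arithmetic, where signed
overflow is undefined behaviour and is what the sanitizer flags, with the
saturating basic ops (L_add, L_sub, voround) that clamp to MAX_32/MIN_32
instead; e.g. "L_tmp += vo_L_mult(...)" becomes
"L_tmp = L_add(L_tmp, vo_L_mult(...))". As an illustrative sketch only
(not part of the patch; sat_add32 is a hypothetical stand-in for
basic_op.h's L_add):

    #include <stdint.h>
    #include <limits.h>

    /* Hypothetical stand-in for the codec's saturating L_add: detect
     * signed overflow before it happens and clamp to the nearest
     * representable value, instead of letting the addition wrap. */
    static int32_t sat_add32(int32_t a, int32_t b)
    {
        if (a > 0 && b > INT32_MAX - a) return INT32_MAX;
        if (a < 0 && b < INT32_MIN - a) return INT32_MIN;
        return a + b;
    }
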

Change-Id: I85af7d40214133234009323e7e64432fc1be39ca
diff --git a/media/libstagefright/codecs/amrwbenc/inc/basic_op.h b/media/libstagefright/codecs/amrwbenc/inc/basic_op.h
index 5808437..db3e058 100644
--- a/media/libstagefright/codecs/amrwbenc/inc/basic_op.h
+++ b/media/libstagefright/codecs/amrwbenc/inc/basic_op.h
@@ -222,14 +222,18 @@
     }
     else
     {
-        result = (Word32) var1 *((Word32) 1 << var2);
-        if ((var2 > 15 && var1 != 0) || (result != (Word32) ((Word16) result)))
+        if (var2 > 15 && var1 != 0)
         {
             var_out = (Word16)((var1 > 0) ? MAX_16 : MIN_16);
         }
         else
         {
-            var_out = extract_l (result);
+            result = (Word32) var1 *((Word32) 1 << var2);
+            if ((result != (Word32) ((Word16) result))) {
+                var_out = (Word16)((var1 > 0) ? MAX_16 : MIN_16);
+            } else {
+                var_out = extract_l (result);
+            }
         }
     }
     return (var_out);
diff --git a/media/libstagefright/codecs/amrwbenc/src/c2t64fx.c b/media/libstagefright/codecs/amrwbenc/src/c2t64fx.c
index c7c9279..dbb94c6 100644
--- a/media/libstagefright/codecs/amrwbenc/src/c2t64fx.c
+++ b/media/libstagefright/codecs/amrwbenc/src/c2t64fx.c
@@ -93,7 +93,7 @@
 #endif
 
     Isqrt_n(&s, &exp);
-    k_dn = vo_round(L_shl(s, (exp + 8)));    /* k_dn = 256..4096 */
+    k_dn = voround(L_shl(s, (exp + 8)));    /* k_dn = 256..4096 */
     k_dn = vo_mult_r(alp, k_dn);              /* alp in Q12 */
 
     /* mix normalized cn[] and dn[] */
diff --git a/media/libstagefright/codecs/amrwbenc/src/c4t64fx.c b/media/libstagefright/codecs/amrwbenc/src/c4t64fx.c
index e3b2f6c..8bf15ea 100644
--- a/media/libstagefright/codecs/amrwbenc/src/c4t64fx.c
+++ b/media/libstagefright/codecs/amrwbenc/src/c4t64fx.c
@@ -257,7 +257,7 @@
 #endif
 
     Isqrt_n(&s, &exp);
-    k_dn = (L_shl(s, (exp + 5 + 3)) + 0x8000) >> 16;    /* k_dn = 256..4096 */
+    k_dn = voround(L_shl(s, (exp + 5 + 3)));    /* k_dn = 256..4096 */
     k_dn = vo_mult_r(alp, k_dn);              /* alp in Q12 */
 
     /* mix normalized cn[] and dn[] */
@@ -1005,7 +1005,7 @@
     for (x = track_x; x < L_SUBFR; x += STEP)
     {
         ps1 = *ps + dn[x];
-        alp1 = alp0 + ((*p0++)<<13);
+        alp1 = L_add(alp0, ((*p0++)<<13));
 
         if (dn2[x] < thres_ix)
         {
@@ -1018,7 +1018,7 @@
                 alp2 = L_add(alp2, ((*p2++)<<14));
                 alp_16 = extract_h(alp2);
                 sq = vo_mult(ps2, ps2);
-                s = vo_L_mult(alpk, sq) - ((sqk * alp_16)<<1);
+                s = L_sub(vo_L_mult(alpk, sq), L_mult(sqk, alp_16));
 
                 if (s > 0)
                 {
diff --git a/media/libstagefright/codecs/amrwbenc/src/cor_h_x.c b/media/libstagefright/codecs/amrwbenc/src/cor_h_x.c
index b2aa759..e834396 100644
--- a/media/libstagefright/codecs/amrwbenc/src/cor_h_x.c
+++ b/media/libstagefright/codecs/amrwbenc/src/cor_h_x.c
@@ -55,10 +55,10 @@
         p1 = &x[i];
         p2 = &h[0];
         for (j = i; j < L_SUBFR; j++)
-            L_tmp += vo_L_mult(*p1++, *p2++);
+            L_tmp = L_add(L_tmp, vo_L_mult(*p1++, *p2++));
 
         y32[i] = L_tmp;
-        L_tmp = (L_tmp > 0)? L_tmp:-L_tmp;
+        L_tmp = (L_tmp > 0)? L_tmp: (L_tmp == INT_MIN ? INT_MAX : -L_tmp);
         if(L_tmp > L_max)
         {
             L_max = L_tmp;
@@ -68,10 +68,10 @@
         p1 = &x[i+1];
         p2 = &h[0];
         for (j = i+1; j < L_SUBFR; j++)
-            L_tmp += vo_L_mult(*p1++, *p2++);
+            L_tmp = L_add(L_tmp, vo_L_mult(*p1++, *p2++));
 
         y32[i+1] = L_tmp;
-        L_tmp = (L_tmp > 0)? L_tmp:-L_tmp;
+        L_tmp = (L_tmp > 0)? L_tmp: (L_tmp == INT_MIN ? INT_MAX : -L_tmp);
         if(L_tmp > L_max1)
         {
             L_max1 = L_tmp;
@@ -81,10 +81,10 @@
         p1 = &x[i+2];
         p2 = &h[0];
         for (j = i+2; j < L_SUBFR; j++)
-            L_tmp += vo_L_mult(*p1++, *p2++);
+            L_tmp = L_add(L_tmp, vo_L_mult(*p1++, *p2++));
 
         y32[i+2] = L_tmp;
-        L_tmp = (L_tmp > 0)? L_tmp:-L_tmp;
+        L_tmp = (L_tmp > 0)? L_tmp: (L_tmp == INT_MIN ? INT_MAX : -L_tmp);
         if(L_tmp > L_max2)
         {
             L_max2 = L_tmp;
@@ -94,17 +94,23 @@
         p1 = &x[i+3];
         p2 = &h[0];
         for (j = i+3; j < L_SUBFR; j++)
-            L_tmp += vo_L_mult(*p1++, *p2++);
+            L_tmp = L_add(L_tmp, vo_L_mult(*p1++, *p2++));
 
         y32[i+3] = L_tmp;
-        L_tmp = (L_tmp > 0)? L_tmp:-L_tmp;
+        L_tmp = (L_tmp > 0)? L_tmp: (L_tmp == INT_MIN ? INT_MAX : -L_tmp);
         if(L_tmp > L_max3)
         {
             L_max3 = L_tmp;
         }
     }
     /* tot += 3*max / 8 */
-    L_max = ((L_max + L_max1 + L_max2 + L_max3) >> 2);
+    if (L_max > INT_MAX - L_max1 ||
+            L_max + L_max1 > INT_MAX - L_max2 ||
+            L_max + L_max1 + L_max2 > INT_MAX - L_max3) {
+        L_max = INT_MAX >> 2;
+    } else {
+        L_max = ((L_max + L_max1 + L_max2 + L_max3) >> 2);
+    }
     L_tot = vo_L_add(L_tot, L_max);       /* +max/4 */
     L_tot = vo_L_add(L_tot, (L_max >> 1));  /* +max/8 */