Remove remaining MIPS support.

With the exception of DWARF support in libelffile.

Test: aosp_taimen-userdebug boots.
Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Bug: 147346243
Change-Id: Ib25acbc98aa7f63ce49a7ed2f81a4a64d48eac39
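
Note for reviewers: with the kMips/kMips64 enumerators gone, string-based ISA lookups
fall through to InstructionSet::kNone (see libartbase/arch/instruction_set.cc below).
A minimal sketch of the expected behaviour, following the style of the existing cases in
libartbase/arch/instruction_set_test.cc; the test name and include paths are illustrative
and are not part of this change:

    // Hypothetical companion test; include paths approximate the existing test file.
    #include "arch/instruction_set.h"
    #include "gtest/gtest.h"

    namespace art {

    // "mips" and "mips64" are no longer recognized ISA strings, so the parser
    // returns kNone for them, exactly as it does for any unknown string.
    TEST(InstructionSetTest, MipsNoLongerParsed) {
      EXPECT_EQ(InstructionSet::kNone, GetInstructionSetFromString("mips"));
      EXPECT_EQ(InstructionSet::kNone, GetInstructionSetFromString("mips64"));
    }

    }  // namespace art
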
diff --git a/build/Android.common.mk b/build/Android.common.mk
index e96e3ed..4d702e4 100644
--- a/build/Android.common.mk
+++ b/build/Android.common.mk
@@ -17,7 +17,7 @@
 ifndef ART_ANDROID_COMMON_MK
 ART_ANDROID_COMMON_MK = true
 
-ART_TARGET_SUPPORTED_ARCH := arm arm64 mips mips64 x86 x86_64
+ART_TARGET_SUPPORTED_ARCH := arm arm64 x86 x86_64
 ART_HOST_SUPPORTED_ARCH := x86 x86_64
 ART_DEXPREOPT_BOOT_JAR_DIR := system/framework
 
diff --git a/build/art.go b/build/art.go
index 353a682..f3b29bb 100644
--- a/build/art.go
+++ b/build/art.go
@@ -26,7 +26,7 @@
 	"android/soong/cc"
 )
 
-var supportedArches = []string{"arm", "arm64", "mips", "mips64", "x86", "x86_64"}
+var supportedArches = []string{"arm", "arm64", "x86", "x86_64"}
 
 func globalFlags(ctx android.LoadHookContext) ([]string, []string) {
 	var cflags []string
@@ -88,16 +88,12 @@
 		cflags = append(cflags,
 			"-DART_STACK_OVERFLOW_GAP_arm=8192",
 			"-DART_STACK_OVERFLOW_GAP_arm64=16384",
-			"-DART_STACK_OVERFLOW_GAP_mips=16384",
-			"-DART_STACK_OVERFLOW_GAP_mips64=16384",
 			"-DART_STACK_OVERFLOW_GAP_x86=16384",
 			"-DART_STACK_OVERFLOW_GAP_x86_64=20480")
 	} else {
 		cflags = append(cflags,
 			"-DART_STACK_OVERFLOW_GAP_arm=8192",
 			"-DART_STACK_OVERFLOW_GAP_arm64=8192",
-			"-DART_STACK_OVERFLOW_GAP_mips=16384",
-			"-DART_STACK_OVERFLOW_GAP_mips64=16384",
 			"-DART_STACK_OVERFLOW_GAP_x86=8192",
 			"-DART_STACK_OVERFLOW_GAP_x86_64=8192")
 	}
diff --git a/build/codegen.go b/build/codegen.go
index 7ada8f5..bc7dc42 100644
--- a/build/codegen.go
+++ b/build/codegen.go
@@ -62,10 +62,6 @@
 			arch = &c.Codegen.Arm
 		case "arm64":
 			arch = &c.Codegen.Arm64
-		case "mips":
-			arch = &c.Codegen.Mips
-		case "mips64":
-			arch = &c.Codegen.Mips64
 		case "x86":
 			arch = &c.Codegen.X86
 		case "x86_64":
@@ -192,7 +188,7 @@
 
 type codegenProperties struct {
 	Codegen struct {
-		Arm, Arm64, Mips, Mips64, X86, X86_64 codegenArchProperties
+		Arm, Arm64, X86, X86_64 codegenArchProperties
 	}
 }
 
@@ -203,8 +199,6 @@
 		arches[s] = true
 		if s == "arm64" {
 			arches["arm"] = true
-		} else if s == "mips64" {
-			arches["mips"] = true
 		} else if s == "x86_64" {
 			arches["x86"] = true
 		}
diff --git a/compiler/debug/elf_debug_frame_writer.h b/compiler/debug/elf_debug_frame_writer.h
index 1b7954d..f41db07 100644
--- a/compiler/debug/elf_debug_frame_writer.h
+++ b/compiler/debug/elf_debug_frame_writer.h
@@ -88,30 +88,6 @@
       WriteCIE(is64bit, return_reg, opcodes, buffer);
       return;
     }
-    case InstructionSet::kMips:
-    case InstructionSet::kMips64: {
-      dwarf::DebugFrameOpCodeWriter<> opcodes;
-      opcodes.DefCFA(Reg::MipsCore(29), 0);  // R29(SP).
-      // core registers.
-      for (int reg = 1; reg < 26; reg++) {
-        if (reg < 16 || reg == 24 || reg == 25) {  // AT, V*, A*, T*.
-          opcodes.Undefined(Reg::MipsCore(reg));
-        } else {
-          opcodes.SameValue(Reg::MipsCore(reg));
-        }
-      }
-      // fp registers.
-      for (int reg = 0; reg < 32; reg++) {
-        if (reg < 24) {
-          opcodes.Undefined(Reg::Mips64Fp(reg));
-        } else {
-          opcodes.SameValue(Reg::Mips64Fp(reg));
-        }
-      }
-      auto return_reg = Reg::MipsCore(31);  // R31(RA).
-      WriteCIE(is64bit, return_reg, opcodes, buffer);
-      return;
-    }
     case InstructionSet::kX86: {
       // FIXME: Add fp registers once libunwind adds support for them. Bug: 20491296
       constexpr bool generate_opcodes_for_x86_fp = false;
diff --git a/compiler/debug/elf_debug_line_writer.h b/compiler/debug/elf_debug_line_writer.h
index 479725b..e7b2a1b 100644
--- a/compiler/debug/elf_debug_line_writer.h
+++ b/compiler/debug/elf_debug_line_writer.h
@@ -70,8 +70,6 @@
         dwarf_isa = 1;  // DW_ISA_ARM_thumb.
         break;
       case InstructionSet::kArm64:
-      case InstructionSet::kMips:
-      case InstructionSet::kMips64:
         code_factor_bits_ = 2;  // 32-bit instructions
         break;
       case InstructionSet::kNone:
diff --git a/compiler/debug/elf_debug_loc_writer.h b/compiler/debug/elf_debug_loc_writer.h
index a5a84bb..37ab948 100644
--- a/compiler/debug/elf_debug_loc_writer.h
+++ b/compiler/debug/elf_debug_loc_writer.h
@@ -42,10 +42,6 @@
       return Reg::X86Core(machine_reg);
     case InstructionSet::kX86_64:
       return Reg::X86_64Core(machine_reg);
-    case InstructionSet::kMips:
-      return Reg::MipsCore(machine_reg);
-    case InstructionSet::kMips64:
-      return Reg::Mips64Core(machine_reg);
     case InstructionSet::kNone:
       LOG(FATAL) << "No instruction set";
   }
@@ -63,10 +59,6 @@
       return Reg::X86Fp(machine_reg);
     case InstructionSet::kX86_64:
       return Reg::X86_64Fp(machine_reg);
-    case InstructionSet::kMips:
-      return Reg::MipsFp(machine_reg);
-    case InstructionSet::kMips64:
-      return Reg::Mips64Fp(machine_reg);
     case InstructionSet::kNone:
       LOG(FATAL) << "No instruction set";
   }
diff --git a/imgdiag/imgdiag_test.cc b/imgdiag/imgdiag_test.cc
index 6c83f9a..9dd7953 100644
--- a/imgdiag/imgdiag_test.cc
+++ b/imgdiag/imgdiag_test.cc
@@ -108,12 +108,11 @@
   std::string boot_image_location_;
 };
 
-#if defined (ART_TARGET) && !defined(__mips__)
+#if defined (ART_TARGET)
 TEST_F(ImgDiagTest, ImageDiffPidSelf) {
 #else
 // Can't run this test on the host, it will fail when trying to open /proc/kpagestats
 // because it's root read-only.
-// Also test fails on mips. b/24596015.
 TEST_F(ImgDiagTest, DISABLED_ImageDiffPidSelf) {
 #endif
   // Invoke 'img_diag' against the current process.
diff --git a/libartbase/arch/instruction_set.cc b/libartbase/arch/instruction_set.cc
index 8d4fbf4..9ec66fe 100644
--- a/libartbase/arch/instruction_set.cc
+++ b/libartbase/arch/instruction_set.cc
@@ -29,8 +29,6 @@
     case InstructionSet::kArm64:
     case InstructionSet::kX86:
     case InstructionSet::kX86_64:
-    case InstructionSet::kMips:
-    case InstructionSet::kMips64:
     case InstructionSet::kNone:
       LOG(FATAL) << "Unsupported instruction set " << isa;
       UNREACHABLE();
@@ -50,10 +48,6 @@
       return "x86";
     case InstructionSet::kX86_64:
       return "x86_64";
-    case InstructionSet::kMips:
-      return "mips";
-    case InstructionSet::kMips64:
-      return "mips64";
     case InstructionSet::kNone:
       return "none";
   }
@@ -72,10 +66,6 @@
     return InstructionSet::kX86;
   } else if (strcmp("x86_64", isa_str) == 0) {
     return InstructionSet::kX86_64;
-  } else if (strcmp("mips", isa_str) == 0) {
-    return InstructionSet::kMips;
-  } else if (strcmp("mips64", isa_str) == 0) {
-    return InstructionSet::kMips64;
   }
 
   return InstructionSet::kNone;
@@ -93,10 +83,6 @@
       // Fall-through.
     case InstructionSet::kX86_64:
       return kX86Alignment;
-    case InstructionSet::kMips:
-      // Fall-through.
-    case InstructionSet::kMips64:
-      return kMipsAlignment;
     case InstructionSet::kNone:
       LOG(FATAL) << "ISA kNone does not have alignment.";
       UNREACHABLE();
@@ -109,9 +95,6 @@
 
 static_assert(IsAligned<kPageSize>(kArmStackOverflowReservedBytes), "ARM gap not page aligned");
 static_assert(IsAligned<kPageSize>(kArm64StackOverflowReservedBytes), "ARM64 gap not page aligned");
-static_assert(IsAligned<kPageSize>(kMipsStackOverflowReservedBytes), "Mips gap not page aligned");
-static_assert(IsAligned<kPageSize>(kMips64StackOverflowReservedBytes),
-              "Mips64 gap not page aligned");
 static_assert(IsAligned<kPageSize>(kX86StackOverflowReservedBytes), "X86 gap not page aligned");
 static_assert(IsAligned<kPageSize>(kX86_64StackOverflowReservedBytes),
               "X86_64 gap not page aligned");
@@ -124,10 +107,6 @@
 static_assert(ART_FRAME_SIZE_LIMIT < kArmStackOverflowReservedBytes, "Frame size limit too large");
 static_assert(ART_FRAME_SIZE_LIMIT < kArm64StackOverflowReservedBytes,
               "Frame size limit too large");
-static_assert(ART_FRAME_SIZE_LIMIT < kMipsStackOverflowReservedBytes,
-              "Frame size limit too large");
-static_assert(ART_FRAME_SIZE_LIMIT < kMips64StackOverflowReservedBytes,
-              "Frame size limit too large");
 static_assert(ART_FRAME_SIZE_LIMIT < kX86StackOverflowReservedBytes,
               "Frame size limit too large");
 static_assert(ART_FRAME_SIZE_LIMIT < kX86_64StackOverflowReservedBytes,
diff --git a/libartbase/arch/instruction_set.h b/libartbase/arch/instruction_set.h
index 7e071bd..6f0cf52 100644
--- a/libartbase/arch/instruction_set.h
+++ b/libartbase/arch/instruction_set.h
@@ -32,9 +32,7 @@
   kThumb2,
   kX86,
   kX86_64,
-  kMips,
-  kMips64,
-  kLast = kMips64
+  kLast = kX86_64
 };
 std::ostream& operator<<(std::ostream& os, const InstructionSet& rhs);
 
@@ -42,10 +40,6 @@
 static constexpr InstructionSet kRuntimeISA = InstructionSet::kArm;
 #elif defined(__aarch64__)
 static constexpr InstructionSet kRuntimeISA = InstructionSet::kArm64;
-#elif defined(__mips__) && !defined(__LP64__)
-static constexpr InstructionSet kRuntimeISA = InstructionSet::kMips;
-#elif defined(__mips__) && defined(__LP64__)
-static constexpr InstructionSet kRuntimeISA = InstructionSet::kMips64;
 #elif defined(__i386__)
 static constexpr InstructionSet kRuntimeISA = InstructionSet::kX86;
 #elif defined(__x86_64__)
@@ -57,8 +51,6 @@
 // Architecture-specific pointer sizes
 static constexpr PointerSize kArmPointerSize = PointerSize::k32;
 static constexpr PointerSize kArm64PointerSize = PointerSize::k64;
-static constexpr PointerSize kMipsPointerSize = PointerSize::k32;
-static constexpr PointerSize kMips64PointerSize = PointerSize::k64;
 static constexpr PointerSize kX86PointerSize = PointerSize::k32;
 static constexpr PointerSize kX86_64PointerSize = PointerSize::k64;
 
@@ -69,10 +61,6 @@
 // ARM64 instruction alignment. This is the recommended alignment for maximum performance.
 static constexpr size_t kArm64Alignment = 16;
 
-// MIPS instruction alignment.  MIPS processors require code to be 4-byte aligned,
-// but 64-bit literals must be 8-byte aligned.
-static constexpr size_t kMipsAlignment = 8;
-
 // X86 instruction alignment. This is the recommended alignment for maximum performance.
 static constexpr size_t kX86Alignment = 16;
 
@@ -81,8 +69,6 @@
 static constexpr size_t kArm64InstructionAlignment = 4;
 static constexpr size_t kX86InstructionAlignment = 1;
 static constexpr size_t kX86_64InstructionAlignment = 1;
-static constexpr size_t kMipsInstructionAlignment = 4;
-static constexpr size_t kMips64InstructionAlignment = 4;
 
 const char* GetInstructionSetString(InstructionSet isa);
 
@@ -104,10 +90,6 @@
       return kX86PointerSize;
     case InstructionSet::kX86_64:
       return kX86_64PointerSize;
-    case InstructionSet::kMips:
-      return kMipsPointerSize;
-    case InstructionSet::kMips64:
-      return kMips64PointerSize;
 
     case InstructionSet::kNone:
       break;
@@ -127,10 +109,6 @@
       return kX86InstructionAlignment;
     case InstructionSet::kX86_64:
       return kX86_64InstructionAlignment;
-    case InstructionSet::kMips:
-      return kMipsInstructionAlignment;
-    case InstructionSet::kMips64:
-      return kMips64InstructionAlignment;
 
     case InstructionSet::kNone:
       break;
@@ -145,8 +123,6 @@
     case InstructionSet::kArm64:
     case InstructionSet::kX86:
     case InstructionSet::kX86_64:
-    case InstructionSet::kMips:
-    case InstructionSet::kMips64:
       return true;
 
     case InstructionSet::kNone:
@@ -162,12 +138,10 @@
     case InstructionSet::kArm:
     case InstructionSet::kThumb2:
     case InstructionSet::kX86:
-    case InstructionSet::kMips:
       return false;
 
     case InstructionSet::kArm64:
     case InstructionSet::kX86_64:
-    case InstructionSet::kMips64:
       return true;
 
     case InstructionSet::kNone:
@@ -192,10 +166,6 @@
       return 4;
     case InstructionSet::kX86_64:
       return 8;
-    case InstructionSet::kMips:
-      return 4;
-    case InstructionSet::kMips64:
-      return 8;
 
     case InstructionSet::kNone:
       break;
@@ -215,10 +185,6 @@
       return 8;
     case InstructionSet::kX86_64:
       return 8;
-    case InstructionSet::kMips:
-      return 4;
-    case InstructionSet::kMips64:
-      return 8;
 
     case InstructionSet::kNone:
       break;
@@ -229,15 +195,12 @@
 namespace instruction_set_details {
 
 #if !defined(ART_STACK_OVERFLOW_GAP_arm) || !defined(ART_STACK_OVERFLOW_GAP_arm64) || \
-    !defined(ART_STACK_OVERFLOW_GAP_mips) || !defined(ART_STACK_OVERFLOW_GAP_mips64) || \
     !defined(ART_STACK_OVERFLOW_GAP_x86) || !defined(ART_STACK_OVERFLOW_GAP_x86_64)
 #error "Missing defines for stack overflow gap"
 #endif
 
 static constexpr size_t kArmStackOverflowReservedBytes    = ART_STACK_OVERFLOW_GAP_arm;
 static constexpr size_t kArm64StackOverflowReservedBytes  = ART_STACK_OVERFLOW_GAP_arm64;
-static constexpr size_t kMipsStackOverflowReservedBytes   = ART_STACK_OVERFLOW_GAP_mips;
-static constexpr size_t kMips64StackOverflowReservedBytes = ART_STACK_OVERFLOW_GAP_mips64;
 static constexpr size_t kX86StackOverflowReservedBytes    = ART_STACK_OVERFLOW_GAP_x86;
 static constexpr size_t kX86_64StackOverflowReservedBytes = ART_STACK_OVERFLOW_GAP_x86_64;
 
@@ -255,12 +218,6 @@
     case InstructionSet::kArm64:
       return instruction_set_details::kArm64StackOverflowReservedBytes;
 
-    case InstructionSet::kMips:
-      return instruction_set_details::kMipsStackOverflowReservedBytes;
-
-    case InstructionSet::kMips64:
-      return instruction_set_details::kMips64StackOverflowReservedBytes;
-
     case InstructionSet::kX86:
       return instruction_set_details::kX86StackOverflowReservedBytes;
 
@@ -278,10 +235,10 @@
 // in registers so that memory operations for the interface trampolines can be avoided. The entities
 // are the resolved method and the pointer to the code to be invoked.
 //
-// On x86, ARM32 and MIPS, this is given for a *scalar* 64bit value. The definition thus *must* be
+// On x86 and ARM32, this is given for a *scalar* 64bit value. The definition thus *must* be
 // uint64_t or long long int.
 //
-// On x86_64, ARM64 and MIPS64, structs are decomposed for allocation, so we can create a structs of
+// On x86_64 and ARM64, structs are decomposed for allocation, so we can create a struct of
 // two size_t-sized values.
 //
 // We need two operations:
@@ -297,7 +254,7 @@
 //            when the garbage collector can move objects concurrently. Ensure that required locks
 //            are held when using!
 
-#if defined(__i386__) || defined(__arm__) || (defined(__mips__) && !defined(__LP64__))
+#if defined(__i386__) || defined(__arm__)
 typedef uint64_t TwoWordReturn;
 
 // Encodes method_ptr==nullptr and code_ptr==nullptr
@@ -313,7 +270,7 @@
   return ((hi64 << 32) | lo32);
 }
 
-#elif defined(__x86_64__) || defined(__aarch64__) || (defined(__mips__) && defined(__LP64__))
+#elif defined(__x86_64__) || defined(__aarch64__)
 
 // Note: TwoWordReturn can't be constexpr for 64-bit targets. We'd need a constexpr constructor,
 //       which would violate C-linkage in the entrypoint functions.
diff --git a/libartbase/arch/instruction_set_test.cc b/libartbase/arch/instruction_set_test.cc
index 12a117d..26071f1 100644
--- a/libartbase/arch/instruction_set_test.cc
+++ b/libartbase/arch/instruction_set_test.cc
@@ -27,8 +27,6 @@
   EXPECT_EQ(InstructionSet::kArm64, GetInstructionSetFromString("arm64"));
   EXPECT_EQ(InstructionSet::kX86, GetInstructionSetFromString("x86"));
   EXPECT_EQ(InstructionSet::kX86_64, GetInstructionSetFromString("x86_64"));
-  EXPECT_EQ(InstructionSet::kMips, GetInstructionSetFromString("mips"));
-  EXPECT_EQ(InstructionSet::kMips64, GetInstructionSetFromString("mips64"));
   EXPECT_EQ(InstructionSet::kNone, GetInstructionSetFromString("none"));
   EXPECT_EQ(InstructionSet::kNone, GetInstructionSetFromString("random-string"));
 }
@@ -39,8 +37,6 @@
   EXPECT_STREQ("arm64", GetInstructionSetString(InstructionSet::kArm64));
   EXPECT_STREQ("x86", GetInstructionSetString(InstructionSet::kX86));
   EXPECT_STREQ("x86_64", GetInstructionSetString(InstructionSet::kX86_64));
-  EXPECT_STREQ("mips", GetInstructionSetString(InstructionSet::kMips));
-  EXPECT_STREQ("mips64", GetInstructionSetString(InstructionSet::kMips64));
   EXPECT_STREQ("none", GetInstructionSetString(InstructionSet::kNone));
 }
 
@@ -53,10 +49,6 @@
             kX86InstructionAlignment);
   EXPECT_EQ(GetInstructionSetInstructionAlignment(InstructionSet::kX86_64),
             kX86_64InstructionAlignment);
-  EXPECT_EQ(GetInstructionSetInstructionAlignment(InstructionSet::kMips),
-            kMipsInstructionAlignment);
-  EXPECT_EQ(GetInstructionSetInstructionAlignment(InstructionSet::kMips64),
-            kMips64InstructionAlignment);
 }
 
 TEST(InstructionSetTest, TestRoundTrip) {
diff --git a/libartbase/base/mem_map.h b/libartbase/base/mem_map.h
index cdcfc3e..4b6257b 100644
--- a/libartbase/base/mem_map.h
+++ b/libartbase/base/mem_map.h
@@ -29,8 +29,7 @@
 
 namespace art {
 
-#if defined(__LP64__) && !defined(__Fuchsia__) && \
-    (defined(__aarch64__) || defined(__mips__) || defined(__APPLE__))
+#if defined(__LP64__) && !defined(__Fuchsia__) && (defined(__aarch64__) || defined(__APPLE__))
 #define USE_ART_LOW_4G_ALLOCATOR 1
 #else
 #if defined(__LP64__) && !defined(__Fuchsia__) && !defined(__x86_64__)
diff --git a/libnativebridge/tests/NeedsNativeBridge_test.cpp b/libnativebridge/tests/NeedsNativeBridge_test.cpp
index c8ff743..3f80f8d 100644
--- a/libnativebridge/tests/NeedsNativeBridge_test.cpp
+++ b/libnativebridge/tests/NeedsNativeBridge_test.cpp
@@ -20,7 +20,7 @@
 
 namespace android {
 
-static const char* kISAs[] = { "arm", "arm64", "mips", "mips64", "x86", "x86_64", "random", "64arm",
+static const char* kISAs[] = { "arm", "arm64", "x86", "x86_64", "random", "64arm",
                                "64_x86", "64_x86_64", "", "reallylongstringabcd", nullptr };
 
 TEST_F(NativeBridgeTest, NeedsNativeBridge) {
diff --git a/sigchainlib/sigchain.cc b/sigchainlib/sigchain.cc
index 76027f5..f58fe21 100644
--- a/sigchainlib/sigchain.cc
+++ b/sigchainlib/sigchain.cc
@@ -59,7 +59,7 @@
 //               doesn't have SA_RESTART, and raise the signal to avoid restarting syscalls that are
 //               expected to be interrupted?
 
-#if defined(__BIONIC__) && !defined(__LP64__) && !defined(__mips__)
+#if defined(__BIONIC__) && !defined(__LP64__)
 static int sigismember(const sigset64_t* sigset, int signum) {
   return sigismember64(sigset, signum);
 }
diff --git a/test/004-SignalTest/signaltest.cc b/test/004-SignalTest/signaltest.cc
index 49fe369..6f97b2a 100644
--- a/test/004-SignalTest/signaltest.cc
+++ b/test/004-SignalTest/signaltest.cc
@@ -118,7 +118,7 @@
   sigfillset(&action.sa_mask);
   sigdelset(&action.sa_mask, UNBLOCKED_SIGNAL);
   action.sa_flags = SA_SIGINFO | SA_ONSTACK;
-#if !defined(__APPLE__) && !defined(__mips__)
+#if !defined(__APPLE__)
   action.sa_restorer = nullptr;
 #endif
 
diff --git a/test/115-native-bridge/nativebridge.cc b/test/115-native-bridge/nativebridge.cc
index cc7e806..754cb00 100644
--- a/test/115-native-bridge/nativebridge.cc
+++ b/test/115-native-bridge/nativebridge.cc
@@ -211,7 +211,7 @@
   struct sigaction tmp;
   sigemptyset(&tmp.sa_mask);
   tmp.sa_sigaction = test_sigaction_handler;
-#if !defined(__APPLE__) && !defined(__mips__)
+#if !defined(__APPLE__)
   tmp.sa_restorer = nullptr;
 #endif
 
diff --git a/test/496-checker-inlining-class-loader/src/Main.java b/test/496-checker-inlining-class-loader/src/Main.java
index 5deb77f..4fe4723 100644
--- a/test/496-checker-inlining-class-loader/src/Main.java
+++ b/test/496-checker-inlining-class-loader/src/Main.java
@@ -107,13 +107,13 @@
                 /* Load and initialize FirstSeenByMyClassLoader */
   /// CHECK:      LoadClass class_name:FirstSeenByMyClassLoader gen_clinit_check:true
                 /* Load and initialize System */
-  // There may be MipsComputeBaseMethodAddress here.
+  // There may be HX86ComputeBaseMethodAddress here.
   /// CHECK:      LoadClass class_name:java.lang.System
   // The ClinitCheck may (PIC) or may not (non-PIC) be merged into the LoadClass.
   // (The merging checks for environment match but HLoadClass/kBootImageAddress
   // used for non-PIC mode does not have an environment at all.)
   /// CHECK:      StaticFieldGet
-  // There may be HX86ComputeBaseMethodAddress or MipsComputeBaseMethodAddress here.
+  // There may be HX86ComputeBaseMethodAddress here.
   /// CHECK:      LoadString
   /// CHECK-NEXT: NullCheck
   /// CHECK-NEXT: InvokeVirtual
diff --git a/test/552-checker-sharpening/src/Main.java b/test/552-checker-sharpening/src/Main.java
index 24f2dfb..657cc93 100644
--- a/test/552-checker-sharpening/src/Main.java
+++ b/test/552-checker-sharpening/src/Main.java
@@ -73,7 +73,7 @@
 
   public static int testDiamond(boolean negate, int x) {
     // These calls should use PC-relative loads to retrieve the target method.
-    // PC-relative bases used by MIPS32R2 and X86 should be pulled before the If.
+    // PC-relative bases used by X86 should be pulled before the If.
     if (negate) {
       return $noinline$foo(-x);
     } else {
@@ -100,7 +100,7 @@
   /// CHECK:                InvokeStaticOrDirect method_load_kind:BssEntry
 
   public static int testLoop(int[] array, int x) {
-    // PC-relative bases used by MIPS32R2 and X86 should be pulled before the loop.
+    // PC-relative bases used by X86 should be pulled before the loop.
     for (int i : array) {
       x += $noinline$foo(i);
     }
@@ -118,8 +118,7 @@
   /// CHECK-NEXT:           Goto
 
   public static int testLoopWithDiamond(int[] array, boolean negate, int x) {
-    // PC-relative bases used by MIPS32R2 and X86 should be pulled before the loop
-    // but not outside the if.
+    // PC-relative bases used by X86 should be pulled before the loop but not outside the if.
     if (array != null) {
       for (int i : array) {
         if (negate) {
diff --git a/test/MyClassNatives/MyClassNatives.java b/test/MyClassNatives/MyClassNatives.java
index 7935eb3..3d939d6 100644
--- a/test/MyClassNatives/MyClassNatives.java
+++ b/test/MyClassNatives/MyClassNatives.java
@@ -141,10 +141,6 @@
         float f9, int i10, float f10);
 
     // Normal native
-    native static long getStackArgSignExtendedMips64(int i1, int i2, int i3, int i4, int i5, int i6,
-        int stack_arg);
-
-    // Normal native
     static native double logD(double d);
     // Normal native
     static native float logF(float f);
@@ -277,10 +273,6 @@
         float f9, int i10, float f10);
 
     @FastNative
-    native static long getStackArgSignExtendedMips64_Fast(int i1, int i2, int i3, int i4, int i5, int i6,
-        int stack_arg);
-
-    @FastNative
     static native double logD_Fast(double d);
     @FastNative
     static native float logF_Fast(float f);
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 3068cd8..418dd10 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -17,8 +17,8 @@
 ANDROID_ART_ROOT="/apex/com.android.art"
 ANDROID_I18N_ROOT="/apex/com.android.i18n"
 ANDROID_TZDATA_ROOT="/apex/com.android.tzdata"
-ARCHITECTURES_32="(arm|x86|mips|none)"
-ARCHITECTURES_64="(arm64|x86_64|mips64|none)"
+ARCHITECTURES_32="(arm|x86|none)"
+ARCHITECTURES_64="(arm64|x86_64|none)"
 ARCHITECTURES_PATTERN="${ARCHITECTURES_32}"
 GET_DEVICE_ISA_BITNESS_FLAG="--32"
 BOOT_IMAGE=""
diff --git a/test/run-test b/test/run-test
index 4d62c6f..66039b7 100755
--- a/test/run-test
+++ b/test/run-test
@@ -627,8 +627,8 @@
             target_arch_name=x86
         fi
     else
-        grep32bit=`ls ${ANDROID_PRODUCT_OUT}/data/art-test | grep -E '^(arm|x86|mips)$'`
-        grep64bit=`ls ${ANDROID_PRODUCT_OUT}/data/art-test | grep -E '^(arm64|x86_64|mips64)$'`
+        grep32bit=`ls ${ANDROID_PRODUCT_OUT}/data/art-test | grep -E '^(arm|x86)$'`
+        grep64bit=`ls ${ANDROID_PRODUCT_OUT}/data/art-test | grep -E '^(arm64|x86_64)$'`
         if [ "x${suffix64}" = "x64" ]; then
             target_arch_name=${grep64bit}
         else
diff --git a/test/utils/get-device-isa b/test/utils/get-device-isa
index 0e19b9e..c9b342d 100755
--- a/test/utils/get-device-isa
+++ b/test/utils/get-device-isa
@@ -29,8 +29,8 @@
   usage
 fi
 
-ARCHITECTURES_32="(arm|x86|mips|none)"
-ARCHITECTURES_64="(arm64|x86_64|mips64|none)"
+ARCHITECTURES_32="(arm|x86|none)"
+ARCHITECTURES_64="(arm64|x86_64|none)"
 
 case "$1" in
   (--32)
diff --git a/tools/bisection_search/bisection_search.py b/tools/bisection_search/bisection_search.py
index 250b5d1..102bbad 100755
--- a/tools/bisection_search/bisection_search.py
+++ b/tools/bisection_search/bisection_search.py
@@ -46,10 +46,8 @@
 # Passes that are never disabled during search process because disabling them
 # would compromise correctness.
 MANDATORY_PASSES = ['dex_cache_array_fixups_arm',
-                    'dex_cache_array_fixups_mips',
                     'instruction_simplifier$before_codegen',
                     'pc_relative_fixups_x86',
-                    'pc_relative_fixups_mips',
                     'x86_memory_operand_generation']
 
 # Passes that show up as optimizations in compiler verbose output but aren't
diff --git a/tools/checker/README b/tools/checker/README
index b8dd803..8a6b128 100644
--- a/tools/checker/README
+++ b/tools/checker/README
@@ -82,4 +82,4 @@
 thereby avoiding to repeat the check lines if some, but not all architectures
 match. An example line looks like:
 
-  /// CHECK-START-{MIPS,ARM,ARM64}: int MyClass.MyMethod() constant_folding (after)
+  /// CHECK-START-{X86_64,ARM,ARM64}: int MyClass.MyMethod() constant_folding (after)
diff --git a/tools/checker/common/archs.py b/tools/checker/common/archs.py
index 178e0b5..9628c88 100644
--- a/tools/checker/common/archs.py
+++ b/tools/checker/common/archs.py
@@ -12,4 +12,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-archs_list = ['ARM', 'ARM64', 'MIPS', 'MIPS64', 'X86', 'X86_64']
+archs_list = ['ARM', 'ARM64', 'X86', 'X86_64']
diff --git a/tools/signal_dumper/signal_dumper.cc b/tools/signal_dumper/signal_dumper.cc
index eb95b30..e9a589e 100644
--- a/tools/signal_dumper/signal_dumper.cc
+++ b/tools/signal_dumper/signal_dumper.cc
@@ -368,15 +368,11 @@
 }
 
 void DumpABI(pid_t forked_pid) {
-  enum class ABI { kArm, kArm64, kMips, kMips64, kX86, kX86_64 };
+  enum class ABI { kArm, kArm64, kX86, kX86_64 };
 #if defined(__arm__)
   constexpr ABI kDumperABI = ABI::kArm;
 #elif defined(__aarch64__)
   constexpr ABI kDumperABI = ABI::kArm64;
-#elif defined(__mips__) && !defined(__LP64__)
-  constexpr ABI kDumperABI = ABI::kMips;
-#elif defined(__mips__) && defined(__LP64__)
-  constexpr ABI kDumperABI = ABI::kMips64;
 #elif defined(__i386__)
   constexpr ABI kDumperABI = ABI::kX86;
 #elif defined(__x86_64__)
@@ -398,10 +394,6 @@
       case ABI::kArm64:
         to_print = ABI::kArm64;
         break;
-      case ABI::kMips:
-      case ABI::kMips64:
-        to_print = ABI::kMips64;
-        break;
       case ABI::kX86:
       case ABI::kX86_64:
         to_print = ABI::kX86_64;
@@ -416,10 +408,6 @@
       case ABI::kArm64:
         to_print = io_vec.iov_len == 18 * sizeof(uint32_t) ? ABI::kArm : ABI::kArm64;
         break;
-      case ABI::kMips:
-      case ABI::kMips64:
-        to_print = ABI::kMips64;  // TODO Figure out how this should work.
-        break;
       case ABI::kX86:
       case ABI::kX86_64:
         to_print = io_vec.iov_len == 17 * sizeof(uint32_t) ? ABI::kX86 : ABI::kX86_64;
@@ -436,12 +424,6 @@
     case ABI::kArm64:
       abi_str = "arm64";
       break;
-    case ABI::kMips:
-      abi_str = "mips";
-      break;
-    case ABI::kMips64:
-      abi_str = "mips64";
-      break;
     case ABI::kX86:
       abi_str = "x86";
       break;